
[PATCH] malloc: Lindent before functional changes


From: Joern Engel <joern@purestorage.org>

JIRA: PURE-27597

Pure reformatting pass: run Lindent over the malloc sources now, so
that the functional changes in later patches are not buried in
whitespace noise.  No functional change intended.
---
 tpc/malloc2.13/arena.h  | 477 +++++++++++++++++++++++-------------------------
 tpc/malloc2.13/malloc.c |  46 ++---
 2 files changed, 253 insertions(+), 270 deletions(-)
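
For reviewers: the reindentation is the output of the kernel's Lindent
wrapper; assuming the usual scripts/Lindent parameters, the effect is
roughly

	indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1 \
		tpc/malloc2.13/arena.h tpc/malloc2.13/malloc.c

so the patch should be whitespace-only, and "git diff -w" against the
parent commit should come up empty.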

diff --git a/tpc/malloc2.13/arena.h b/tpc/malloc2.13/arena.h
index 803d7b3bf020..c854de12910c 100644
--- a/tpc/malloc2.13/arena.h
+++ b/tpc/malloc2.13/arena.h
@@ -108,37 +108,37 @@ static int __malloc_initialized = -1;
    in the new arena. */
 
 #define arena_get(ptr, size) do { \
-  arena_lookup(ptr); \
-  arena_lock(ptr, size); \
+	arena_lookup(ptr); \
+	arena_lock(ptr, size); \
 } while(0)
 
 #define arena_lookup(ptr) do { \
-  Void_t *vptr = NULL; \
-  ptr = (struct malloc_state *)tsd_getspecific(arena_key, vptr); \
+	Void_t *vptr = NULL; \
+	ptr = (struct malloc_state *)tsd_getspecific(arena_key, vptr); \
 } while(0)
 
 #ifdef PER_THREAD
 #define arena_lock(ptr, size) do { \
-  if(ptr) \
-    (void)mutex_lock(&ptr->mutex); \
-  else \
-    ptr = arena_get2(ptr, (size)); \
+	if(ptr) \
+		(void)mutex_lock(&ptr->mutex); \
+	else \
+		ptr = arena_get2(ptr, (size)); \
 } while(0)
 #else
 #define arena_lock(ptr, size) do { \
-  if(ptr && !mutex_trylock(&ptr->mutex)) { \
-    THREAD_STAT(++(ptr->stat_lock_direct)); \
-  } else \
-    ptr = arena_get2(ptr, (size)); \
+	if(ptr && !mutex_trylock(&ptr->mutex)) { \
+		THREAD_STAT(++(ptr->stat_lock_direct)); \
+	} else \
+		ptr = arena_get2(ptr, (size)); \
 } while(0)
 #endif
 
 /* find the heap and corresponding arena for a given ptr */
 
 #define heap_for_ptr(ptr) \
- ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
+	((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
 #define arena_for_chunk(ptr) \
- (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
+	(chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
 
 
 /**************************************************************************/
@@ -436,168 +436,157 @@ __libc_malloc_pthread_startup (bool first_time)
 # endif
 #endif
 
-static void
-ptmalloc_init (void)
+static void ptmalloc_init(void)
 {
-  const char* s;
-  int secure = 0;
+	const char *s;
+	int secure = 0;
 
-  if(__malloc_initialized >= 0) return;
-  __malloc_initialized = 0;
+	if (__malloc_initialized >= 0)
+		return;
+	__malloc_initialized = 0;
 
 #ifdef _LIBC
-# if defined SHARED && !USE___THREAD
-  /* ptmalloc_init_minimal may already have been called via
-     __libc_malloc_pthread_startup, above.  */
-  if (mp_.pagesize == 0)
-# endif
+#if defined SHARED && !USE___THREAD
+	/* ptmalloc_init_minimal may already have been called via
+	   __libc_malloc_pthread_startup, above.  */
+	if (mp_.pagesize == 0)
+#endif
 #endif
-    ptmalloc_init_minimal();
+		ptmalloc_init_minimal();
 
 #ifndef NO_THREADS
-# if defined _LIBC
-  /* We know __pthread_initialize_minimal has already been called,
-     and that is enough.  */
-#   define NO_STARTER
-# endif
-# ifndef NO_STARTER
-  /* With some threads implementations, creating thread-specific data
-     or initializing a mutex may call malloc() itself.  Provide a
-     simple starter version (realloc() won't work). */
-  save_malloc_hook = dlmalloc_hook;
-  save_memalign_hook = dlmemalign_hook;
-  save_free_hook = dlfree_hook;
-  dlmalloc_hook = malloc_starter;
-  dlmemalign_hook = memalign_starter;
-  dlfree_hook = free_starter;
-#  ifdef _LIBC
-  /* Initialize the pthreads interface. */
-  if (__pthread_initialize != NULL)
-    __pthread_initialize();
-#  endif /* !defined _LIBC */
-# endif	/* !defined NO_STARTER */
-#endif /* !defined NO_THREADS */
-  mutex_init(&main_arena.mutex);
-  main_arena.next = &main_arena;
+#if defined _LIBC
+	/* We know __pthread_initialize_minimal has already been called,
+	   and that is enough.  */
+#define NO_STARTER
+#endif
+#ifndef NO_STARTER
+	/* With some threads implementations, creating thread-specific data
+	   or initializing a mutex may call malloc() itself.  Provide a
+	   simple starter version (realloc() won't work). */
+	save_malloc_hook = dlmalloc_hook;
+	save_memalign_hook = dlmemalign_hook;
+	save_free_hook = dlfree_hook;
+	dlmalloc_hook = malloc_starter;
+	dlmemalign_hook = memalign_starter;
+	dlfree_hook = free_starter;
+#ifdef _LIBC
+	/* Initialize the pthreads interface. */
+	if (__pthread_initialize != NULL)
+		__pthread_initialize();
+#endif				/* !defined _LIBC */
+#endif				/* !defined NO_STARTER */
+#endif				/* !defined NO_THREADS */
+	mutex_init(&main_arena.mutex);
+	main_arena.next = &main_arena;
 
 #if defined _LIBC && defined SHARED
-  /* In case this libc copy is in a non-default namespace, never use brk.
-     Likewise if dlopened from statically linked program.  */
-  Dl_info di;
-  struct link_map *l;
-
-  if (_dl_open_hook != NULL
-      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
-	  && l->l_ns != LM_ID_BASE))
-    __morecore = __failing_morecore;
+	/* In case this libc copy is in a non-default namespace, never use brk.
+	   Likewise if dlopened from statically linked program.  */
+	Dl_info di;
+	struct link_map *l;
+
+	if (_dl_open_hook != NULL || (_dl_addr(ptmalloc_init, &di, &l, NULL) != 0 && l->l_ns != LM_ID_BASE))
+		__morecore = __failing_morecore;
 #endif
 
-  mutex_init(&list_lock);
-  tsd_key_create(&arena_key, NULL);
-  tsd_setspecific(arena_key, (Void_t *)&main_arena);
-  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
+	mutex_init(&list_lock);
+	tsd_key_create(&arena_key, NULL);
+	tsd_setspecific(arena_key, (Void_t *) & main_arena);
+	thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
 #ifndef NO_THREADS
-# ifndef NO_STARTER
-  dlmalloc_hook = save_malloc_hook;
-  dlmemalign_hook = save_memalign_hook;
-  dlfree_hook = save_free_hook;
-# else
-#  undef NO_STARTER
-# endif
+#ifndef NO_STARTER
+	dlmalloc_hook = save_malloc_hook;
+	dlmemalign_hook = save_memalign_hook;
+	dlfree_hook = save_free_hook;
+#else
+#undef NO_STARTER
+#endif
 #endif
 #ifdef _LIBC
-  secure = __libc_enable_secure;
-  s = NULL;
-  if (__builtin_expect (_environ != NULL, 1))
-    {
-      char **runp = _environ;
-      char *envline;
-
-      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
-			       0))
-	{
-	  size_t len = strcspn (envline, "=");
-
-	  if (envline[len] != '=')
-	    /* This is a "MALLOC_" variable at the end of the string
-	       without a '=' character.  Ignore it since otherwise we
-	       will access invalid memory below.  */
-	    continue;
-
-	  switch (len)
-	    {
-	    case 6:
-	      if (memcmp (envline, "CHECK_", 6) == 0)
-		s = &envline[7];
-	      break;
-	    case 8:
-	      if (! secure)
-		{
-		  if (memcmp (envline, "TOP_PAD_", 8) == 0)
-		    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
-		  else if (memcmp (envline, "PERTURB_", 8) == 0)
-		    mALLOPt(M_PERTURB, atoi(&envline[9]));
-		}
-	      break;
-	    case 9:
-	      if (! secure)
-		{
-		  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
-		    mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
+	secure = __libc_enable_secure;
+	s = NULL;
+	if (__builtin_expect(_environ != NULL, 1)) {
+		char **runp = _environ;
+		char *envline;
+
+		while (__builtin_expect((envline = next_env_entry(&runp)) != NULL, 0)) {
+			size_t len = strcspn(envline, "=");
+
+			if (envline[len] != '=')
+				/* This is a "MALLOC_" variable at the end of the string
+				   without a '=' character.  Ignore it since otherwise we
+				   will access invalid memory below.  */
+				continue;
+
+			switch (len) {
+			case 6:
+				if (memcmp(envline, "CHECK_", 6) == 0)
+					s = &envline[7];
+				break;
+			case 8:
+				if (!secure) {
+					if (memcmp(envline, "TOP_PAD_", 8) == 0)
+						mALLOPt(M_TOP_PAD, atoi(&envline[9]));
+					else if (memcmp(envline, "PERTURB_", 8) == 0)
+						mALLOPt(M_PERTURB, atoi(&envline[9]));
+				}
+				break;
+			case 9:
+				if (!secure) {
+					if (memcmp(envline, "MMAP_MAX_", 9) == 0)
+						mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
 #ifdef PER_THREAD
-		  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
-		    mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
+					else if (memcmp(envline, "ARENA_MAX", 9) == 0)
+						mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
 #endif
-		}
-	      break;
+				}
+				break;
 #ifdef PER_THREAD
-	    case 10:
-	      if (! secure)
-		{
-		  if (memcmp (envline, "ARENA_TEST", 10) == 0)
-		    mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
-		}
-	      break;
+			case 10:
+				if (!secure) {
+					if (memcmp(envline, "ARENA_TEST", 10) == 0)
+						mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
+				}
+				break;
 #endif
-	    case 15:
-	      if (! secure)
-		{
-		  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
-		    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
-		  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
-		    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
+			case 15:
+				if (!secure) {
+					if (memcmp(envline, "TRIM_THRESHOLD_", 15) == 0)
+						mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
+					else if (memcmp(envline, "MMAP_THRESHOLD_", 15) == 0)
+						mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
+				}
+				break;
+			default:
+				break;
+			}
 		}
-	      break;
-	    default:
-	      break;
-	    }
 	}
-    }
 #else
-  if (! secure)
-    {
-      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
-	mALLOPt(M_TRIM_THRESHOLD, atoi(s));
-      if((s = getenv("MALLOC_TOP_PAD_")))
-	mALLOPt(M_TOP_PAD, atoi(s));
-      if((s = getenv("MALLOC_PERTURB_")))
-	mALLOPt(M_PERTURB, atoi(s));
-      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
-	mALLOPt(M_MMAP_THRESHOLD, atoi(s));
-      if((s = getenv("MALLOC_MMAP_MAX_")))
-	mALLOPt(M_MMAP_MAX, atoi(s));
-    }
-  s = getenv("MALLOC_CHECK_");
+	if (!secure) {
+		if ((s = getenv("MALLOC_TRIM_THRESHOLD_")))
+			mALLOPt(M_TRIM_THRESHOLD, atoi(s));
+		if ((s = getenv("MALLOC_TOP_PAD_")))
+			mALLOPt(M_TOP_PAD, atoi(s));
+		if ((s = getenv("MALLOC_PERTURB_")))
+			mALLOPt(M_PERTURB, atoi(s));
+		if ((s = getenv("MALLOC_MMAP_THRESHOLD_")))
+			mALLOPt(M_MMAP_THRESHOLD, atoi(s));
+		if ((s = getenv("MALLOC_MMAP_MAX_")))
+			mALLOPt(M_MMAP_MAX, atoi(s));
+	}
+	s = getenv("MALLOC_CHECK_");
 #endif
-  if(s && s[0]) {
-    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
-    if (check_action != 0)
-      dlmalloc_check_init();
-  }
-  void (*hook) (void) = force_reg (dlmalloc_initialize_hook);
-  if (hook != NULL)
-    (*hook)();
-  __malloc_initialized = 1;
+	if (s && s[0]) {
+		mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
+		if (check_action != 0)
+			dlmalloc_check_init();
+	}
+	void (*hook) (void) = force_reg(dlmalloc_initialize_hook);
+	if (hook != NULL)
+		(*hook) ();
+	__malloc_initialized = 1;
 }
 
 /* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
@@ -836,65 +825,62 @@ heap_trim(heap_info *heap, size_t pad)
 
 /* Create a new arena with initial size "size".  */
 
-static struct malloc_state *
-_int_new_arena(size_t size)
+static struct malloc_state *_int_new_arena(size_t size)
 {
-  struct malloc_state * a;
-  heap_info *h;
-  char *ptr;
-  unsigned long misalign;
-
-  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
-	       mp_.top_pad);
-  if(!h) {
-    /* Maybe size is too large to fit in a single heap.  So, just try
-       to create a minimally-sized arena and let _int_malloc() attempt
-       to deal with the large request via mmap_chunk().  */
-    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
-    if(!h)
-      return 0;
-  }
-  a = h->ar_ptr = (struct malloc_state *)(h+1);
-  malloc_init_state(a);
-  /*a->next = NULL;*/
-  a->system_mem = a->max_system_mem = h->size;
-  arena_mem += h->size;
+	struct malloc_state *a;
+	heap_info *h;
+	char *ptr;
+	unsigned long misalign;
+
+	h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT), mp_.top_pad);
+	if (!h) {
+		/* Maybe size is too large to fit in a single heap.  So, just try
+		   to create a minimally-sized arena and let _int_malloc() attempt
+		   to deal with the large request via mmap_chunk().  */
+		h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
+		if (!h)
+			return 0;
+	}
+	a = h->ar_ptr = (struct malloc_state *)(h + 1);
+	malloc_init_state(a);
+	/*a->next = NULL; */
+	a->system_mem = a->max_system_mem = h->size;
+	arena_mem += h->size;
 #ifdef NO_THREADS
-  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
-     mp_.max_total_mem)
-    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
+	if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) > mp_.max_total_mem)
+		mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
 #endif
 
-  /* Set up the top chunk, with proper alignment. */
-  ptr = (char *)(a + 1);
-  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
-  if (misalign > 0)
-    ptr += MALLOC_ALIGNMENT - misalign;
-  top(a) = (mchunkptr)ptr;
-  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
+	/* Set up the top chunk, with proper alignment. */
+	ptr = (char *)(a + 1);
+	misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
+	if (misalign > 0)
+		ptr += MALLOC_ALIGNMENT - misalign;
+	top(a) = (mchunkptr) ptr;
+	set_head(top(a), (((char *)h + h->size) - ptr) | PREV_INUSE);
 
-  tsd_setspecific(arena_key, (Void_t *)a);
-  mutex_init(&a->mutex);
-  (void)mutex_lock(&a->mutex);
+	tsd_setspecific(arena_key, (Void_t *) a);
+	mutex_init(&a->mutex);
+	(void)mutex_lock(&a->mutex);
 
 #ifdef PER_THREAD
-  (void)mutex_lock(&list_lock);
+	(void)mutex_lock(&list_lock);
 #endif
 
-  /* Add the new arena to the global list.  */
-  a->next = main_arena.next;
-  atomic_write_barrier ();
-  main_arena.next = a;
+	/* Add the new arena to the global list.  */
+	a->next = main_arena.next;
+	atomic_write_barrier();
+	main_arena.next = a;
 
 #ifdef PER_THREAD
-  ++narenas;
+	++narenas;
 
-  (void)mutex_unlock(&list_lock);
+	(void)mutex_unlock(&list_lock);
 #endif
 
-  THREAD_STAT(++(a->stat_lock_loop));
+	THREAD_STAT(++(a->stat_lock_loop));
 
-  return a;
+	return a;
 }
 
 
@@ -977,64 +963,61 @@ reused_arena (void)
 }
 #endif
 
-static struct malloc_state *
-internal_function
-arena_get2(struct malloc_state * a_tsd, size_t size)
+static struct malloc_state *internal_function arena_get2(struct malloc_state *a_tsd, size_t size)
 {
-  struct malloc_state * a;
+	struct malloc_state *a;
 
 #ifdef PER_THREAD
-  if ((a = get_free_list ()) == NULL
-      && (a = reused_arena ()) == NULL)
-    /* Nothing immediately available, so generate a new arena.  */
-    a = _int_new_arena(size);
+	if ((a = get_free_list()) == NULL && (a = reused_arena()) == NULL)
+		/* Nothing immediately available, so generate a new arena.  */
+		a = _int_new_arena(size);
 #else
-  if(!a_tsd)
-    a = a_tsd = &main_arena;
-  else {
-    a = a_tsd->next;
-    if(!a) {
-      /* This can only happen while initializing the new arena. */
-      (void)mutex_lock(&main_arena.mutex);
-      THREAD_STAT(++(main_arena.stat_lock_wait));
-      return &main_arena;
-    }
-  }
+	if (!a_tsd)
+		a = a_tsd = &main_arena;
+	else {
+		a = a_tsd->next;
+		if (!a) {
+			/* This can only happen while initializing the new arena. */
+			(void)mutex_lock(&main_arena.mutex);
+			THREAD_STAT(++(main_arena.stat_lock_wait));
+			return &main_arena;
+		}
+	}
 
-  /* Check the global, circularly linked list for available arenas. */
-  bool retried = false;
+	/* Check the global, circularly linked list for available arenas. */
+	bool retried = false;
  repeat:
-  do {
-    if(!mutex_trylock(&a->mutex)) {
-      if (retried)
-	(void)mutex_unlock(&list_lock);
-      THREAD_STAT(++(a->stat_lock_loop));
-      tsd_setspecific(arena_key, (Void_t *)a);
-      return a;
-    }
-    a = a->next;
-  } while(a != a_tsd);
-
-  /* If not even the list_lock can be obtained, try again.  This can
-     happen during `atfork', or for example on systems where thread
-     creation makes it temporarily impossible to obtain _any_
-     locks. */
-  if(!retried && mutex_trylock(&list_lock)) {
-    /* We will block to not run in a busy loop.  */
-    (void)mutex_lock(&list_lock);
-
-    /* Since we blocked there might be an arena available now.  */
-    retried = true;
-    a = a_tsd;
-    goto repeat;
-  }
+	do {
+		if (!mutex_trylock(&a->mutex)) {
+			if (retried)
+				(void)mutex_unlock(&list_lock);
+			THREAD_STAT(++(a->stat_lock_loop));
+			tsd_setspecific(arena_key, (Void_t *) a);
+			return a;
+		}
+		a = a->next;
+	} while (a != a_tsd);
+
+	/* If not even the list_lock can be obtained, try again.  This can
+	   happen during `atfork', or for example on systems where thread
+	   creation makes it temporarily impossible to obtain _any_
+	   locks. */
+	if (!retried && mutex_trylock(&list_lock)) {
+		/* We will block to not run in a busy loop.  */
+		(void)mutex_lock(&list_lock);
+
+		/* Since we blocked there might be an arena available now.  */
+		retried = true;
+		a = a_tsd;
+		goto repeat;
+	}
 
-  /* Nothing immediately available, so generate a new arena.  */
-  a = _int_new_arena(size);
-  (void)mutex_unlock(&list_lock);
+	/* Nothing immediately available, so generate a new arena.  */
+	a = _int_new_arena(size);
+	(void)mutex_unlock(&list_lock);
 #endif
 
-  return a;
+	return a;
 }
 
 #ifdef PER_THREAD
diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 7c94a8cefcac..c9644c382e05 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -2209,43 +2209,43 @@ typedef struct malloc_chunk* mfastbinptr;
 */
 
 struct malloc_state {
-  /* Serialize access.  */
-  mutex_t mutex;
+	/* Serialize access.  */
+	mutex_t mutex;
 
-  /* Flags (formerly in max_fast).  */
-  int flags;
+	/* Flags (formerly in max_fast).  */
+	int flags;
 
 #if THREAD_STATS
-  /* Statistics for locking.  Only used if THREAD_STATS is defined.  */
-  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
+	/* Statistics for locking.  Only used if THREAD_STATS is defined.  */
+	long stat_lock_direct, stat_lock_loop, stat_lock_wait;
 #endif
 
-  /* Fastbins */
-  mfastbinptr      fastbinsY[NFASTBINS];
+	/* Fastbins */
+	mfastbinptr fastbinsY[NFASTBINS];
 
-  /* Base of the topmost chunk -- not otherwise kept in a bin */
-  mchunkptr        top;
+	/* Base of the topmost chunk -- not otherwise kept in a bin */
+	mchunkptr top;
 
-  /* The remainder from the most recent split of a small request */
-  mchunkptr        last_remainder;
+	/* The remainder from the most recent split of a small request */
+	mchunkptr last_remainder;
 
-  /* Normal bins packed as described above */
-  mchunkptr        bins[NBINS * 2 - 2];
+	/* Normal bins packed as described above */
+	mchunkptr bins[NBINS * 2 - 2];
 
-  /* Bitmap of bins */
-  unsigned int     binmap[BINMAPSIZE];
+	/* Bitmap of bins */
+	unsigned int binmap[BINMAPSIZE];
 
-  /* Linked list */
-  struct malloc_state *next;
+	/* Linked list */
+	struct malloc_state *next;
 
 #ifdef PER_THREAD
-  /* Linked list for free arenas.  */
-  struct malloc_state *next_free;
+	/* Linked list for free arenas.  */
+	struct malloc_state *next_free;
 #endif
 
-  /* Memory allocated from the system in this arena.  */
-  INTERNAL_SIZE_T system_mem;
-  INTERNAL_SIZE_T max_system_mem;
+	/* Memory allocated from the system in this arena.  */
+	INTERNAL_SIZE_T system_mem;
+	INTERNAL_SIZE_T max_system_mem;
 };
 
 struct malloc_par {
-- 
2.7.0.rc3

