[PATCH] Fix formatting in arena.c
- From: Siddhesh Poyarekar <siddhesh@redhat.com>
- To: libc-alpha@sourceware.org
- Date: Sat, 11 Aug 2012 01:12:40 +0530
- Subject: [PATCH] Fix formatting in arena.c
Hi,
While reading through malloc/arena.c, I noticed that the entire file
is incorrectly formatted. I have run the indent program on it and
then tweaked the result by hand so that it *largely* complies with
the coding standards. I know I could commit this directly as an
obvious formatting change, but I would like someone else to go
through it and confirm that it does not introduce any unintended
functional changes (uncommenting commented-out code, etc.). I have
verified that the reformatted file builds and does not cause any
regressions. For reference, a small standalone sketch of the style
rules the file now follows is below.
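(The following fragment is illustrative only and is not taken from
arena.c; it is a minimal sketch of the GNU style conventions applied
by this patch: a space before the opening parenthesis of calls and
declarations, braces on their own lines indented by two columns, and
a space after the '#' of preprocessor directives nested inside
conditional blocks.)

#include <stdio.h>

#ifndef EXAMPLE_LIMIT
# define EXAMPLE_LIMIT 4   /* Nested directive: space after '#'.  */
#endif

static int
sum_to_limit (int n)
{
  int i, total = 0;

  /* Space before the parenthesis; braces on their own lines,
     indented two columns, with the body two columns further.  */
  if (n > EXAMPLE_LIMIT)
    {
      fprintf (stderr, "clamping %d to %d\n", n, EXAMPLE_LIMIT);
      n = EXAMPLE_LIMIT;
    }
  for (i = 1; i <= n; ++i)
    total += i;
  return total;
}

int
main (void)
{
  printf ("%d\n", sum_to_limit (10));
  return 0;
}

Note that indent also rewrites comments -- for instance, in the diff
below it inserts a space before the closing delimiter of comments
such as /*check_chunk(ar_ptr, top_chunk);*/ -- which is exactly the
kind of change worth double-checking.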
Regards,
Siddhesh
2012-08-11 Siddhesh Poyarekar <siddhesh@redhat.com>
* malloc/arena.c: Fix indentation.
diff --git a/malloc/arena.c b/malloc/arena.c
index 06bdd77..eece686 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -27,7 +27,7 @@
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
-# define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
+# define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif
@@ -40,7 +40,7 @@
#ifndef THREAD_STATS
-#define THREAD_STATS 0
+# define THREAD_STATS 0
#endif
/* If THREAD_STATS is non-zero, some statistics on mutex locking are
@@ -54,15 +54,16 @@
malloc_chunks. It is allocated with mmap() and always starts at an
address aligned to HEAP_MAX_SIZE. */
-typedef struct _heap_info {
- mstate ar_ptr; /* Arena for this heap. */
- struct _heap_info *prev; /* Previous heap. */
- size_t size; /* Current size in bytes. */
- size_t mprotect_size; /* Size in bytes that has been mprotected
- PROT_READ|PROT_WRITE. */
+typedef struct _heap_info
+{
+ mstate ar_ptr; /* Arena for this heap. */
+ struct _heap_info *prev; /* Previous heap. */
+ size_t size; /* Current size in bytes. */
+ size_t mprotect_size; /* Size in bytes that has been mprotected
+ PROT_READ|PROT_WRITE. */
/* Make sure the following data is properly aligned, particularly
that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
- MALLOC_ALIGNMENT. */
+ MALLOC_ALIGNMENT. */
char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
@@ -83,12 +84,12 @@ static mstate free_list;
#if THREAD_STATS
static int stat_n_heaps;
-#define THREAD_STAT(x) x
+# define THREAD_STAT(x) x
#else
-#define THREAD_STAT(x) do ; while(0)
+# define THREAD_STAT(x) do ; while(0)
#endif
-/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
+/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;
/* Already initialized? */
@@ -103,7 +104,7 @@ int __malloc_initialized = -1;
once over the circularly linked list of arenas. If no arena is
readily available, create a new one. In this latter case, `size'
is just a hint as to how much memory will be required immediately
- in the new arena. */
+ in the new arena. */
#define arena_get(ptr, size) do { \
arena_lookup(ptr); \
@@ -131,7 +132,7 @@ int __malloc_initialized = -1;
} while(0)
#endif
-/* find the heap and corresponding arena for a given ptr */
+/* Find the heap and corresponding arena for a given ptr. */
#define heap_for_ptr(ptr) \
((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
@@ -145,9 +146,8 @@ int __malloc_initialized = -1;
static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
const __malloc_ptr_t);
-static void (*save_free_hook) (__malloc_ptr_t __ptr,
- const __malloc_ptr_t);
-static void* save_arena;
+static void (*save_free_hook) (__malloc_ptr_t __ptr, const __malloc_ptr_t);
+static void *save_arena;
#ifdef ATFORK_MEM
ATFORK_MEM;
@@ -159,56 +159,66 @@ ATFORK_MEM;
#define ATFORK_ARENA_PTR ((void*)-1)
/* The following hooks are used while the `atfork' handling mechanism
- is active. */
+ is active. */
-static void*
-malloc_atfork(size_t sz, const void *caller)
+static void *
+malloc_atfork (size_t sz, const void *caller)
{
void *vptr = NULL;
void *victim;
- tsd_getspecific(arena_key, vptr);
- if(vptr == ATFORK_ARENA_PTR) {
- /* We are the only thread that may allocate at all. */
- if(save_malloc_hook != malloc_check) {
- return _int_malloc(&main_arena, sz);
- } else {
- if(top_check()<0)
- return 0;
- victim = _int_malloc(&main_arena, sz+1);
- return mem2mem_check(victim, sz);
+ tsd_getspecific (arena_key, vptr);
+ if (vptr == ATFORK_ARENA_PTR)
+ {
+ /* We are the only thread that may allocate at all. */
+ if (save_malloc_hook != malloc_check)
+ {
+ return _int_malloc (&main_arena, sz);
+ }
+ else
+ {
+ if (top_check () < 0)
+ return 0;
+ victim = _int_malloc (&main_arena, sz + 1);
+ return mem2mem_check (victim, sz);
+ }
+ }
+ else
+ {
+ /* Suspend the thread until the `atfork' handlers have completed.
+ By that time, the hooks will have been reset as well, so that
+ mALLOc() can be used again. */
+ (void) mutex_lock (&list_lock);
+ (void) mutex_unlock (&list_lock);
+ return __libc_malloc (sz);
}
- } else {
- /* Suspend the thread until the `atfork' handlers have completed.
- By that time, the hooks will have been reset as well, so that
- mALLOc() can be used again. */
- (void)mutex_lock(&list_lock);
- (void)mutex_unlock(&list_lock);
- return __libc_malloc(sz);
- }
}
static void
-free_atfork(void* mem, const void *caller)
+free_atfork (void *mem, const void *caller)
{
void *vptr = NULL;
mstate ar_ptr;
- mchunkptr p; /* chunk corresponding to mem */
+ /* chunk corresponding to mem. */
+ mchunkptr p;
- if (mem == 0) /* free(0) has no effect */
+ /* free(0) has no effect. */
+ if (mem == 0)
return;
- p = mem2chunk(mem); /* do not bother to replicate free_check here */
+ /* Do not bother to replicate free_check here. */
+ p = mem2chunk (mem);
- if (chunk_is_mmapped(p)) /* release mmapped memory. */
- {
- munmap_chunk(p);
- return;
- }
+ /* Release mmapped memory. */
+ if (chunk_is_mmapped (p))
+ {
+ munmap_chunk (p);
+ return;
+ }
- ar_ptr = arena_for_chunk(p);
- tsd_getspecific(arena_key, vptr);
- _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
+ ar_ptr = arena_for_chunk (p);
+ tsd_getspecific (arena_key, vptr);
+ _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
@@ -219,40 +229,42 @@ static unsigned int atfork_recursive_cntr;
make sure that the mutexes remain in a consistent state in the
fork()ed version of a thread. Also adapt the malloc and free hooks
temporarily, because the `atfork' handler mechanism may use
- malloc/free internally (e.g. in LinuxThreads). */
+ malloc/free internally (e.g. in LinuxThreads). */
static void
ptmalloc_lock_all (void)
{
mstate ar_ptr;
- if(__malloc_initialized < 1)
+ if (__malloc_initialized < 1)
return;
- if (mutex_trylock(&list_lock))
+ if (mutex_trylock (&list_lock))
{
void *my_arena;
- tsd_getspecific(arena_key, my_arena);
+ tsd_getspecific (arena_key, my_arena);
if (my_arena == ATFORK_ARENA_PTR)
/* This is the same thread which already locks the global list.
Just bump the counter. */
goto out;
/* This thread has to wait its turn. */
- (void)mutex_lock(&list_lock);
+ (void) mutex_lock (&list_lock);
+ }
+ for (ar_ptr = &main_arena;;)
+ {
+ (void) mutex_lock (&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if (ar_ptr == &main_arena)
+ break;
}
- for(ar_ptr = &main_arena;;) {
- (void)mutex_lock(&ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
save_malloc_hook = __malloc_hook;
save_free_hook = __free_hook;
__malloc_hook = malloc_atfork;
__free_hook = free_atfork;
- /* Only the current thread may perform malloc/free calls now. */
- tsd_getspecific(arena_key, save_arena);
- tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
- out:
+ /* Only the current thread may perform malloc/free calls now. */
+ tsd_getspecific (arena_key, save_arena);
+ tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
+out:
++atfork_recursive_cntr;
}
@@ -261,19 +273,21 @@ ptmalloc_unlock_all (void)
{
mstate ar_ptr;
- if(__malloc_initialized < 1)
+ if (__malloc_initialized < 1)
return;
if (--atfork_recursive_cntr != 0)
return;
- tsd_setspecific(arena_key, save_arena);
+ tsd_setspecific (arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
- for(ar_ptr = &main_arena;;) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- (void)mutex_unlock(&list_lock);
+ for (ar_ptr = &main_arena;;)
+ {
+ (void) mutex_unlock (&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if (ar_ptr == &main_arena)
+ break;
+ }
+ (void) mutex_unlock (&list_lock);
}
#ifdef __linux__
@@ -281,48 +295,50 @@ ptmalloc_unlock_all (void)
/* In NPTL, unlocking a mutex in the child process after a
fork() is currently unsafe, whereas re-initializing it is safe and
does not leak resources. Therefore, a special atfork handler is
- installed for the child. */
+ installed for the child. */
static void
ptmalloc_unlock_all2 (void)
{
mstate ar_ptr;
- if(__malloc_initialized < 1)
+ if (__malloc_initialized < 1)
return;
- tsd_setspecific(arena_key, save_arena);
+ tsd_setspecific (arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
-#ifdef PER_THREAD
+# ifdef PER_THREAD
free_list = NULL;
-#endif
- for(ar_ptr = &main_arena;;) {
- mutex_init(&ar_ptr->mutex);
-#ifdef PER_THREAD
- if (ar_ptr != save_arena) {
- ar_ptr->next_free = free_list;
- free_list = ar_ptr;
+# endif
+ for (ar_ptr = &main_arena;;)
+ {
+ mutex_init (&ar_ptr->mutex);
+# ifdef PER_THREAD
+ if (ar_ptr != save_arena)
+ {
+ ar_ptr->next_free = free_list;
+ free_list = ar_ptr;
+ }
+# endif
+ ar_ptr = ar_ptr->next;
+ if (ar_ptr == &main_arena)
+ break;
}
-#endif
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- mutex_init(&list_lock);
+ mutex_init (&list_lock);
atfork_recursive_cntr = 0;
}
#else
-#define ptmalloc_unlock_all2 ptmalloc_unlock_all
+# define ptmalloc_unlock_all2 ptmalloc_unlock_all
#endif
-/* Initialization routine. */
+/* Initialization routine. */
#include <string.h>
extern char **_environ;
-static char *
-internal_function
+static char *internal_function
next_env_entry (char ***position)
{
char **current = *position;
@@ -335,8 +351,7 @@ next_env_entry (char ***position)
&& (*current)[2] == 'L'
&& (*current)[3] == 'L'
&& (*current)[4] == 'O'
- && (*current)[5] == 'C'
- && (*current)[6] == '_')
+ && (*current)[5] == 'C' && (*current)[6] == '_')
{
result = &(*current)[7];
@@ -367,7 +382,8 @@ libc_hidden_proto (_dl_open_hook);
static void
ptmalloc_init (void)
{
- if(__malloc_initialized >= 0) return;
+ if (__malloc_initialized >= 0)
+ return;
__malloc_initialized = 0;
#ifdef SHARED
@@ -382,17 +398,17 @@ ptmalloc_init (void)
__morecore = __failing_morecore;
#endif
- tsd_key_create(&arena_key, NULL);
- tsd_setspecific(arena_key, (void *)&main_arena);
- thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
+ tsd_key_create (&arena_key, NULL);
+ tsd_setspecific (arena_key, (void *) &main_arena);
+ thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all,
+ ptmalloc_unlock_all2);
const char *s = NULL;
if (__builtin_expect (_environ != NULL, 1))
{
char **runp = _environ;
char *envline;
- while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
- 0))
+ while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
{
size_t len = strcspn (envline, "=");
@@ -409,41 +425,41 @@ ptmalloc_init (void)
s = &envline[7];
break;
case 8:
- if (! __builtin_expect (__libc_enable_secure, 0))
+ if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TOP_PAD_", 8) == 0)
- __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
+ __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
else if (memcmp (envline, "PERTURB_", 8) == 0)
- __libc_mallopt(M_PERTURB, atoi(&envline[9]));
+ __libc_mallopt (M_PERTURB, atoi (&envline[9]));
}
break;
case 9:
- if (! __builtin_expect (__libc_enable_secure, 0))
+ if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "MMAP_MAX_", 9) == 0)
- __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
+ __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
#ifdef PER_THREAD
else if (memcmp (envline, "ARENA_MAX", 9) == 0)
- __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
+ __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
#endif
}
break;
#ifdef PER_THREAD
case 10:
- if (! __builtin_expect (__libc_enable_secure, 0))
+ if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "ARENA_TEST", 10) == 0)
- __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
+ __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
}
break;
#endif
case 15:
- if (! __builtin_expect (__libc_enable_secure, 0))
+ if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
- __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
+ __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
- __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
+ __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
}
break;
default:
@@ -451,54 +467,54 @@ ptmalloc_init (void)
}
}
}
- if(s && s[0]) {
- __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
- if (check_action != 0)
- __malloc_check_init();
- }
+ if (s && s[0])
+ {
+ __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
+ if (check_action != 0)
+ __malloc_check_init ();
+ }
void (*hook) (void) = force_reg (__malloc_initialize_hook);
if (hook != NULL)
- (*hook)();
+ (*hook) ();
__malloc_initialized = 1;
}
-/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
+/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
-thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
- ptmalloc_unlock_all2)
+thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all,
+ ptmalloc_unlock_all2)
#endif
-
-
/* Managing heaps and arenas (for concurrent threads) */
-
#if MALLOC_DEBUG > 1
-
-/* Print the complete contents of a single heap to stderr. */
-
+/* Print the complete contents of a single heap to stderr. */
static void
-dump_heap(heap_info *heap)
+dump_heap (heap_info * heap)
{
char *ptr;
mchunkptr p;
- fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
- ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
- (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
- p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
- ~MALLOC_ALIGN_MASK);
- for(;;) {
- fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
- if(p == top(heap->ar_ptr)) {
- fprintf(stderr, " (top)\n");
- break;
- } else if(p->size == (0|PREV_INUSE)) {
- fprintf(stderr, " (fence)\n");
- break;
+ fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
+ ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
+ (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
+ p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
+ ~MALLOC_ALIGN_MASK);
+ for (;;)
+ {
+ fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
+ if (p == top (heap->ar_ptr))
+ {
+ fprintf (stderr, " (top)\n");
+ break;
+ }
+ else if (p->size == (0 | PREV_INUSE))
+ {
+ fprintf (stderr, " (fence)\n");
+ break;
+ }
+ fprintf (stderr, "\n");
+ p = next_chunk (p);
}
- fprintf(stderr, "\n");
- p = next_chunk(p);
- }
}
#endif /* MALLOC_DEBUG > 1 */
@@ -514,22 +530,21 @@ dump_heap(heap_info *heap)
static char *aligned_heap_area;
/* Create a new heap. size is automatically rounded up to a multiple
- of the page size. */
+ of the page size. */
-static heap_info *
-internal_function
-new_heap(size_t size, size_t top_pad)
+static heap_info *internal_function
+new_heap (size_t size, size_t top_pad)
{
- size_t page_mask = GLRO(dl_pagesize) - 1;
+ size_t page_mask = GLRO (dl_pagesize) - 1;
char *p1, *p2;
unsigned long ul;
heap_info *h;
- if(size+top_pad < HEAP_MIN_SIZE)
+ if (size + top_pad < HEAP_MIN_SIZE)
size = HEAP_MIN_SIZE;
- else if(size+top_pad <= HEAP_MAX_SIZE)
+ else if (size + top_pad <= HEAP_MAX_SIZE)
size += top_pad;
- else if(size > HEAP_MAX_SIZE)
+ else if (size > HEAP_MAX_SIZE)
return 0;
else
size = HEAP_MAX_SIZE;
@@ -538,71 +553,80 @@ new_heap(size_t size, size_t top_pad)
/* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
No swap space needs to be reserved for the following large
mapping (on Linux, this is the case for all non-writable mappings
- anyway). */
+ anyway). */
p2 = MAP_FAILED;
- if(aligned_heap_area) {
- p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
- MAP_NORESERVE);
- aligned_heap_area = NULL;
- if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
- __munmap(p2, HEAP_MAX_SIZE);
- p2 = MAP_FAILED;
+ if (aligned_heap_area)
+ {
+ p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+ MAP_NORESERVE);
+ aligned_heap_area = NULL;
+ if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
+ {
+ __munmap (p2, HEAP_MAX_SIZE);
+ p2 = MAP_FAILED;
+ }
}
- }
- if(p2 == MAP_FAILED) {
- p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
- if(p1 != MAP_FAILED) {
- p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
- & ~(HEAP_MAX_SIZE-1));
- ul = p2 - p1;
- if (ul)
- __munmap(p1, ul);
+ if (p2 == MAP_FAILED)
+ {
+ p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
+ if (p1 != MAP_FAILED)
+ {
+ p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
+ & ~(HEAP_MAX_SIZE - 1));
+ ul = p2 - p1;
+ if (ul)
+ __munmap (p1, ul);
+ else
+ aligned_heap_area = p2 + HEAP_MAX_SIZE;
+ __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+ }
else
- aligned_heap_area = p2 + HEAP_MAX_SIZE;
- __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
- } else {
- /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
- is already aligned. */
- p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
- if(p2 == MAP_FAILED)
- return 0;
- if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
- __munmap(p2, HEAP_MAX_SIZE);
- return 0;
- }
+ {
+ /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+ is already aligned. */
+ p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
+ if (p2 == MAP_FAILED)
+ return 0;
+ if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
+ {
+ __munmap (p2, HEAP_MAX_SIZE);
+ return 0;
+ }
+ }
}
- }
- if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
- __munmap(p2, HEAP_MAX_SIZE);
- return 0;
- }
- h = (heap_info *)p2;
+ if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
+ {
+ __munmap (p2, HEAP_MAX_SIZE);
+ return 0;
+ }
+ h = (heap_info *) p2;
h->size = size;
h->mprotect_size = size;
- THREAD_STAT(stat_n_heaps++);
+ THREAD_STAT (stat_n_heaps++);
return h;
}
/* Grow a heap. size is automatically rounded up to a
- multiple of the page size. */
+ multiple of the page size. */
static int
-grow_heap(heap_info *h, long diff)
+grow_heap (heap_info * h, long diff)
{
- size_t page_mask = GLRO(dl_pagesize) - 1;
+ size_t page_mask = GLRO (dl_pagesize) - 1;
long new_size;
diff = (diff + page_mask) & ~page_mask;
- new_size = (long)h->size + diff;
- if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
+ new_size = (long) h->size + diff;
+ if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
return -1;
- if((unsigned long) new_size > h->mprotect_size) {
- if (__mprotect((char *)h + h->mprotect_size,
- (unsigned long) new_size - h->mprotect_size,
- PROT_READ|PROT_WRITE) != 0)
- return -2;
- h->mprotect_size = new_size;
- }
+ if ((unsigned long) new_size > h->mprotect_size)
+ {
+ if (__mprotect ((char *) h + h->mprotect_size,
+ (unsigned long) new_size - h->mprotect_size,
+ PROT_READ | PROT_WRITE) != 0)
+ return -2;
+ h->mprotect_size = new_size;
+ }
h->size = new_size;
return 0;
@@ -611,31 +635,31 @@ grow_heap(heap_info *h, long diff)
/* Shrink a heap. */
static int
-shrink_heap(heap_info *h, long diff)
+shrink_heap (heap_info * h, long diff)
{
long new_size;
- new_size = (long)h->size - diff;
- if(new_size < (long)sizeof(*h))
+ new_size = (long) h->size - diff;
+ if (new_size < (long) sizeof (*h))
return -1;
/* Try to re-map the extra heap space freshly to save memory, and
- make it inaccessible. */
+ make it inaccessible. */
if (__builtin_expect (__libc_enable_secure, 0))
{
- if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
- MAP_FIXED) == (char *) MAP_FAILED)
+ if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
+ MAP_FIXED) == (char *) MAP_FAILED)
return -2;
h->mprotect_size = new_size;
}
else
- madvise ((char *)h + new_size, diff, MADV_DONTNEED);
- /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
+ madvise ((char *) h + new_size, diff, MADV_DONTNEED);
+ /*fprintf(stderr, "shrink %p %08lx\n", h, new_size); */
h->size = new_size;
return 0;
}
-/* Delete a heap. */
+/* Delete a heap. */
#define delete_heap(heap) \
do { \
@@ -644,99 +668,105 @@ shrink_heap(heap_info *h, long diff)
__munmap((char*)(heap), HEAP_MAX_SIZE); \
} while (0)
-static int
-internal_function
-heap_trim(heap_info *heap, size_t pad)
+static int internal_function
+heap_trim (heap_info * heap, size_t pad)
{
mstate ar_ptr = heap->ar_ptr;
- unsigned long pagesz = GLRO(dl_pagesize);
- mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
+ unsigned long pagesz = GLRO (dl_pagesize);
+ mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
heap_info *prev_heap;
long new_size, top_size, extra;
/* Can this heap go away completely? */
- while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
- prev_heap = heap->prev;
- p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
- assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
- p = prev_chunk(p);
- new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
- assert(new_size>0 && new_size<(long)(2*MINSIZE));
- if(!prev_inuse(p))
- new_size += p->prev_size;
- assert(new_size>0 && new_size<HEAP_MAX_SIZE);
- if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
- break;
- ar_ptr->system_mem -= heap->size;
- arena_mem -= heap->size;
- delete_heap(heap);
- heap = prev_heap;
- if(!prev_inuse(p)) { /* consolidate backward */
- p = prev_chunk(p);
- unlink(p, bck, fwd);
+ while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
+ {
+ prev_heap = heap->prev;
+ p =
+ chunk_at_offset (prev_heap,
+ prev_heap->size - (MINSIZE - 2 * SIZE_SZ));
+ assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
+ p = prev_chunk (p);
+ new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ);
+ assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
+ if (!prev_inuse (p))
+ new_size += p->prev_size;
+ assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
+ if (new_size + (HEAP_MAX_SIZE - prev_heap->size) <
+ pad + MINSIZE + pagesz)
+ break;
+ ar_ptr->system_mem -= heap->size;
+ arena_mem -= heap->size;
+ delete_heap (heap);
+ heap = prev_heap;
+ if (!prev_inuse (p))
+ { /* consolidate backward */
+ p = prev_chunk (p);
+ unlink (p, bck, fwd);
+ }
+ assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
+ assert (((char *) p + new_size) == ((char *) heap + heap->size));
+ top (ar_ptr) = top_chunk = p;
+ set_head (top_chunk, new_size | PREV_INUSE);
+ /*check_chunk(ar_ptr, top_chunk); */
}
- assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
- assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
- top(ar_ptr) = top_chunk = p;
- set_head(top_chunk, new_size | PREV_INUSE);
- /*check_chunk(ar_ptr, top_chunk);*/
- }
- top_size = chunksize(top_chunk);
+ top_size = chunksize (top_chunk);
extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
- if(extra < (long)pagesz)
+ if (extra < (long) pagesz)
return 0;
- /* Try to shrink. */
- if(shrink_heap(heap, extra) != 0)
+ /* Try to shrink. */
+ if (shrink_heap (heap, extra) != 0)
return 0;
ar_ptr->system_mem -= extra;
arena_mem -= extra;
- /* Success. Adjust top accordingly. */
- set_head(top_chunk, (top_size - extra) | PREV_INUSE);
- /*check_chunk(ar_ptr, top_chunk);*/
+ /* Success. Adjust top accordingly. */
+ set_head (top_chunk, (top_size - extra) | PREV_INUSE);
+ /*check_chunk(ar_ptr, top_chunk); */
return 1;
}
/* Create a new arena with initial size "size". */
static mstate
-_int_new_arena(size_t size)
+_int_new_arena (size_t size)
{
mstate a;
heap_info *h;
char *ptr;
unsigned long misalign;
- h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
- mp_.top_pad);
- if(!h) {
- /* Maybe size is too large to fit in a single heap. So, just try
- to create a minimally-sized arena and let _int_malloc() attempt
- to deal with the large request via mmap_chunk(). */
- h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
- if(!h)
- return 0;
- }
- a = h->ar_ptr = (mstate)(h+1);
- malloc_init_state(a);
- /*a->next = NULL;*/
+ h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
+ mp_.top_pad);
+ if (!h)
+ {
+ /* Maybe size is too large to fit in a single heap. So, just try
+ to create a minimally-sized arena and let _int_malloc() attempt
+ to deal with the large request via mmap_chunk(). */
+ h =
+ new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
+ if (!h)
+ return 0;
+ }
+ a = h->ar_ptr = (mstate) (h + 1);
+ malloc_init_state (a);
+ /*a->next = NULL; */
a->system_mem = a->max_system_mem = h->size;
arena_mem += h->size;
- /* Set up the top chunk, with proper alignment. */
- ptr = (char *)(a + 1);
- misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
+ /* Set up the top chunk, with proper alignment. */
+ ptr = (char *) (a + 1);
+ misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
if (misalign > 0)
ptr += MALLOC_ALIGNMENT - misalign;
- top(a) = (mchunkptr)ptr;
- set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
+ top (a) = (mchunkptr) ptr;
+ set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
- tsd_setspecific(arena_key, (void *)a);
- mutex_init(&a->mutex);
- (void)mutex_lock(&a->mutex);
+ tsd_setspecific (arena_key, (void *) a);
+ mutex_init (&a->mutex);
+ (void) mutex_lock (&a->mutex);
#ifdef PER_THREAD
- (void)mutex_lock(&list_lock);
+ (void) mutex_lock (&list_lock);
#endif
/* Add the new arena to the global list. */
@@ -745,10 +775,10 @@ _int_new_arena(size_t size)
main_arena.next = a;
#ifdef PER_THREAD
- (void)mutex_unlock(&list_lock);
+ (void) mutex_unlock (&list_lock);
#endif
- THREAD_STAT(++(a->stat_lock_loop));
+ THREAD_STAT (++(a->stat_lock_loop));
return a;
}
@@ -761,17 +791,17 @@ get_free_list (void)
mstate result = free_list;
if (result != NULL)
{
- (void)mutex_lock(&list_lock);
+ (void) mutex_lock (&list_lock);
result = free_list;
if (result != NULL)
free_list = result->next_free;
- (void)mutex_unlock(&list_lock);
+ (void) mutex_unlock (&list_lock);
if (result != NULL)
{
- (void)mutex_lock(&result->mutex);
- tsd_setspecific(arena_key, (void *)result);
- THREAD_STAT(++(result->stat_lock_loop));
+ (void) mutex_lock (&result->mutex);
+ tsd_setspecific (arena_key, (void *) result);
+ THREAD_STAT (++(result->stat_lock_loop));
}
}
@@ -792,7 +822,7 @@ reused_arena (mstate avoid_arena)
result = next_to_use;
do
{
- if (!mutex_trylock(&result->mutex))
+ if (!mutex_trylock (&result->mutex))
goto out;
result = result->next;
@@ -805,20 +835,19 @@ reused_arena (mstate avoid_arena)
result = result->next;
/* No arena available. Wait for the next in line. */
- (void)mutex_lock(&result->mutex);
+ (void) mutex_lock (&result->mutex);
- out:
- tsd_setspecific(arena_key, (void *)result);
- THREAD_STAT(++(result->stat_lock_loop));
+out:
+ tsd_setspecific (arena_key, (void *) result);
+ THREAD_STAT (++(result->stat_lock_loop));
next_to_use = result->next;
return result;
}
#endif
-static mstate
-internal_function
-arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
+static mstate internal_function
+arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
{
mstate a;
@@ -835,7 +864,7 @@ arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
narenas_limit = mp_.arena_max;
else if (narenas > mp_.arena_test)
{
- int n = __get_nprocs ();
+ int n = __get_nprocs ();
if (n >= 1)
narenas_limit = NARENAS_FROM_NCORES (n);
@@ -866,49 +895,55 @@ arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
a = reused_arena (avoid_arena);
}
#else
- if(!a_tsd)
+ if (!a_tsd)
a = a_tsd = &main_arena;
- else {
- a = a_tsd->next;
- if(!a) {
- /* This can only happen while initializing the new arena. */
- (void)mutex_lock(&main_arena.mutex);
- THREAD_STAT(++(main_arena.stat_lock_wait));
- return &main_arena;
+ else
+ {
+ a = a_tsd->next;
+ if (!a)
+ {
+ /* This can only happen while initializing the new arena. */
+ (void) mutex_lock (&main_arena.mutex);
+ THREAD_STAT (++(main_arena.stat_lock_wait));
+ return &main_arena;
+ }
}
- }
- /* Check the global, circularly linked list for available arenas. */
+ /* Check the global, circularly linked list for available arenas. */
bool retried = false;
- repeat:
- do {
- if(!mutex_trylock(&a->mutex)) {
- if (retried)
- (void)mutex_unlock(&list_lock);
- THREAD_STAT(++(a->stat_lock_loop));
- tsd_setspecific(arena_key, (void *)a);
- return a;
+repeat:
+ do
+ {
+ if (!mutex_trylock (&a->mutex))
+ {
+ if (retried)
+ (void) mutex_unlock (&list_lock);
+ THREAD_STAT (++(a->stat_lock_loop));
+ tsd_setspecific (arena_key, (void *) a);
+ return a;
+ }
+ a = a->next;
}
- a = a->next;
- } while(a != a_tsd);
+ while (a != a_tsd);
/* If not even the list_lock can be obtained, try again. This can
happen during `atfork', or for example on systems where thread
creation makes it temporarily impossible to obtain _any_
- locks. */
- if(!retried && mutex_trylock(&list_lock)) {
- /* We will block to not run in a busy loop. */
- (void)mutex_lock(&list_lock);
+ locks. */
+ if (!retried && mutex_trylock (&list_lock))
+ {
+ /* We will block to not run in a busy loop. */
+ (void) mutex_lock (&list_lock);
- /* Since we blocked there might be an arena available now. */
- retried = true;
- a = a_tsd;
- goto repeat;
- }
+ /* Since we blocked there might be an arena available now. */
+ retried = true;
+ a = a_tsd;
+ goto repeat;
+ }
/* Nothing immediately available, so generate a new arena. */
- a = _int_new_arena(size);
- (void)mutex_unlock(&list_lock);
+ a = _int_new_arena (size);
+ (void) mutex_unlock (&list_lock);
#endif
return a;
@@ -919,17 +954,18 @@ static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
void *vptr = NULL;
- mstate a = tsd_getspecific(arena_key, vptr);
- tsd_setspecific(arena_key, NULL);
+ mstate a = tsd_getspecific (arena_key, vptr);
+ tsd_setspecific (arena_key, NULL);
if (a != NULL)
{
- (void)mutex_lock(&list_lock);
+ (void) mutex_lock (&list_lock);
a->next_free = free_list;
free_list = a;
- (void)mutex_unlock(&list_lock);
+ (void) mutex_unlock (&list_lock);
}
}
+
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif
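One further note on the reindented heap_info structure: its pad
member, char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK], uses unsigned
wraparound to compute how many bytes are needed to make
sizeof (heap_info) + 2 * SIZE_SZ a multiple of MALLOC_ALIGNMENT, as
the comment above it requires. A minimal standalone sketch of the
arithmetic, assuming the common configuration in which SIZE_SZ is
sizeof (size_t) and MALLOC_ALIGNMENT is 2 * SIZE_SZ (both values are
configuration-dependent in glibc):

#include <stdio.h>
#include <stddef.h>

/* Assumed values; glibc derives the real ones from the
   configuration.  */
#define SIZE_SZ (sizeof (size_t))
#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

int
main (void)
{
  /* The four members before pad occupy 4 * SIZE_SZ bytes in this
     configuration; requiring that total plus 2 * SIZE_SZ be aligned
     is where the -6 * SIZE_SZ comes from.  */
  size_t pad = -6 * SIZE_SZ & MALLOC_ALIGN_MASK;

  /* Prints 0 here, since 6 * SIZE_SZ is already a multiple of
     MALLOC_ALIGNMENT.  */
  printf ("pad = %zu bytes\n", pad);
  return 0;
}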