This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
[PATCH] malloc: introduce get_backup_arena()
- From: Joern Engel <joern at purestorage dot com>
- To: "GNU C. Library" <libc-alpha at sourceware dot org>
- Cc: Siddhesh Poyarekar <siddhesh dot poyarekar at gmail dot com>, Joern Engel <joern at purestorage dot org>
- Date: Mon, 25 Jan 2016 16:24:51 -0800
- Subject: [PATCH] malloc: introduce get_backup_arena()
- Authentication-results: sourceware.org; auth=none
- References: <1453767942-19369-1-git-send-email-joern at purestorage dot com>
From: Joern Engel <joern@purestorage.org>
Removes a lot of duplicate code. Not all copies were identical and I
believe some were somewhat buggy. Then again, this code is very
unlikely to run at all, so those bugs were equally unlikely to matter in
practice.
JIRA: PURE-27597
---
tpc/malloc2.13/malloc.c | 124 ++++++++++++++----------------------------------
1 file changed, 35 insertions(+), 89 deletions(-)
diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 28d9d902b7ec..7c94a8cefcac 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -3396,6 +3396,20 @@ mremap_chunk(mchunkptr p, size_t new_size)
#endif /* HAVE_MREMAP */
+/*
+ * get_backup_arena - pick a fallback arena after an allocation failure.
+ *
+ * Precondition: called with ar_ptr->mutex held.
+ * Postcondition: returns an arena whose mutex is held; the caller is
+ * responsible for unlocking it after the retry.
+ *
+ * If the failing arena is not the main arena, the failure may be due to
+ * running out of mmapped areas, so fall back to the (sbrk-backed) main
+ * arena.  If the main arena itself failed, sbrk() has likely run out,
+ * so ask arena_get2() for an mmap-backed arena instead.
+ *
+ * NOTE(review): arena_get2() could return NULL in the replaced code
+ * (the old copies guarded with "if (ar_ptr)"); callers of this helper
+ * dereference the result unconditionally -- confirm arena_get2() cannot
+ * fail here, or add NULL handling at the call sites.
+ * NOTE(review): unlike the old public_mEMALIGn copy, the else branch
+ * calls arena_get2() while main_arena.mutex is still held and unlocks
+ * only afterwards; it also passes ar_ptr instead of the old
+ * "ar_ptr->next ? ar_ptr : 0" -- verify both are intentional.
+ */
+static struct malloc_state *get_backup_arena(struct malloc_state *ar_ptr, size_t bytes)
+{
+ if (ar_ptr != &main_arena) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ (void)mutex_unlock(&ar_ptr->mutex);
+ ar_ptr = &main_arena;
+ (void)mutex_lock(&ar_ptr->mutex);
+ } else {
+ /* ... or sbrk() has failed and there is still a chance to mmap() */
+ ar_ptr = arena_get2(ar_ptr, bytes);
+ (void)mutex_unlock(&main_arena.mutex);
+ }
+ return ar_ptr;
+}
/*------------------------ Public wrappers. --------------------------------*/
@@ -3409,30 +3423,15 @@ Void_t *public_mALLOc(size_t bytes)
if (__builtin_expect(hook != NULL, 0))
return (*hook) (bytes, RETURN_ADDRESS(0));
- arena_lookup(ar_ptr);
- arena_lock(ar_ptr, bytes);
+ arena_get(ar_ptr, bytes);
if (!ar_ptr)
return 0;
victim = _int_malloc(ar_ptr, bytes);
if (!victim) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if (ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
- victim = _int_malloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
- (void)mutex_unlock(&main_arena.mutex);
- if (ar_ptr) {
- victim = _int_malloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
- }
- } else
- (void)mutex_unlock(&ar_ptr->mutex);
+ ar_ptr = get_backup_arena(ar_ptr, bytes);
+ victim = _int_malloc(ar_ptr, bytes);
+ }
+ (void)mutex_unlock(&ar_ptr->mutex);
assert(!victim || chunk_is_mmapped(mem2chunk(victim)) || ar_ptr == arena_for_chunk(mem2chunk(victim)));
return victim;
}
@@ -3618,25 +3617,10 @@ Void_t *public_mEMALIGn(size_t alignment, size_t bytes)
return 0;
p = _int_memalign(ar_ptr, alignment, bytes);
if (!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if (ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
- p = _int_memalign(ar_ptr, alignment, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- struct malloc_state *prev = ar_ptr->next ? ar_ptr : 0;
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = arena_get2(prev, bytes);
- if (ar_ptr) {
- p = _int_memalign(ar_ptr, alignment, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
- }
- } else
- (void)mutex_unlock(&ar_ptr->mutex);
+ ar_ptr = get_backup_arena(ar_ptr, bytes);
+ p = _int_memalign(ar_ptr, alignment, bytes);
+ }
+ (void)mutex_unlock(&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
return p;
}
@@ -3661,21 +3645,10 @@ Void_t *public_vALLOc(size_t bytes)
p = _int_valloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
if (!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if (ar_ptr != &main_arena) {
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
- p = _int_memalign(ar_ptr, pagesz, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
- if (ar_ptr) {
- p = _int_memalign(ar_ptr, pagesz, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
- }
+ ar_ptr = get_backup_arena(ar_ptr, bytes);
+ p = _int_memalign(ar_ptr, pagesz, bytes);
}
+ (void)mutex_unlock(&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
return p;
@@ -3701,21 +3674,10 @@ Void_t *public_pVALLOc(size_t bytes)
p = _int_pvalloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
if (!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if (ar_ptr != &main_arena) {
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
- p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes + 2 * pagesz + MINSIZE);
- if (ar_ptr) {
- p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
- }
+ ar_ptr = get_backup_arena(ar_ptr, bytes + 2 * pagesz + MINSIZE);
+ p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
}
+ (void)mutex_unlock(&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
return p;
@@ -3780,31 +3742,15 @@ Void_t *public_cALLOc(size_t n, size_t elem_size)
}
#endif
mem = _int_malloc(av, sz);
-
- /* Only clearing follows, so we can unlock early. */
+ if (mem == 0) {
+ av = get_backup_arena(av, bytes);
+ mem = _int_malloc(av, sz);
+ }
(void)mutex_unlock(&av->mutex);
assert(!mem || chunk_is_mmapped(mem2chunk(mem)) || av == arena_for_chunk(mem2chunk(mem)));
-
- if (mem == 0) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if (av != &main_arena) {
- (void)mutex_lock(&main_arena.mutex);
- mem = _int_malloc(&main_arena, sz);
- (void)mutex_unlock(&main_arena.mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- (void)mutex_lock(&main_arena.mutex);
- av = arena_get2(av->next ? av : 0, sz);
- (void)mutex_unlock(&main_arena.mutex);
- if (av) {
- mem = _int_malloc(av, sz);
- (void)mutex_unlock(&av->mutex);
- }
- }
- if (mem == 0)
- return 0;
- }
+ if (mem == 0)
+ return 0;
p = mem2chunk(mem);
/* Two optional cases in which clearing not necessary */
--
2.7.0.rc3