Re: [PATCH] Cleanup code duplication in malloc on fallback to use another arena
- From: Siddhesh Poyarekar <siddhesh at redhat dot com>
- To: Alexandre Oliva <aoliva at redhat dot com>
- Cc: Andreas Schwab <schwab at linux-m68k dot org>, libc-alpha at sourceware dot org
- Date: Wed, 5 Sep 2012 16:29:36 +0530
- Subject: Re: [PATCH] Cleanup code duplication in malloc on fallback to use another arena
- References: <20120830145849.7d2fdf04@spoyarek> <m24nnku33g.fsf@igel.home> <20120830154412.5559f6d6@spoyarek> <or392x5up6.fsf@livre.localdomain>
On Tue, 04 Sep 2012 18:44:53 -0300, Alexandre wrote:
> I'd have preferred to place this in arena.c instead, where other arena
> selection code is. Would you consider doing that along with the
> following fix?
OK, I guess arena.c is the more suitable place for this function.
> Of these 3 functions, which impose additional alignment requirements,
> only the last one passes to get_another_arena the same size passed
> to arena_get. I think this is a mistake (cut&pasto?)
That's an interesting observation. It may not make a difference when a
thread is already associated with an arena, but when a new arena is
created, it may result in two mprotect calls on the arena: once for
'bytes' and again for the aligned size. I'll verify this. If it needs a
change, I think we could do that as a separate fix, since this patch is
intended purely as a cleanup.
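(For reference, the three call sites after this patch look roughly like
this; the arena_get padding shown for memalign and valloc is from the
current tree as I recall it, not from the hunks below:)

    /* __libc_memalign: padded request to arena_get, plain bytes on retry.  */
    arena_get(ar_ptr, bytes + alignment + MINSIZE);
    ...
    ar_ptr = arena_get_retry(ar_ptr, bytes);

    /* __libc_valloc: same mismatch.  */
    arena_get(ar_ptr, bytes + pagesz + MINSIZE);
    ...
    ar_ptr = arena_get_retry(ar_ptr, bytes);

    /* __libc_pvalloc: the only one retrying with the padded size.  */
    arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
    ...
    ar_ptr = arena_get_retry(ar_ptr, bytes + 2*pagesz + MINSIZE);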
> Now, how about renaming get_another_arena to say arena_get_retry, or
> arena_get3 ;-) for greater similarity with the other functions that
> choose an arena?
I don't like arena_get2 as a function name, so I'd hate arena_get3
even more. arena_get_retry is good, though :)
I've attached the updated patch.
Regards,
Siddhesh
ChangeLog:
* malloc/arena.c (arena_get_retry): New function that gets
another arena for the caller to try its request on.
* malloc/malloc.c (__libc_malloc): Use arena_get_retry if the
current arena cannot fulfil the request.
(__libc_memalign): Likewise.
(__libc_valloc): Likewise.
(__libc_pvalloc): Likewise.
(__libc_calloc): Likewise.
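With arena_get_retry factored out, every entry point's fallback reduces
to the same shape, sketched here with __libc_malloc's names as in the
first malloc.c hunk below:

    victim = _int_malloc(ar_ptr, bytes);
    if(!victim) {
      /* Retry exactly once: arena_get_retry returns a replacement
         arena, locked, or NULL if none could be obtained.  */
      ar_ptr = arena_get_retry(ar_ptr, bytes);
      if (__builtin_expect(ar_ptr != NULL, 1)) {
        victim = _int_malloc(ar_ptr, bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
    } else
      (void)mutex_unlock(&ar_ptr->mutex);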
diff --git a/malloc/arena.c b/malloc/arena.c
index 9e5e332..a5a6713 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -918,6 +918,27 @@ arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
return a;
}
+/* If we don't have the main arena, then maybe the failure is due to running
+ out of mmapped areas, so we can try allocating on the main arena.
+ Otherwise, it is likely that sbrk() has failed and there is still a chance
+ to mmap(), so try one of the other arenas. */
+static mstate
+arena_get_retry (mstate ar_ptr, size_t bytes)
+{
+ if(ar_ptr != &main_arena) {
+ (void)mutex_unlock(&ar_ptr->mutex);
+ ar_ptr = &main_arena;
+ (void)mutex_lock(&ar_ptr->mutex);
+ } else {
+ /* Grab ar_ptr->next prior to releasing its lock. */
+ mstate prev = ar_ptr->next ? ar_ptr : 0;
+ (void)mutex_unlock(&ar_ptr->mutex);
+ ar_ptr = arena_get2(prev, bytes, ar_ptr);
+ }
+
+ return ar_ptr;
+}
+
#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 0f1796c..ca27355 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2858,23 +2858,10 @@ __libc_malloc(size_t bytes)
return 0;
victim = _int_malloc(ar_ptr, bytes);
if(!victim) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry(ar_ptr, bytes);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
victim = _int_malloc(ar_ptr, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap()
- Grab ar_ptr->next prior to releasing its lock. */
- mstate prev = ar_ptr->next ? ar_ptr : 0;
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = arena_get2(prev, bytes, ar_ptr);
- if(ar_ptr) {
- victim = _int_malloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
} else
(void)mutex_unlock(&ar_ptr->mutex);
@@ -3038,23 +3025,10 @@ __libc_memalign(size_t alignment, size_t bytes)
return 0;
p = _int_memalign(ar_ptr, alignment, bytes);
if(!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry(ar_ptr, bytes);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
p = _int_memalign(ar_ptr, alignment, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap()
- Grab ar_ptr->next prior to releasing its lock. */
- mstate prev = ar_ptr->next ? ar_ptr : 0;
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = arena_get2(prev, bytes, ar_ptr);
- if(ar_ptr) {
- p = _int_memalign(ar_ptr, alignment, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
} else
(void)mutex_unlock(&ar_ptr->mutex);
@@ -3088,23 +3062,10 @@ __libc_valloc(size_t bytes)
return 0;
p = _int_valloc(ar_ptr, bytes);
if(!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry(ar_ptr, bytes);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
p = _int_memalign(ar_ptr, pagesz, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap()
- Grab ar_ptr->next prior to releasing its lock. */
- mstate prev = ar_ptr->next ? ar_ptr : 0;
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = arena_get2(prev, bytes, ar_ptr);
- if(ar_ptr) {
- p = _int_memalign(ar_ptr, pagesz, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
} else
(void)mutex_unlock (&ar_ptr->mutex);
@@ -3136,23 +3097,10 @@ __libc_pvalloc(size_t bytes)
arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
p = _int_pvalloc(ar_ptr, bytes);
if(!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry(ar_ptr, bytes + 2*pagesz + MINSIZE);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap()
- Grab ar_ptr->next prior to releasing its lock. */
- mstate prev = ar_ptr->next ? ar_ptr : 0;
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE, ar_ptr);
- if(ar_ptr) {
- p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
} else
(void)mutex_unlock(&ar_ptr->mutex);
@@ -3225,22 +3173,10 @@ __libc_calloc(size_t n, size_t elem_size)
av == arena_for_chunk(mem2chunk(mem)));
if (mem == 0) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(av != &main_arena) {
+ av = arena_get_retry(av, sz);
+ if (__builtin_expect(av != NULL, 1)) {
+ mem = _int_malloc(av, sz);
(void)mutex_unlock(&av->mutex);
- (void)mutex_lock(&main_arena.mutex);
- mem = _int_malloc(&main_arena, sz);
- (void)mutex_unlock(&main_arena.mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap()
- Grab av->next prior to releasing its lock. */
- mstate prev = av->next ? av : 0;
- (void)mutex_unlock(&av->mutex);
- av = arena_get2(prev, sz, av);
- if(av) {
- mem = _int_malloc(av, sz);
- (void)mutex_unlock(&av->mutex);
- }
}
if (mem == 0) return 0;
} else