This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
[PATCH v2] malloc: Prevent arena free_list from turning cyclic [BZ #19048]
- From: Florian Weimer <fweimer at redhat dot com>
- To: Siddhesh Poyarekar <sid at reserved-bit dot com>, GNU C Library <libc-alpha at sourceware dot org>
- Date: Tue, 13 Oct 2015 13:31:27 +0200
- Subject: [PATCH v2] malloc: Prevent arena free_list from turning cyclic [BZ #19048]
- Authentication-results: sourceware.org; auth=none
- References: <5617E843 dot 3060504 at redhat dot com> <5618075F dot 105 at reserved-bit dot com>
On 10/09/2015 08:28 PM, Siddhesh Poyarekar wrote:
> The only time a thread ever switches arenas is when it fails to allocate
> on the earlier arena. Also, what's hidden in the implementation is that
> the switch always happens from the main arena to a non-main arena and
> never in any other condition. So you could consolidate the replacement
> logic to just arena_get2 where you do tsd_setspecific (arena_key, NULL)
> if avoid_arena is set and decrement the refcount on avoid_arena.
> There's no other place where you want to detach from an arena.
>
> It makes no sense to stick to the avoided arena anyway because we failed
> to allocate on that arena and if we don't find another arena either
> through a free list or reused_arena, we will try mmap and if even that
> fails, we fail allocation.
Thanks for your comments. The code is difficult to follow (and it does
not seem to do what some people expect it to do, it seems). But I do
not want to refactor it in a major way right now.
I believe I have addressed your comments in the attached patch. I would
really like to have a review of the pthread_atfork changes because these
bits are really tricky.
Florian
2015-10-13 Florian Weimer <fweimer@redhat.com>
[BZ #19048]
* malloc/malloc.c (struct malloc_state): Update comment. Add
attached_threads member.
(main_arena): Initialize attached_threads.
* malloc/arena.c (list_lock): Update comment.
(ptmalloc_lock_all, ptmalloc_unlock_all): Likewise.
(ptmalloc_unlock_all2): Reinitialize arena reference counts.
(detach_arena): New function.
(_int_new_arena): Initialize arena reference count and detach
replaced arena.
(get_free_list, reused_arena): Update reference count and detach
replaced arena.
(arena_thread_freeres): Update arena reference count and only put
unreferenced arenas on the free list.
diff --git a/malloc/arena.c b/malloc/arena.c
index cb45a04..5476fe8 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -64,7 +64,10 @@ extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
+ 2 * SIZE_SZ) % MALLOC_ALIGNMENT
? -1 : 1];
-/* Thread specific data */
+/* Thread specific data. list_lock protects the free_list variable
+ below, and the next_free and attached_threads members of the mstate
+ objects. No other (malloc) locks must be taken while list_lock is
+ active, otherwise deadlocks may occur. */
static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
@@ -233,7 +236,10 @@ ptmalloc_lock_all (void)
save_free_hook = __free_hook;
__malloc_hook = malloc_atfork;
__free_hook = free_atfork;
- /* Only the current thread may perform malloc/free calls now. */
+ /* Only the current thread may perform malloc/free calls now.
+ save_arena will be reattached to the current thread, in
+ ptmalloc_unlock_all, so save_arena->attached_threads is not
+ updated. */
tsd_getspecific (arena_key, save_arena);
tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
out:
@@ -251,6 +257,9 @@ ptmalloc_unlock_all (void)
if (--atfork_recursive_cntr != 0)
return;
+ /* Replace ATFORK_ARENA_PTR with save_arena.
+ save_arena->attached_threads was not changed in ptmalloc_lock_all
+ and is still correct. */
tsd_setspecific (arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
@@ -282,12 +291,19 @@ ptmalloc_unlock_all2 (void)
tsd_setspecific (arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
+
+ /* Push all arenas to the free list, except save_arena, which is
+ attached to the current thread. */
+ if (save_arena != NULL)
+ ((mstate) save_arena)->attached_threads = 1;
free_list = NULL;
for (ar_ptr = &main_arena;; )
{
mutex_init (&ar_ptr->mutex);
if (ar_ptr != save_arena)
{
+ /* This arena is no longer attached to any thread. */
+ ar_ptr->attached_threads = 0;
ar_ptr->next_free = free_list;
free_list = ar_ptr;
}
@@ -727,6 +743,22 @@ heap_trim (heap_info *heap, size_t pad)
/* Create a new arena with initial size "size". */
+/* If REPLACED_ARENA is not NULL, detach it from this thread. Must be
+ called while list_lock is held. */
+static void
+detach_arena (mstate replaced_arena)
+{
+ if (replaced_arena != NULL)
+ {
+ assert (replaced_arena->attached_threads > 0);
+ /* The current implementation only detaches from main_arena in
+ case of allocation failure. This means that it is likely not
+ beneficial to put the arena on free_list even if the
+ reference count reaches zero. */
+ --replaced_arena->attached_threads;
+ }
+}
+
static mstate
_int_new_arena (size_t size)
{
@@ -748,6 +780,7 @@ _int_new_arena (size_t size)
}
a = h->ar_ptr = (mstate) (h + 1);
malloc_init_state (a);
+ a->attached_threads = 1;
/*a->next = NULL;*/
a->system_mem = a->max_system_mem = h->size;
arena_mem += h->size;
@@ -761,12 +794,16 @@ _int_new_arena (size_t size)
set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
LIBC_PROBE (memory_arena_new, 2, a, size);
+ void *vptr;
+ mstate replaced_arena = tsd_getspecific (arena_key, vptr);
tsd_setspecific (arena_key, (void *) a);
mutex_init (&a->mutex);
(void) mutex_lock (&a->mutex);
(void) mutex_lock (&list_lock);
+ detach_arena (replaced_arena);
+
/* Add the new arena to the global list. */
a->next = main_arena.next;
atomic_write_barrier ();
@@ -781,13 +818,25 @@ _int_new_arena (size_t size)
static mstate
get_free_list (void)
{
+ void *vptr;
+ mstate replaced_arena = tsd_getspecific (arena_key, vptr);
mstate result = free_list;
if (result != NULL)
{
(void) mutex_lock (&list_lock);
result = free_list;
if (result != NULL)
- free_list = result->next_free;
+ {
+ free_list = result->next_free;
+
+ /* Arenas on the free list are not attached to any thread. */
+ assert (result->attached_threads == 0);
+ /* But the arena will now be attached to this thread. */
+ result->attached_threads = 1;
+
+ if (result != NULL)
+ detach_arena (replaced_arena);
+ }
(void) mutex_unlock (&list_lock);
if (result != NULL)
@@ -846,6 +895,15 @@ reused_arena (mstate avoid_arena)
(void) mutex_lock (&result->mutex);
out:
+ {
+ void *vptr;
+ mstate replaced_arena = tsd_getspecific (arena_key, vptr);
+ (void) mutex_lock (&list_lock);
+ detach_arena (replaced_arena);
+ ++result->attached_threads;
+ (void) mutex_unlock (&list_lock);
+ }
+
LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
tsd_setspecific (arena_key, (void *) result);
next_to_use = result->next;
@@ -941,8 +999,14 @@ arena_thread_freeres (void)
if (a != NULL)
{
(void) mutex_lock (&list_lock);
- a->next_free = free_list;
- free_list = a;
+ /* If this was the last attached thread for this arena, put the
+ arena on the free list. */
+ assert (a->attached_threads > 0);
+ if (--a->attached_threads == 0)
+ {
+ a->next_free = free_list;
+ free_list = a;
+ }
(void) mutex_unlock (&list_lock);
}
}
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 0eca9ce..051c463 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1709,9 +1709,15 @@ struct malloc_state
/* Linked list */
struct malloc_state *next;
- /* Linked list for free arenas. */
+ /* Linked list for free arenas. Access to this field is serialized
+ by list_lock in arena.c. */
struct malloc_state *next_free;
+ /* Number of threads attached to this arena. 0 if the arena is on
+ the free list. Access to this field is serialized by list_lock
+ in arena.c. */
+ INTERNAL_SIZE_T attached_threads;
+
/* Memory allocated from the system in this arena. */
INTERNAL_SIZE_T system_mem;
INTERNAL_SIZE_T max_system_mem;
@@ -1755,7 +1761,8 @@ struct malloc_par
static struct malloc_state main_arena =
{
.mutex = MUTEX_INITIALIZER,
- .next = &main_arena
+ .next = &main_arena,
+ .attached_threads = 1
};
/* There is only one instance of the malloc parameters. */