+2016-09-06  Florian Weimer  <fweimer@redhat.com>
+
+ Convert malloc to __libc_lock. Automated part, using this Perl
+ s/// command:
+ s/(?:\(void\)\s*)?mutex_((?:|un|try)lock|init)
+ \s*\(\&([^\)]+)\)/__libc_lock_$1\ ($2)/gx;
+ * malloc/malloc.c, malloc/arena.c, malloc/hooks.c: Perform
+ conversion.
+
2016-09-05  Aurelien Jarno  <aurelien@aurel32.net>

	* conform/Makefile (conformtest-header-tests): Pass -I. to $(PERL).
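In the converted calls below, note that the s/// rule drops both the
(void) cast and the address-of operator: the __libc_lock_* operations
are macros that take the lock object itself and apply & internally.
As a rough sketch of that interface (illustrative stand-ins assuming a
pthread-compatible lock type; the real glibc-internal macros live in
libc-lock.h/libc-lockP.h and use low-level locks directly):

  #include <pthread.h>

  /* Demo macros only, not the glibc-internal definitions.  They show
     why the conversion passes ptr->mutex rather than &ptr->mutex: the
     macro receives the lock object by name and takes its address.  */
  typedef pthread_mutex_t demo_lock_t;
  #define demo_lock_init(NAME)    pthread_mutex_init (&(NAME), NULL)
  #define demo_lock_lock(NAME)    pthread_mutex_lock (&(NAME))
  #define demo_lock_trylock(NAME) pthread_mutex_trylock (&(NAME))
  #define demo_lock_unlock(NAME)  pthread_mutex_unlock (&(NAME))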
diff --git a/malloc/arena.c b/malloc/arena.c
#define arena_lock(ptr, size) do { \
if (ptr && !arena_is_corrupt (ptr)) \
- (void) mutex_lock (&ptr->mutex); \
+ __libc_lock_lock (ptr->mutex); \
else \
ptr = arena_get2 ((size), NULL); \
} while (0)
/* We do not acquire free_list_lock here because we completely
reconstruct free_list in __malloc_fork_unlock_child. */
- (void) mutex_lock (&list_lock);
+ __libc_lock_lock (list_lock);
for (mstate ar_ptr = &main_arena;; )
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
for (mstate ar_ptr = &main_arena;; )
{
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
}
- (void) mutex_unlock (&list_lock);
+ __libc_lock_unlock (list_lock);
}
void
/* Push all arenas to the free list, except thread_arena, which is
attached to the current thread. */
- mutex_init (&free_list_lock);
+ __libc_lock_init (free_list_lock);
if (thread_arena != NULL)
thread_arena->attached_threads = 1;
free_list = NULL;
for (mstate ar_ptr = &main_arena;; )
{
- mutex_init (&ar_ptr->mutex);
+ __libc_lock_init (ar_ptr->mutex);
if (ar_ptr != thread_arena)
{
/* This arena is no longer attached to any thread. */
break;
}
- mutex_init (&list_lock);
+ __libc_lock_init (list_lock);
}
/* Initialization routine. */
LIBC_PROBE (memory_arena_new, 2, a, size);
mstate replaced_arena = thread_arena;
thread_arena = a;
- mutex_init (&a->mutex);
+ __libc_lock_init (a->mutex);
- (void) mutex_lock (&list_lock);
+ __libc_lock_lock (list_lock);
/* Add the new arena to the global list. */
a->next = main_arena.next;
atomic_write_barrier ();
main_arena.next = a;
- (void) mutex_unlock (&list_lock);
+ __libc_lock_unlock (list_lock);
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
detach_arena (replaced_arena);
- (void) mutex_unlock (&free_list_lock);
+ __libc_lock_unlock (free_list_lock);
/* Lock this arena.  NB: Another thread may have been attached to
   this arena because the arena is now accessible from the
   main_arena.next list and could have been picked by reused_arena.
   We could acquire the arena lock before list_lock to make it less
   likely that reused_arena picks this new arena, but this could
   result in a deadlock with __malloc_fork_lock_parent.  */
- (void) mutex_lock (&a->mutex);
+ __libc_lock_lock (a->mutex);
return a;
}
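The deadlock the comment above guards against can be spelled out:
__malloc_fork_lock_parent (first hunks above) acquires list_lock and
then every arena mutex, so _int_new_arena must never wait for
list_lock while holding the new arena's mutex.  A condensed sketch of
the order the function keeps, with the free_list_lock step omitted:

  __libc_lock_lock (list_lock);    /* serialize the list update */
  /* ... link the new arena a into main_arena.next ... */
  __libc_lock_unlock (list_lock);
  /* Only now, with list_lock released, block on the arena lock.  */
  __libc_lock_lock (a->mutex);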
mstate result = free_list;
if (result != NULL)
{
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
result = free_list;
if (result != NULL)
{
detach_arena (replaced_arena);
}
- (void) mutex_unlock (&free_list_lock);
+ __libc_lock_unlock (free_list_lock);
if (result != NULL)
{
LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
- (void) mutex_lock (&result->mutex);
+ __libc_lock_lock (result->mutex);
thread_arena = result;
}
}
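The get_free_list hunks above convert a double-checked pattern:
free_list is read once without the lock as a cheap fast path, then
re-read under free_list_lock before being consumed.  A standalone
sketch using the excerpt's names (the pop of the list head is elided
from the excerpt, so that line is a reconstruction):

  static mstate
  get_free_list_sketch (void)
  {
    mstate result = free_list;            /* fast path, no lock held */
    if (result != NULL)
      {
        __libc_lock_lock (free_list_lock);
        result = free_list;               /* re-read under the lock */
        if (result != NULL)
          free_list = result->next_free;  /* detach the head */
        __libc_lock_unlock (free_list_lock);
      }
    return result;
  }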
result = next_to_use;
do
{
- if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
+ if (!arena_is_corrupt (result) && !__libc_lock_trylock (result->mutex))
goto out;
/* FIXME: This is a data race, see _int_new_arena. */
/* No arena available without contention. Wait for the next in line. */
LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
- (void) mutex_lock (&result->mutex);
+ __libc_lock_lock (result->mutex);
out:
/* Attach the arena to the current thread. */
{
/* Update the arena thread attachment counters. */
mstate replaced_arena = thread_arena;
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
detach_arena (replaced_arena);
/* We may have picked up an arena on the free list.  We need to
   preserve the invariant that no arena on the free list has a
   positive attached_threads counter.  */
++result->attached_threads;
- (void) mutex_unlock (&free_list_lock);
+ __libc_lock_unlock (free_list_lock);
}
LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
if (ar_ptr != &main_arena)
{
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
/* Don't touch the main arena if it is corrupt. */
if (arena_is_corrupt (&main_arena))
return NULL;
ar_ptr = &main_arena;
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
}
else
{
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = arena_get2 (bytes, ar_ptr);
}
if (a != NULL)
{
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
/* If this was the last attached thread for this arena, put the
arena on the free list. */
assert (a->attached_threads > 0);
a->next_free = free_list;
free_list = a;
}
- (void) mutex_unlock (&free_list_lock);
+ __libc_lock_unlock (free_list_lock);
}
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
diff --git a/malloc/hooks.c b/malloc/hooks.c
      return NULL;
}
- (void) mutex_lock (&main_arena.mutex);
+ __libc_lock_lock (main_arena.mutex);
victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
return mem2mem_check (victim, sz);
}
if (!mem)
return;
- (void) mutex_lock (&main_arena.mutex);
+ __libc_lock_lock (main_arena.mutex);
p = mem2chunk_check (mem, NULL);
if (!p)
{
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
malloc_printerr (check_action, "free(): invalid pointer", mem,
&main_arena);
}
if (chunk_is_mmapped (p))
{
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
munmap_chunk (p);
return;
}
_int_free (&main_arena, p, 1);
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
}
static void *
free_check (oldmem, NULL);
return NULL;
}
- (void) mutex_lock (&main_arena.mutex);
+ __libc_lock_lock (main_arena.mutex);
const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
if (!oldp)
{
malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
const INTERNAL_SIZE_T oldsize = chunksize (oldp);
checked_request2size (bytes + 1, nb);
- (void) mutex_lock (&main_arena.mutex);
+ __libc_lock_lock (main_arena.mutex);
if (chunk_is_mmapped (oldp))
{
if (newmem == NULL)
*magic_p ^= 0xFF;
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
return mem2mem_check (newmem, bytes);
}
alignment = a;
}
- (void) mutex_lock (&main_arena.mutex);
+ __libc_lock_lock (main_arena.mutex);
mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
NULL;
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
return mem2mem_check (mem, bytes);
}
if (!ms)
return 0;
- (void) mutex_lock (&main_arena.mutex);
+ __libc_lock_lock (main_arena.mutex);
malloc_consolidate (&main_arena);
ms->magic = MALLOC_STATE_MAGIC;
ms->version = MALLOC_STATE_VERSION;
ms->arena_test = mp_.arena_test;
ms->arena_max = mp_.arena_max;
ms->narenas = narenas;
- (void) mutex_unlock (&main_arena.mutex);
+ __libc_lock_unlock (main_arena.mutex);
return (void *) ms;
}
diff --git a/malloc/malloc.c b/malloc/malloc.c
}
if (ar_ptr != NULL)
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
return newmem;
}
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
ar_ptr == arena_for_chunk (mem2chunk (newp)));
}
if (ar_ptr != NULL)
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
ar_ptr == arena_for_chunk (mem2chunk (p)));
}
if (av != NULL)
- (void) mutex_unlock (&av->mutex);
+ __libc_lock_unlock (av->mutex);
/* Allocation failed even after a retry. */
if (mem == 0)
errstr = "free(): invalid pointer";
errout:
if (!have_lock && locked)
- (void) mutex_unlock (&av->mutex);
+ __libc_lock_unlock (av->mutex);
malloc_printerr (check_action, errstr, chunk2mem (p), av);
return;
}
/* We might not have a lock at this point and concurrent modifications
   of system_mem might have led to a false positive.  Redo the test
   after getting the lock.  */
if (have_lock
|| ({ assert (locked == 0);
- mutex_lock(&av->mutex);
+ __libc_lock_lock (av->mutex);
locked = 1;
chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
|| chunksize (chunk_at_offset (p, size)) >= av->system_mem;
}
if (! have_lock)
{
- (void)mutex_unlock(&av->mutex);
+ __libc_lock_unlock (av->mutex);
locked = 0;
}
}
else if (!chunk_is_mmapped(p)) {
if (! have_lock) {
- (void)mutex_lock(&av->mutex);
+ __libc_lock_lock (av->mutex);
locked = 1;
}
if (! have_lock) {
assert (locked);
- (void)mutex_unlock(&av->mutex);
+ __libc_lock_unlock (av->mutex);
}
}
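The _int_free hunks above all instantiate one conditional-locking
protocol: take av->mutex only if the caller does not already hold it,
and record that in locked so both the error path and the normal path
release exactly what was acquired.  A sketch of the protocol on its
own, not the verbatim function:

  /* Sketch, assuming the libc-lock.h macros and an arena pointer av
     as in the excerpt.  */
  static void
  free_sketch (mstate av, int have_lock)
  {
    int locked = 0;
    if (!have_lock)
      {
        __libc_lock_lock (av->mutex);
        locked = 1;
      }
    /* ... consistency checks and the actual free work ... */
    if (!have_lock && locked)
      __libc_lock_unlock (av->mutex);
  }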
/*
mstate ar_ptr = &main_arena;
do
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
result |= mtrim (ar_ptr, s);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
}
ar_ptr = &main_arena;
do
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
int_mallinfo (ar_ptr, &m);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
}
struct mallinfo mi;
memset (&mi, 0, sizeof (mi));
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
int_mallinfo (ar_ptr, &mi);
fprintf (stderr, "Arena %d:\n", i);
fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
#endif
system_b += mi.arena;
in_use_b += mi.uordblks;
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
if (__malloc_initialized < 0)
ptmalloc_init ();
- (void) mutex_lock (&av->mutex);
+ __libc_lock_lock (av->mutex);
/* Ensure initialization/consolidation */
malloc_consolidate (av);
}
break;
}
- (void) mutex_unlock (&av->mutex);
+ __libc_lock_unlock (av->mutex);
return res;
}
libc_hidden_def (__libc_mallopt)
} sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
- mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
for (size_t i = 0; i < NFASTBINS; ++i)
{
avail += sizes[NFASTBINS - 1 + i].total;
}
- mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
total_nfastblocks += nfastblocks;
total_fastavail += fastavail;