This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.

Re: [PATCH] malloc: Use __libc_lock interfaces directly


On Wed, Nov 25, 2015 at 03:31:33PM +0100, Florian Weimer wrote:
> This removes the mutex_t wrappers.  The benefit is that it is easier to
> introduce direct dependencies on lock implementation details; otherwise,
> I would have to provide __libc_lock definitions *and* mutex_t definitions.
> 
> On x86_64-redhat-linux-gnu, the only assembly changes are due to moved
> line numbers in asserts.
> 
> Florian
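
For reference, the post-patch pattern is the plain <libc-lock.h>
interface.  A minimal sketch (demo_lock, demo_counter and
demo_increment are made-up names for illustration); note that the
__libc_lock_* macros take the lock by name rather than by address,
which is why the patch drops the '&' at every call site:

  #include <libc-lock.h>

  /* Define and statically initialize a lock.  */
  __libc_lock_define_initialized (static, demo_lock);

  static int demo_counter;	/* Shared state guarded by demo_lock.  */

  static void
  demo_increment (void)
  {
    __libc_lock_lock (demo_lock);
    ++demo_counter;
    __libc_lock_unlock (demo_lock);
  }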

> 2015-11-25  Florian Weimer  <fweimer@redhat.com>
> 
> 	* malloc/arena.c: Remove NO_THREADS references.
> 	(list_lock): Define using __libc_lock_define_initialized.
> 	(arena_get2): Replace mutex_lock with __libc_lock_lock.
> 	(malloc_atfork): Likewise.  Replace mutex_unlock with
> 	__libc_lock_unlock.
> 	(ptmalloc_lock_all): Replace mutex_lock with __libc_lock_lock,
> 	and mutex_trylock with __libc_lock_trylock.
> 	(ptmalloc_unlock_all): Replace mutex_unlock with
> 	__libc_lock_unlock.
> 	(ptmalloc_unlock_all2): Replace mutex_init with __libc_lock_init.
> 	(_int_new_arena): Likewise.  Replace mutex_lock with
> 	__libc_lock_lock, and mutex_unlock with __libc_lock_unlock.
> 	(get_free_list): Replace mutex_lock with
> 	__libc_lock_lock, and mutex_unlock with __libc_lock_unlock.
> 	(reused_arena): Replace mutex_trylock with __libc_lock_trylock,
> 	mutex_lock with __libc_lock_lock, and mutex_unlock with
> 	__libc_lock_unlock.
> 	(arena_get_retry): Replace mutex_unlock with __libc_lock_unlock,
> 	and mutex_lock with __libc_lock_lock.
> 	(arena_thread_freeres): Likewise.
> 	* malloc/hooks.c (malloc_check, free_check, realloc_check)
> 	(memalign_check, __malloc_get_state, __malloc_set_state):
> 	Likewise.
> 	* malloc/malloc.c (malloc_atfork, free_atfork): Declare
> 	unconditionally.
> 	(struct malloc_state): Define mutex using __libc_lock_define.
> 	(__libc_malloc): Replace mutex_unlock with __libc_lock_unlock.
> 	(__libc_realloc): Likewise.  Replace mutex_lock with
> 	__libc_lock_lock.
> 	(_mid_memalign, __libc_calloc): Replace mutex_unlock with
> 	__libc_lock_unlock.
> 	(_int_free, __malloc_trim, __libc_mallinfo, __malloc_stats)
> 	(__libc_mallopt, __malloc_info): Likewise.  Replace mutex_lock
> 	with __libc_lock_lock.
> 	* sysdeps/generic/malloc-machine.h [!defined (mutex_init)]
> 	(mutex_t, mutex_init, mutex_lock, mutex_trylock, mutex_unlock):
> 	Remove dummy declarations.
> 	(atomic_full_barrier, atomic_read_barrier, atomic_write_barrier):
> 	Remove dummy definitions.
> 	* sysdeps/mach/hurd/malloc-machine.h (mutex_t, mutex_lock)
> 	(mutex_unlock, mutex_trylock): Remove declarations.
> 	(__pthread_initialize): Remove dummy definition.
> 	* sysdeps/nptl/malloc-machine.h (mutex_t, mutex_lock)
> 	(mutex_unlock, mutex_trylock): Remove declarations.
> 
> diff --git a/malloc/arena.c b/malloc/arena.c
> index 3dab7bb..b4c9fdb 100644
> --- a/malloc/arena.c
> +++ b/malloc/arena.c
> @@ -73,11 +73,11 @@ static __thread mstate thread_arena attribute_tls_model_ie;
>     objects.  No other (malloc) locks must be taken while list_lock is
>     active, otherwise deadlocks may occur.  */
>  
> -static mutex_t list_lock = _LIBC_LOCK_INITIALIZER;
> +__libc_lock_define_initialized (static, list_lock);
>  static size_t narenas = 1;
>  static mstate free_list;
>  
> -/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
> +/* Mapped memory in non-main arenas (unreliable due to data races). */
>  static unsigned long arena_mem;
>  
>  /* Already initialized? */
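
The list_lock rule quoted above (no other malloc lock may be taken
while list_lock is held) makes list_lock a leaf lock: take it last,
release it before doing anything that might lock again.
_int_new_arena further down in this patch follows it exactly; reduced
to a sketch (error handling omitted):

  __libc_lock_lock (a->mutex);		/* Arena lock first ...  */
  __libc_lock_lock (list_lock);		/* ... list_lock last.  */
  /* Touch only the arena list / free_list while it is held.  */
  __libc_lock_unlock (list_lock);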
> @@ -101,7 +101,7 @@ int __malloc_initialized = -1;
>  
>  #define arena_lock(ptr, size) do {					      \
>        if (ptr && !arena_is_corrupt (ptr))				      \
> -        (void) mutex_lock (&ptr->mutex);				      \
> +        __libc_lock_lock (ptr->mutex);					      \
>        else								      \
>          ptr = arena_get2 ((size), NULL);				      \
>    } while (0)
> @@ -116,8 +116,6 @@ int __malloc_initialized = -1;
>  
>  /**************************************************************************/
>  
> -#ifndef NO_THREADS
> -

NO_THREADS reference removal should be a separate patch.  It is a distinct cleanup.

>  /* atfork support.  */
>  
>  static void *(*save_malloc_hook)(size_t __size, const void *);
> @@ -162,8 +160,8 @@ malloc_atfork (size_t sz, const void *caller)
>        /* Suspend the thread until the `atfork' handlers have completed.
>           By that time, the hooks will have been reset as well, so that
>           mALLOc() can be used again. */
> -      (void) mutex_lock (&list_lock);
> -      (void) mutex_unlock (&list_lock);
> +      __libc_lock_lock (list_lock);
> +      __libc_lock_unlock (list_lock);
>        return __libc_malloc (sz);
>      }
>  }
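
The lock/unlock pair here is the usual barrier idiom: the thread gains
nothing from holding list_lock, it merely blocks until the fork
handler holding it (ptmalloc_lock_all) is undone by
ptmalloc_unlock_all.  A generic reduction (illustrative name):

  static void
  wait_until_released (void)
  {
    __libc_lock_lock (list_lock);	/* Blocks while the handler holds it.  */
    __libc_lock_unlock (list_lock);	/* Nothing to protect; pure barrier.  */
  }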
> @@ -207,7 +205,7 @@ ptmalloc_lock_all (void)
>    if (__malloc_initialized < 1)
>      return;
>  
> -  if (mutex_trylock (&list_lock))
> +  if (__libc_lock_trylock (list_lock))
>      {
>        if (thread_arena == ATFORK_ARENA_PTR)
>          /* This is the same thread which already locks the global list.
> @@ -215,11 +213,11 @@ ptmalloc_lock_all (void)
>          goto out;
>  
>        /* This thread has to wait its turn.  */
> -      (void) mutex_lock (&list_lock);
> +      __libc_lock_lock (list_lock);
>      }
>    for (ar_ptr = &main_arena;; )
>      {
> -      (void) mutex_lock (&ar_ptr->mutex);
> +      __libc_lock_lock (ar_ptr->mutex);
>        ar_ptr = ar_ptr->next;
>        if (ar_ptr == &main_arena)
>          break;
> @@ -257,12 +255,12 @@ ptmalloc_unlock_all (void)
>    __free_hook = save_free_hook;
>    for (ar_ptr = &main_arena;; )
>      {
> -      (void) mutex_unlock (&ar_ptr->mutex);
> +      __libc_lock_unlock (ar_ptr->mutex);
>        ar_ptr = ar_ptr->next;
>        if (ar_ptr == &main_arena)
>          break;
>      }
> -  (void) mutex_unlock (&list_lock);
> +  __libc_lock_unlock (list_lock);
>  }
>  
>  # ifdef __linux__
> @@ -291,7 +289,7 @@ ptmalloc_unlock_all2 (void)
>    free_list = NULL;
>    for (ar_ptr = &main_arena;; )
>      {
> -      mutex_init (&ar_ptr->mutex);
> +      __libc_lock_init (ar_ptr->mutex);
>        if (ar_ptr != save_arena)
>          {
>  	  /* This arena is no longer attached to any thread.  */
> @@ -303,7 +301,7 @@ ptmalloc_unlock_all2 (void)
>        if (ar_ptr == &main_arena)
>          break;
>      }
> -  mutex_init (&list_lock);
> +  __libc_lock_init (list_lock);
>    atfork_recursive_cntr = 0;
>  }
>  
> @@ -311,7 +309,6 @@ ptmalloc_unlock_all2 (void)
>  
>  #  define ptmalloc_unlock_all2 ptmalloc_unlock_all
>  # endif
> -#endif  /* !NO_THREADS */
>  
>  /* Initialization routine. */
>  #include <string.h>
> @@ -787,10 +784,10 @@ _int_new_arena (size_t size)
>    LIBC_PROBE (memory_arena_new, 2, a, size);
>    mstate replaced_arena = thread_arena;
>    thread_arena = a;
> -  mutex_init (&a->mutex);
> -  (void) mutex_lock (&a->mutex);
> +  __libc_lock_init (a->mutex);
> +  __libc_lock_lock (a->mutex);
>  
> -  (void) mutex_lock (&list_lock);
> +  __libc_lock_lock (list_lock);
>  
>    detach_arena (replaced_arena);
>  
> @@ -799,7 +796,7 @@ _int_new_arena (size_t size)
>    atomic_write_barrier ();
>    main_arena.next = a;
>  
> -  (void) mutex_unlock (&list_lock);
> +  __libc_lock_unlock (list_lock);
>  
>    return a;
>  }
> @@ -812,7 +809,7 @@ get_free_list (void)
>    mstate result = free_list;
>    if (result != NULL)
>      {
> -      (void) mutex_lock (&list_lock);
> +      __libc_lock_lock (list_lock);
>        result = free_list;
>        if (result != NULL)
>  	{
> @@ -825,12 +822,12 @@ get_free_list (void)
>  
>  	  detach_arena (replaced_arena);
>  	}
> -      (void) mutex_unlock (&list_lock);
> +      __libc_lock_unlock (list_lock);
>  
>        if (result != NULL)
>          {
>            LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
> -          (void) mutex_lock (&result->mutex);
> +          __libc_lock_lock (result->mutex);
>  	  thread_arena = result;
>          }
>      }
> @@ -852,7 +849,7 @@ reused_arena (mstate avoid_arena)
>    result = next_to_use;
>    do
>      {
> -      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
> +      if (!arena_is_corrupt (result) && !__libc_lock_trylock (result->mutex))
>          goto out;
>  
>        result = result->next;
> @@ -880,15 +877,15 @@ reused_arena (mstate avoid_arena)
>  
>    /* No arena available without contention.  Wait for the next in line.  */
>    LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
> -  (void) mutex_lock (&result->mutex);
> +  __libc_lock_lock (result->mutex);
>  
>  out:
>    {
>      mstate replaced_arena = thread_arena;
> -    (void) mutex_lock (&list_lock);
> +    __libc_lock_lock (list_lock);
>      detach_arena (replaced_arena);
>      ++result->attached_threads;
> -    (void) mutex_unlock (&list_lock);
> +    __libc_lock_unlock (list_lock);
>    }
>  
>    LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
> @@ -959,17 +956,17 @@ arena_get_retry (mstate ar_ptr, size_t bytes)
>    LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
>    if (ar_ptr != &main_arena)
>      {
> -      (void) mutex_unlock (&ar_ptr->mutex);
> +      __libc_lock_unlock (ar_ptr->mutex);
>        /* Don't touch the main arena if it is corrupt.  */
>        if (arena_is_corrupt (&main_arena))
>  	return NULL;
>  
>        ar_ptr = &main_arena;
> -      (void) mutex_lock (&ar_ptr->mutex);
> +      __libc_lock_lock (ar_ptr->mutex);
>      }
>    else
>      {
> -      (void) mutex_unlock (&ar_ptr->mutex);
> +      __libc_lock_unlock (ar_ptr->mutex);
>        ar_ptr = arena_get2 (bytes, ar_ptr);
>      }
>  
> @@ -984,7 +981,7 @@ arena_thread_freeres (void)
>  
>    if (a != NULL)
>      {
> -      (void) mutex_lock (&list_lock);
> +      __libc_lock_lock (list_lock);
>        /* If this was the last attached thread for this arena, put the
>  	 arena on the free list.  */
>        assert (a->attached_threads > 0);
> @@ -993,7 +990,7 @@ arena_thread_freeres (void)
>  	  a->next_free = free_list;
>  	  free_list = a;
>  	}
> -      (void) mutex_unlock (&list_lock);
> +      __libc_lock_unlock (list_lock);
>      }
>  }
>  text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
> diff --git a/malloc/hooks.c b/malloc/hooks.c
> index ec79f0a..5abf705 100644
> --- a/malloc/hooks.c
> +++ b/malloc/hooks.c
> @@ -291,9 +291,9 @@ malloc_check (size_t sz, const void *caller)
>        return NULL;
>      }
>  
> -  (void) mutex_lock (&main_arena.mutex);
> +  (void) __libc_lock_lock (main_arena.mutex);
>    victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
> -  (void) mutex_unlock (&main_arena.mutex);
> +  (void) __libc_lock_unlock (main_arena.mutex);
>    return mem2mem_check (victim, sz);
>  }
>  
> @@ -305,11 +305,11 @@ free_check (void *mem, const void *caller)
>    if (!mem)
>      return;
>  
> -  (void) mutex_lock (&main_arena.mutex);
> +  (void) __libc_lock_lock (main_arena.mutex);
>    p = mem2chunk_check (mem, NULL);
>    if (!p)
>      {
> -      (void) mutex_unlock (&main_arena.mutex);
> +      (void) __libc_lock_unlock (main_arena.mutex);
>  
>        malloc_printerr (check_action, "free(): invalid pointer", mem,
>  		       &main_arena);
> @@ -317,12 +317,12 @@ free_check (void *mem, const void *caller)
>      }
>    if (chunk_is_mmapped (p))
>      {
> -      (void) mutex_unlock (&main_arena.mutex);
> +      (void) __libc_lock_unlock (main_arena.mutex);
>        munmap_chunk (p);
>        return;
>      }
>    _int_free (&main_arena, p, 1);
> -  (void) mutex_unlock (&main_arena.mutex);
> +  (void) __libc_lock_unlock (main_arena.mutex);
>  }
>  
>  static void *
> @@ -345,9 +345,9 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
>        free_check (oldmem, NULL);
>        return NULL;
>      }
> -  (void) mutex_lock (&main_arena.mutex);
> +  (void) __libc_lock_lock (main_arena.mutex);
>    const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
> -  (void) mutex_unlock (&main_arena.mutex);
> +  (void) __libc_lock_unlock (main_arena.mutex);
>    if (!oldp)
>      {
>        malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
> @@ -357,7 +357,7 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
>    const INTERNAL_SIZE_T oldsize = chunksize (oldp);
>  
>    checked_request2size (bytes + 1, nb);
> -  (void) mutex_lock (&main_arena.mutex);
> +  (void) __libc_lock_lock (main_arena.mutex);
>  
>    if (chunk_is_mmapped (oldp))
>      {
> @@ -400,7 +400,7 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
>    if (newmem == NULL)
>      *magic_p ^= 0xFF;
>  
> -  (void) mutex_unlock (&main_arena.mutex);
> +  (void) __libc_lock_unlock (main_arena.mutex);
>  
>    return mem2mem_check (newmem, bytes);
>  }
> @@ -440,10 +440,10 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
>        alignment = a;
>      }
>  
> -  (void) mutex_lock (&main_arena.mutex);
> +  (void) __libc_lock_lock (main_arena.mutex);
>    mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
>          NULL;
> -  (void) mutex_unlock (&main_arena.mutex);
> +  (void) __libc_lock_unlock (main_arena.mutex);
>    return mem2mem_check (mem, bytes);
>  }
>  
> @@ -503,7 +503,7 @@ __malloc_get_state (void)
>    if (!ms)
>      return 0;
>  
> -  (void) mutex_lock (&main_arena.mutex);
> +  (void) __libc_lock_lock (main_arena.mutex);
>    malloc_consolidate (&main_arena);
>    ms->magic = MALLOC_STATE_MAGIC;
>    ms->version = MALLOC_STATE_VERSION;
> @@ -540,7 +540,7 @@ __malloc_get_state (void)
>    ms->arena_test = mp_.arena_test;
>    ms->arena_max = mp_.arena_max;
>    ms->narenas = narenas;
> -  (void) mutex_unlock (&main_arena.mutex);
> +  (void) __libc_lock_unlock (main_arena.mutex);
>    return (void *) ms;
>  }
>  
> @@ -560,7 +560,7 @@ __malloc_set_state (void *msptr)
>    if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
>      return -2;
>  
> -  (void) mutex_lock (&main_arena.mutex);
> +  (void) __libc_lock_lock (main_arena.mutex);
>    /* There are no fastchunks.  */
>    clear_fastchunks (&main_arena);
>    if (ms->version >= 4)
> @@ -659,7 +659,7 @@ __malloc_set_state (void *msptr)
>      }
>    check_malloc_state (&main_arena);
>  
> -  (void) mutex_unlock (&main_arena.mutex);
> +  (void) __libc_lock_unlock (main_arena.mutex);
>    return 0;
>  }
>  
> diff --git a/malloc/malloc.c b/malloc/malloc.c
> index a030109..639f8ed 100644
> --- a/malloc/malloc.c
> +++ b/malloc/malloc.c
> @@ -1074,10 +1074,8 @@ static void*   realloc_check(void* oldmem, size_t bytes,
>  			       const void *caller);
>  static void*   memalign_check(size_t alignment, size_t bytes,
>  				const void *caller);
> -#ifndef NO_THREADS
>  static void*   malloc_atfork(size_t sz, const void *caller);
>  static void      free_atfork(void* mem, const void *caller);
> -#endif
>  
>  /* ------------------ MMAP support ------------------  */
>  
> @@ -1686,7 +1684,7 @@ typedef struct malloc_chunk *mfastbinptr;
>  struct malloc_state
>  {
>    /* Serialize access.  */
> -  mutex_t mutex;
> +  __libc_lock_define (, mutex);
>  
>    /* Flags (formerly in max_fast).  */
>    int flags;
> @@ -2922,7 +2920,7 @@ __libc_malloc (size_t bytes)
>      }
>  
>    if (ar_ptr != NULL)
> -    (void) mutex_unlock (&ar_ptr->mutex);
> +    __libc_lock_unlock (ar_ptr->mutex);
>  
>    assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
>            ar_ptr == arena_for_chunk (mem2chunk (victim)));
> @@ -3041,11 +3039,11 @@ __libc_realloc (void *oldmem, size_t bytes)
>        return newmem;
>      }
>  
> -  (void) mutex_lock (&ar_ptr->mutex);
> +  __libc_lock_lock (ar_ptr->mutex);
>  
>    newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
>  
> -  (void) mutex_unlock (&ar_ptr->mutex);
> +  __libc_lock_unlock (ar_ptr->mutex);
>    assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
>            ar_ptr == arena_for_chunk (mem2chunk (newp)));
>  
> @@ -3127,7 +3125,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
>      }
>  
>    if (ar_ptr != NULL)
> -    (void) mutex_unlock (&ar_ptr->mutex);
> +    __libc_lock_unlock (ar_ptr->mutex);
>  
>    assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
>            ar_ptr == arena_for_chunk (mem2chunk (p)));
> @@ -3248,7 +3246,7 @@ __libc_calloc (size_t n, size_t elem_size)
>      }
>  
>    if (av != NULL)
> -    (void) mutex_unlock (&av->mutex);
> +    __libc_lock_unlock (av->mutex);
>  
>    /* Allocation failed even after a retry.  */
>    if (mem == 0)
> @@ -3864,7 +3862,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
>        errstr = "free(): invalid pointer";
>      errout:
>        if (!have_lock && locked)
> -        (void) mutex_unlock (&av->mutex);
> +        __libc_lock_unlock (av->mutex);
>        malloc_printerr (check_action, errstr, chunk2mem (p), av);
>        return;
>      }
> @@ -3903,7 +3901,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
>  	   after getting the lock.  */
>  	if (have_lock
>  	    || ({ assert (locked == 0);
> -		  mutex_lock(&av->mutex);
> +		  __libc_lock_lock (av->mutex);
>  		  locked = 1;
>  		  chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
>  		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
> @@ -3914,7 +3912,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
>  	  }
>  	if (! have_lock)
>  	  {
> -	    (void)mutex_unlock(&av->mutex);
> +	    __libc_lock_unlock (av->mutex);
>  	    locked = 0;
>  	  }
>        }
> @@ -3960,7 +3958,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
>  
>    else if (!chunk_is_mmapped(p)) {
>      if (! have_lock) {
> -      (void)mutex_lock(&av->mutex);
> +      __libc_lock_lock (av->mutex);
>        locked = 1;
>      }
>  
> @@ -4093,7 +4091,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
>  
>      if (! have_lock) {
>        assert (locked);
> -      (void)mutex_unlock(&av->mutex);
> +      __libc_lock_unlock (av->mutex);
>      }
>    }
>    /*
> @@ -4560,9 +4558,9 @@ __malloc_trim (size_t s)
>    mstate ar_ptr = &main_arena;
>    do
>      {
> -      (void) mutex_lock (&ar_ptr->mutex);
> +      __libc_lock_lock (ar_ptr->mutex);
>        result |= mtrim (ar_ptr, s);
> -      (void) mutex_unlock (&ar_ptr->mutex);
> +      __libc_lock_unlock (ar_ptr->mutex);
>  
>        ar_ptr = ar_ptr->next;
>      }
> @@ -4686,9 +4684,9 @@ __libc_mallinfo (void)
>    ar_ptr = &main_arena;
>    do
>      {
> -      (void) mutex_lock (&ar_ptr->mutex);
> +      __libc_lock_lock (ar_ptr->mutex);
>        int_mallinfo (ar_ptr, &m);
> -      (void) mutex_unlock (&ar_ptr->mutex);
> +      __libc_lock_unlock (ar_ptr->mutex);
>  
>        ar_ptr = ar_ptr->next;
>      }
> @@ -4718,7 +4716,7 @@ __malloc_stats (void)
>        struct mallinfo mi;
>  
>        memset (&mi, 0, sizeof (mi));
> -      (void) mutex_lock (&ar_ptr->mutex);
> +      __libc_lock_lock (ar_ptr->mutex);
>        int_mallinfo (ar_ptr, &mi);
>        fprintf (stderr, "Arena %d:\n", i);
>        fprintf (stderr, "system bytes     = %10u\n", (unsigned int) mi.arena);
> @@ -4729,7 +4727,7 @@ __malloc_stats (void)
>  #endif
>        system_b += mi.arena;
>        in_use_b += mi.uordblks;
> -      (void) mutex_unlock (&ar_ptr->mutex);
> +      __libc_lock_unlock (ar_ptr->mutex);
>        ar_ptr = ar_ptr->next;
>        if (ar_ptr == &main_arena)
>          break;
> @@ -4757,7 +4755,7 @@ __libc_mallopt (int param_number, int value)
>  
>    if (__malloc_initialized < 0)
>      ptmalloc_init ();
> -  (void) mutex_lock (&av->mutex);
> +  __libc_lock_lock (av->mutex);
>    /* Ensure initialization/consolidation */
>    malloc_consolidate (av);
>  
> @@ -4835,7 +4833,7 @@ __libc_mallopt (int param_number, int value)
>          }
>        break;
>      }
> -  (void) mutex_unlock (&av->mutex);
> +  __libc_lock_unlock (av->mutex);
>    return res;
>  }
>  libc_hidden_def (__libc_mallopt)
> @@ -5082,7 +5080,7 @@ __malloc_info (int options, FILE *fp)
>        } sizes[NFASTBINS + NBINS - 1];
>  #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
>  
> -      mutex_lock (&ar_ptr->mutex);
> +      __libc_lock_lock (ar_ptr->mutex);
>  
>        for (size_t i = 0; i < NFASTBINS; ++i)
>  	{
> @@ -5141,7 +5139,7 @@ __malloc_info (int options, FILE *fp)
>  	  avail += sizes[NFASTBINS - 1 + i].total;
>  	}
>  
> -      mutex_unlock (&ar_ptr->mutex);
> +      __libc_lock_unlock (ar_ptr->mutex);
>  
>        total_nfastblocks += nfastblocks;
>        total_fastavail += fastavail;
> diff --git a/sysdeps/generic/malloc-machine.h b/sysdeps/generic/malloc-machine.h
> index 7b7eae8..1f564ef 100644
> --- a/sysdeps/generic/malloc-machine.h
> +++ b/sysdeps/generic/malloc-machine.h
> @@ -22,37 +22,6 @@
>  
>  #include <atomic.h>

This #include of <atomic.h> is not needed anymore.

>  
> -#ifndef mutex_init /* No threads, provide dummy macros */
> -
> -# define NO_THREADS
> -
> -/* The mutex functions used to do absolutely nothing, i.e. lock,
> -   trylock and unlock would always just return 0.  However, even
> -   without any concurrently active threads, a mutex can be used
> -   legitimately as an `in use' flag.  To make the code that is
> -   protected by a mutex async-signal safe, these macros would have to
> -   be based on atomic test-and-set operations, for example. */
> -typedef int mutex_t;
> -
> -# define mutex_init(m)          (*(m) = 0)
> -# define mutex_lock(m)          ({ *(m) = 1; 0; })
> -# define mutex_trylock(m)       (*(m) ? 1 : ((*(m) = 1), 0))
> -# define mutex_unlock(m)        (*(m) = 0)
> -
> -#endif /* !defined mutex_init */
> -
> -#ifndef atomic_full_barrier
> -# define atomic_full_barrier() __asm ("" ::: "memory")
> -#endif
> -
> -#ifndef atomic_read_barrier
> -# define atomic_read_barrier() atomic_full_barrier ()
> -#endif
> -
> -#ifndef atomic_write_barrier
> -# define atomic_write_barrier() atomic_full_barrier ()
> -#endif
> -
>  #ifndef DEFAULT_TOP_PAD
>  # define DEFAULT_TOP_PAD 131072
>  #endif
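
The removed comment's suggestion, an `in use' flag based on atomic
test-and-set, would look roughly like this in portable C11
(illustrative only; glibc's internal <atomic.h> spells these
operations differently):

  #include <stdatomic.h>

  typedef atomic_flag inuse_flag_t;
  #define INUSE_FLAG_INIT ATOMIC_FLAG_INIT

  /* Returns nonzero if the flag was already set.  A single lock-free
     atomic operation, hence async-signal safe.  */
  static inline int
  inuse_trylock (inuse_flag_t *f)
  {
    return atomic_flag_test_and_set (f);
  }

  static inline void
  inuse_unlock (inuse_flag_t *f)
  {
    atomic_flag_clear (f);
  }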

A related (separate) cleanup is to drop a similar construct from
malloc.c where it sets DEFAULT_TOP_PAD to 0.
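Presumably this guard, or something very close to it (quoting from
memory, so the exact spelling may differ):

  #ifndef DEFAULT_TOP_PAD
  # define DEFAULT_TOP_PAD 0
  #endif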

> diff --git a/sysdeps/mach/hurd/malloc-machine.h b/sysdeps/mach/hurd/malloc-machine.h
> index 9221d1b..7c4a380 100644
> --- a/sysdeps/mach/hurd/malloc-machine.h
> +++ b/sysdeps/mach/hurd/malloc-machine.h
> @@ -25,33 +25,12 @@
>  #include <atomic.h>
>  #include <libc-lock.h>

I think these two includes are not needed anymore.

>  
> -/* Assume hurd, with cthreads */
> -
> -/* Cthreads `mutex_t' is a pointer to a mutex, and malloc wants just the
> -   mutex itself.  */
> -#undef mutex_t
> -#define mutex_t struct mutex
> -
> -#undef mutex_init
> -#define mutex_init(m) ({ __mutex_init(m); 0; })
> -
> -#undef mutex_lock
> -#define mutex_lock(m) ({ __mutex_lock(m); 0; })
> -
> -#undef mutex_unlock
> -#define mutex_unlock(m) ({ __mutex_unlock(m); 0; })
> -
> -#define mutex_trylock(m) (!__mutex_trylock(m))
> -
>  #define thread_atfork(prepare, parent, child) do {} while(0)
>  #define thread_atfork_static(prepare, parent, child) \
>   text_set_element(_hurd_fork_prepare_hook, prepare); \
>   text_set_element(_hurd_fork_parent_hook, parent); \
>   text_set_element(_hurd_fork_child_hook, child);
>  
> -/* No we're *not* using pthreads.  */
> -#define __pthread_initialize ((void (*)(void))0)
> -
>  /* madvise is a stub on Hurd, so don't bother calling it.  */
>  
>  #include <sys/mman.h>
> diff --git a/sysdeps/nptl/malloc-machine.h b/sysdeps/nptl/malloc-machine.h
> index d46a9d9..ff12c1b 100644
> --- a/sysdeps/nptl/malloc-machine.h
> +++ b/sysdeps/nptl/malloc-machine.h
> @@ -25,13 +25,6 @@
>  #include <atomic.h>
>  #include <libc-lock.h>

Likewise.

>  
> -__libc_lock_define (typedef, mutex_t)
> -
> -#define mutex_init(m)		__libc_lock_init (*(m))
> -#define mutex_lock(m)		__libc_lock_lock (*(m))
> -#define mutex_trylock(m)	__libc_lock_trylock (*(m))
> -#define mutex_unlock(m)		__libc_lock_unlock (*(m))
> -
>  /* This is defined by newer gcc version unique for each module.  */
>  extern void *__dso_handle __attribute__ ((__weak__));
>  
> -- 
> 2.4.3
> 

Siddhesh

