[PATCH] Use atomic_thread_fence

Carlos O'Donell carlos@redhat.com
Mon Aug 1 13:24:35 GMT 2022


On 7/28/22 10:27, Wilco Dijkstra via Libc-alpha wrote:
> Replace atomic barriers based on sync primitives with atomic_thread_fence.
> Many of these uses appear suspect; fixing them in the future to use
> load_acquire or store_release would be useful.
> 
> Passes regression testing on AArch64 and build-many-glibcs.

This came up in today's weekly patch review meeting.

The pre-commit CI shows this doesn't apply:
https://patchwork.sourceware.org/project/glibc/patch/AM5PR0801MB16682C3A037C622C8D3C4CDE83969@AM5PR0801MB1668.eurprd08.prod.outlook.com/

How are you generating these patches?
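
A general note on the conversion itself: my reading of the mechanical
replacement is the mapping sketched below, in terms of the C11/GCC
primitives I expect the new glibc macros to expand to (a sketch only,
not the exact definitions in include/atomic.h):

  #include <stdatomic.h>

  /* Old generic macros (deleted from include/atomic.h below): the full
     barrier was __sync_synchronize, and the read/write barriers simply
     defaulted to the full barrier unless an architecture overrode them
     with something weaker.  */
  static inline void old_full_barrier (void)  { __sync_synchronize (); }

  /* New C11-style fences used throughout the patch.  */
  static inline void new_seq_cst_fence (void) { atomic_thread_fence (memory_order_seq_cst); }
  static inline void new_acquire_fence (void) { atomic_thread_fence (memory_order_acquire); }
  static inline void new_release_fence (void) { atomic_thread_fence (memory_order_release); }

If that matches your intent, the notes I've added below next to a few
of the hunks are just observations for the load_acquire/store_release
cleanup you mention, not blockers for this patch.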
 
> ---
> 
> diff --git a/crypt/crypt_util.c b/crypt/crypt_util.c
> index be925e3484e65d2180e07915f5d91b47f6b96393..a8c2b26ed13c27804a9465f2a54caa09aaec3814 100644
> --- a/crypt/crypt_util.c
> +++ b/crypt/crypt_util.c
> @@ -453,14 +453,14 @@ __init_des_r (struct crypt_data * __restrict __data)
>  	  efp[comes_from_word][word_value][o_long] |= mask2;
>        }
>      }
> -    atomic_write_barrier ();
> +    atomic_thread_fence_release ();
>      small_tables_initialized = 1;
>  #ifdef __GNU_LIBRARY__
>  small_tables_done:
>      __libc_lock_unlock(_ufc_tables_lock);
>  #endif
>    } else
> -    atomic_read_barrier ();
> +    atomic_thread_fence_acquire ();
>  
>    /*
>     * Create the sb tables:
> diff --git a/elf/dl-deps.c b/elf/dl-deps.c
> index 06005a0cc8686cc7e63cd8e1b1e7deda01fe6688..11b3fda5fdeb3830d3d5a0031084b43847444e04 100644
> --- a/elf/dl-deps.c
> +++ b/elf/dl-deps.c
> @@ -430,7 +430,7 @@ _dl_map_object_deps (struct link_map *map,
>  	  memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
>  	  memcpy (&l_initfini[nneeded + 1], l_initfini,
>  		  nneeded * sizeof needed[0]);
> -	  atomic_write_barrier ();
> +	  atomic_thread_fence_release ();
>  	  l->l_initfini = l_initfini;
>  	  l->l_free_initfini = 1;
>  	}
> @@ -555,12 +555,12 @@ _dl_map_object_deps (struct link_map *map,
>  
>    /* Terminate the list of dependencies.  */
>    l_initfini[nlist] = NULL;
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>    map->l_initfini = l_initfini;
>    map->l_free_initfini = 1;
>    if (l_reldeps != NULL)
>      {
> -      atomic_write_barrier ();
> +      atomic_thread_fence_release ();
>        void *old_l_reldeps = map->l_reldeps;
>        map->l_reldeps = l_reldeps;
>        _dl_scope_free (old_l_reldeps);
> diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
> index 02c63a7062b2be0f37a412160fdb2b3468cc70cf..894d3e7db198a2a08940e9a1d82c345b0e0343a0 100644
> --- a/elf/dl-lookup.c
> +++ b/elf/dl-lookup.c
> @@ -695,7 +695,7 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
>  			l_reldepsact * sizeof (struct link_map *));
>  	      newp->list[l_reldepsact] = map;
>  	      newp->act = l_reldepsact + 1;
> -	      atomic_write_barrier ();
> +	      atomic_thread_fence_release ();
>  	      void *old = undef_map->l_reldeps;
>  	      undef_map->l_reldeps = newp;
>  	      undef_map->l_reldepsmax = max;
> @@ -706,7 +706,7 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
>        else
>  	{
>  	  undef_map->l_reldeps->list[l_reldepsact] = map;
> -	  atomic_write_barrier ();
> +	  atomic_thread_fence_release ();
>  	  undef_map->l_reldeps->act = l_reldepsact + 1;
>  	}
>  
> diff --git a/elf/dl-open.c b/elf/dl-open.c
> index a23e65926bcfe797f06f8b4175f65040f4547a05..ba77f4b774cae69c382bceb6599936664203ef05 100644
> --- a/elf/dl-open.c
> +++ b/elf/dl-open.c
> @@ -202,7 +202,7 @@ add_to_global_update (struct link_map *new)
>    assert (added <= ns->_ns_global_scope_pending_adds);
>    ns->_ns_global_scope_pending_adds -= added;
>  
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>    ns->_ns_main_searchlist->r_nlist = new_nlist;
>  }
>  
> @@ -342,7 +342,7 @@ update_scopes (struct link_map *new)
>  	     might use the new last element and then use the garbage
>  	     at offset IDX+1.  */
>  	  imap->l_scope[cnt + 1] = NULL;
> -	  atomic_write_barrier ();
> +	  atomic_thread_fence_release ();
>  	  imap->l_scope[cnt] = &new->l_searchlist;
>  
>  	  from_scope = cnt;
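
The l_initfini, l_reldeps and l_scope updates above are all the same
publish-after-initialize pattern: fill in the new object or array slot,
release fence, then store the pointer or count that makes it reachable.
If these are converted later as the commit message suggests, the writer
side becomes a single store-release.  A rough sketch with invented
names (the real readers in the loader rely on their own
synchronization, so this is only the generic shape of the pattern):

  #include <stdatomic.h>

  struct entry { int payload; };
  static _Atomic (struct entry *) published;

  static void
  publish (struct entry *e)
  {
    e->payload = 42;                     /* initialize the new object */

    /* Fence form, as in the patch: orders all earlier stores before
       the pointer store below.  */
    atomic_thread_fence (memory_order_release);
    atomic_store_explicit (&published, e, memory_order_relaxed);

    /* Same intent expressed with a single store-release:
       atomic_store_explicit (&published, e, memory_order_release);  */
  }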
> diff --git a/include/atomic.h b/include/atomic.h
> index 8eb56362ba18eb4836070930d5f2e769fb6a0a1e..dfe60ddb27fed1a06fb4967968e7d5d64de2f9c3 100644
> --- a/include/atomic.h
> +++ b/include/atomic.h
> @@ -104,21 +104,6 @@
>  #endif
>  
>  
> -#ifndef atomic_full_barrier
> -# define atomic_full_barrier() __sync_synchronize()
> -#endif
> -
> -
> -#ifndef atomic_read_barrier
> -# define atomic_read_barrier() atomic_full_barrier ()
> -#endif
> -
> -
> -#ifndef atomic_write_barrier
> -# define atomic_write_barrier() atomic_full_barrier ()
> -#endif
> -
> -
>  /* This is equal to 1 iff the architecture supports 64b atomic operations.  */
>  #ifndef __HAVE_64B_ATOMICS
>  #error Unable to determine if 64-bit atomics are present.
> diff --git a/include/list.h b/include/list.h
> index 7bea2c50a3759c0c8640971eff1e80874e3b543f..31a8a93fa2491b38c368b9e07dd65c473e4eb19f 100644
> --- a/include/list.h
> +++ b/include/list.h
> @@ -43,7 +43,7 @@ list_add (list_t *newp, list_t *head)
>    newp->next = head->next;
>    newp->prev = head;
>    head->next->prev = newp;
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>    head->next = newp;
>  }
>  
> diff --git a/malloc/arena.c b/malloc/arena.c
> index 7c74a18381a4be5fe6bcb94b38a62dbfa6b674f4..3ef52ade3aeaaba6bb5e5d4e70024202e0dc162b 100644
> --- a/malloc/arena.c
> +++ b/malloc/arena.c
> @@ -111,7 +111,7 @@ static mstate free_list;
>     malloc_state objects.
>  
>     Read access to the next member is supposed to synchronize with the
> -   atomic_write_barrier and the write to the next member in
> +   atomic_thread_fence_release and the write to the next member in
>     _int_new_arena.  This suffers from data races; see the FIXME
>     comments in _int_new_arena and reused_arena.
>  
> @@ -778,7 +778,7 @@ _int_new_arena (size_t size)
>    /* FIXME: The barrier is an attempt to synchronize with read access
>       in reused_arena, which does not acquire list_lock while
>       traversing the list.  */
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>    main_arena.next = a;
>  
>    __libc_lock_unlock (list_lock);
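
The arena list is the case where the existing comments already spell
out the problem: reused_arena walks ->next without list_lock and
without any acquire, so the release fence above has nothing to pair
with.  The eventual fix would presumably be a store-release in
_int_new_arena paired with an acquire load in the reader, roughly like
this (invented names, not the real malloc_state layout):

  #include <stdatomic.h>

  struct arena { _Atomic (struct arena *) next; int data; };
  static struct arena list_head;

  static void
  link_new_arena (struct arena *a)
  {
    a->data = 1;                 /* fully initialize before publishing */
    a->next = list_head.next;    /* node is not yet reachable, so plain
                                    assignment to the _Atomic member is
                                    fine here */
    atomic_store_explicit (&list_head.next, a, memory_order_release);
  }

  static struct arena *
  next_arena (struct arena *a)
  {
    /* Pairs with the release store above, so the new node's contents
       are visible to whoever loads it here.  */
    return atomic_load_explicit (&a->next, memory_order_acquire);
  }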
> diff --git a/manual/llio.texi b/manual/llio.texi
> index 1b801ee817db2935d8866894be23ffa516690ca3..eb8711a0f8e28281baf68206b7891f16f662de0b 100644
> --- a/manual/llio.texi
> +++ b/manual/llio.texi
> @@ -2543,14 +2543,14 @@ aiocb64}, since the LFS transparently replaces the old interface.
>  @c    deallocate_stack @asulock @ascuheap @aculock @acsmem
>  @c     lll_lock (state_cache_lock) @asulock @aculock
>  @c     stack_list_del ok
> -@c      atomic_write_barrier ok
> +@c      atomic_thread_fence_release ok
>  @c      list_del ok
> -@c      atomic_write_barrier ok
> +@c      atomic_thread_fence_release ok
>  @c     queue_stack @ascuheap @acsmem
>  @c      stack_list_add ok
> -@c       atomic_write_barrier ok
> +@c       atomic_thread_fence_release ok
>  @c       list_add ok
> -@c       atomic_write_barrier ok
> +@c       atomic_thread_fence_release ok
>  @c      free_stacks @ascuheap @acsmem
>  @c       list_for_each_prev_safe ok
>  @c       list_entry ok
> diff --git a/manual/memory.texi b/manual/memory.texi
> index 110e736a64c667988f4ca2fe92deb409225a4a88..2dfd09ea4aace004067e2e1d51c9e1292d1f7452 100644
> --- a/manual/memory.texi
> +++ b/manual/memory.texi
> @@ -395,7 +395,7 @@ this function is in @file{stdlib.h}.
>  @c     mutex_init ok
>  @c     mutex_lock (just-created mutex) ok, returns locked
>  @c     mutex_lock (list_lock) dup @asulock @aculock
> -@c     atomic_write_barrier ok
> +@c     atomic_thread_fence_release ok
>  @c     mutex_unlock (list_lock) @aculock
>  @c    atomic_fetch_add_relaxed ok
>  @c    reused_arena @asulock @aculock
> diff --git a/manual/startup.texi b/manual/startup.texi
> index 9bf24123f562f75ba27a4770c69147e003b94755..4c7c2976a92d772909203bca028347e8d798b2d7 100644
> --- a/manual/startup.texi
> +++ b/manual/startup.texi
> @@ -947,7 +947,7 @@ using @code{atexit} or @code{on_exit}.
>  @c     __libc_lock_lock @asulock @aculock
>  @c     calloc dup @ascuheap @acsmem
>  @c     __libc_lock_unlock @aculock
> -@c    atomic_write_barrier dup ok
> +@c    atomic_thread_fence_release dup ok
>  The @code{atexit} function registers the function @var{function} to be
>  called at normal program termination.  The @var{function} is called with
>  no arguments.
> @@ -961,7 +961,7 @@ the function cannot be registered.
>  @safety{@prelim{}@mtsafe{}@asunsafe{@ascuheap{} @asulock{}}@acunsafe{@aculock{} @acsmem{}}}
>  @c on_exit @ascuheap @asulock @aculock @acsmem
>  @c  new_exitfn dup @ascuheap @asulock @aculock @acsmem
> -@c  atomic_write_barrier dup ok
> +@c  atomic_thread_fence_release dup ok
>  This function is a somewhat more powerful variant of @code{atexit}.  It
>  accepts two arguments, a function @var{function} and an arbitrary
>  pointer @var{arg}.  At normal program termination, the @var{function} is
> diff --git a/nptl/nptl-stack.c b/nptl/nptl-stack.c
> index 20ce78eddbf100833d453d7032f63bc2ba8f01c7..7c04e7faaae5c15bf5ad98c32935bcba4849c1c1 100644
> --- a/nptl/nptl-stack.c
> +++ b/nptl/nptl-stack.c
> @@ -27,11 +27,11 @@ __nptl_stack_list_del (list_t *elem)
>  {
>    GL (dl_in_flight_stack) = (uintptr_t) elem;
>  
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>  
>    list_del (elem);
>  
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>  
>    GL (dl_in_flight_stack) = 0;
>  }
> @@ -42,11 +42,11 @@ __nptl_stack_list_add (list_t *elem, list_t *list)
>  {
>    GL (dl_in_flight_stack) = (uintptr_t) elem | 1;
>  
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>  
>    list_add (elem, list);
>  
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>  
>    GL (dl_in_flight_stack) = 0;
>  }
> diff --git a/nptl/pthread_mutex_setprioceiling.c b/nptl/pthread_mutex_setprioceiling.c
> index 2d71a750c8981e8ca271c265031887e2c510583a..b574a77250664bbb1487c932b9a32a9dee415072 100644
> --- a/nptl/pthread_mutex_setprioceiling.c
> +++ b/nptl/pthread_mutex_setprioceiling.c
> @@ -113,7 +113,7 @@ __pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling,
>      newlock = (mutex->__data.__lock & ~PTHREAD_MUTEX_PRIO_CEILING_MASK);
>    mutex->__data.__lock = newlock
>  			 | (prioceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT);
> -  atomic_full_barrier ();
> +  atomic_thread_fence_seq_cst ();
>  
>    futex_wake ((unsigned int *)&mutex->__data.__lock, INT_MAX,
>  	      PTHREAD_MUTEX_PSHARED (mutex));
> diff --git a/nptl/sem_post.c b/nptl/sem_post.c
> index 7ec21e92eb4c71d7f17764e96bc7603837f7522d..d4e37cb0888cb1004881e608b82147900bc420a5 100644
> --- a/nptl/sem_post.c
> +++ b/nptl/sem_post.c
> @@ -90,7 +90,7 @@ __old_sem_post (sem_t *sem)
>  
>    /* We must need to synchronize with consumers of this token, so the atomic
>       increment must have release MO semantics.  */
> -  atomic_write_barrier ();
> +  atomic_thread_fence_release ();
>    atomic_fetch_add_release (futex, 1);
>    /* We always have to assume it is a shared semaphore.  */
>    futex_wake (futex, 1, LLL_SHARED);
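
This is a good example of a "suspect" site: the comment asks for
release semantics on the increment, and atomic_fetch_add_release
already provides exactly that, so the standalone release fence in front
of it looks redundant rather than wrong.  In C11 terms (a sketch, not a
proposed change to __old_sem_post):

  #include <stdatomic.h>

  static void
  old_sem_post_sketch (atomic_uint *token)
  {
    /* The release increment alone orders every earlier store before
       the increment that hands the token to consumers; no separate
       fence is needed for that purpose.  */
    atomic_fetch_add_explicit (token, 1, memory_order_release);
    /* ... followed by futex_wake (token, 1, LLL_SHARED) in the real
       code.  */
  }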
> diff --git a/stdlib/msort.c b/stdlib/msort.c
> index cbe9a4a8fdb38113a4c18976c9f297be103d458f..e2f1eca94ad7e9005145c376b0de3dbd1ca14f18 100644
> --- a/stdlib/msort.c
> +++ b/stdlib/msort.c
> @@ -197,7 +197,7 @@ __qsort_r (void *b, size_t n, size_t s, __compar_d_fn_t cmp, void *arg)
>  	  phys_pages /= 4;
>  
>  	  /* Make sure phys_pages is written to memory.  */
> -	  atomic_write_barrier ();
> +	  atomic_thread_fence_release ();
>  
>  	  pagesize = __sysconf (_SC_PAGESIZE);
>  	}
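
Another "suspect" one: a fence by itself cannot make phys_pages
"written to memory", and I don't see an acquiring reader for it to pair
with.  If the cached value is ever read from another thread, the
minimal fix would be an atomic store, along these lines (invented
names, a sketch only):

  #include <stdatomic.h>

  static _Atomic long cached_phys_pages;

  static void
  cache_phys_pages (long value)
  {
    /* Visibility comes from the atomic store itself; whether anything
       stronger than relaxed is needed depends on what readers actually
       rely on.  */
    atomic_store_explicit (&cached_phys_pages, value, memory_order_relaxed);
  }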
> diff --git a/sysdeps/aarch64/nptl/tls.h b/sysdeps/aarch64/nptl/tls.h
> index 08aa2eff891b7be32243e9955d998892807c7b2e..0e5b4ece6a118b4b066bd2fd024dc85e978cc786 100644
> --- a/sysdeps/aarch64/nptl/tls.h
> +++ b/sysdeps/aarch64/nptl/tls.h
> @@ -108,7 +108,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/alpha/atomic-machine.h b/sysdeps/alpha/atomic-machine.h
> index f384a2bf0b3376cf240dc25d501e1d64a94bffe1..7fbe5b87eebf323d38fe1349b02aa56fe199cab3 100644
> --- a/sysdeps/alpha/atomic-machine.h
> +++ b/sysdeps/alpha/atomic-machine.h
> @@ -21,7 +21,3 @@
>  
>  /* XXX Is this actually correct?  */
>  #define ATOMIC_EXCHANGE_USES_CAS 1
> -
> -#define atomic_full_barrier()	__asm ("mb" : : : "memory");
> -#define atomic_read_barrier()	__asm ("mb" : : : "memory");
> -#define atomic_write_barrier()	__asm ("wmb" : : : "memory");
> diff --git a/sysdeps/alpha/nptl/tls.h b/sysdeps/alpha/nptl/tls.h
> index 8f5b69ad3b1b0c557fa1bae55278547572a374cc..914dba422c50e4531d22eb459b41c8b958a75263 100644
> --- a/sysdeps/alpha/nptl/tls.h
> +++ b/sysdeps/alpha/nptl/tls.h
> @@ -105,7 +105,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/arc/nptl/tls.h b/sysdeps/arc/nptl/tls.h
> index 7fc6602b236fa2455f8de4a0540442ae85d27c98..b2749f81d2980502043f507bf7c81da48f17aa9f 100644
> --- a/sysdeps/arc/nptl/tls.h
> +++ b/sysdeps/arc/nptl/tls.h
> @@ -113,7 +113,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/arm/nptl/tls.h b/sysdeps/arm/nptl/tls.h
> index 7657ca3dccc2d929c71236d42fc060a4b4902e2b..b1389ba034966aff17692f2b6d0e7b04a0baf9a0 100644
> --- a/sysdeps/arm/nptl/tls.h
> +++ b/sysdeps/arm/nptl/tls.h
> @@ -99,7 +99,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/csky/nptl/tls.h b/sysdeps/csky/nptl/tls.h
> index 58d6ab0fb2ae90de50cffd5b4a98426c6a793050..ac54606c3c0e28c1c8d57a6475d9249ca3566abe 100644
> --- a/sysdeps/csky/nptl/tls.h
> +++ b/sysdeps/csky/nptl/tls.h
> @@ -128,7 +128,7 @@ typedef struct
>    do									      \
>      {									      \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	      \
> -      atomic_write_barrier ();						      \
> +      atomic_thread_fence_release ();					      \
>      }									      \
>    while (0)
>  
> diff --git a/sysdeps/hppa/dl-fptr.c b/sysdeps/hppa/dl-fptr.c
> index 40bf5cd3b306315d8eeb6bdba2b2b46b1ea5059e..0562467d6f79f76b78b2cf169fdd059a993296d3 100644
> --- a/sysdeps/hppa/dl-fptr.c
> +++ b/sysdeps/hppa/dl-fptr.c
> @@ -369,7 +369,7 @@ _dl_lookup_address (const void *address)
>  
>    /* First load the relocation offset.  */
>    reloc_arg = (ElfW(Word)) desc[1];
> -  atomic_full_barrier();
> +  atomic_thread_fence_seq_cst ();
>  
>    /* Then load first word of candidate descriptor.  It should be a pointer
>       with word alignment and point to memory that can be read.  */
> diff --git a/sysdeps/hppa/dl-machine.h b/sysdeps/hppa/dl-machine.h
> index c865713be1e3f8e0430bbb35c8db7ebe3e7a6abf..61635ca9115e1fa77305eaa3cc4ab5bf9bb91d7b 100644
> --- a/sysdeps/hppa/dl-machine.h
> +++ b/sysdeps/hppa/dl-machine.h
> @@ -136,7 +136,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
>        /* Need to ensure that the gp is visible before the code
>           entry point is updated */
>        rfdesc[1] = value.gp;
> -      atomic_full_barrier();
> +      atomic_thread_fence_seq_cst ();
>        rfdesc[0] = value.ip;
>      }
>    else
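
The hppa function descriptor update is a real ordering requirement (the
comment says it: the gp must be visible before the new code pointer),
but a seq-cst fence is stronger than that needs; a release store of the
ip word would express the intent directly, assuming readers load the ip
first and the gp after it; I have not checked that every reader does.
A sketch with invented names, ignoring the real descriptor handling:

  #include <stdatomic.h>
  #include <stdint.h>

  static void
  fixup_fdesc_sketch (_Atomic uintptr_t *fdesc, uintptr_t ip, uintptr_t gp)
  {
    /* fdesc[0] is the code pointer and fdesc[1] the gp, as in the
       quoted code.  The release store keeps the gp store ordered
       before the ip store for any reader that loads the ip with
       acquire.  */
    atomic_store_explicit (&fdesc[1], gp, memory_order_relaxed);
    atomic_store_explicit (&fdesc[0], ip, memory_order_release);
  }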
> diff --git a/sysdeps/hppa/nptl/tls.h b/sysdeps/hppa/nptl/tls.h
> index e6b0bd5c7182b497aaf0d2bb08f62551a223c403..5b2495637b8aec9df3c8b3d47a2b46aa632c84da 100644
> --- a/sysdeps/hppa/nptl/tls.h
> +++ b/sysdeps/hppa/nptl/tls.h
> @@ -133,7 +133,7 @@ static inline void __set_cr27(struct pthread *cr27)
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/htl/pt-once.c b/sysdeps/htl/pt-once.c
> index b85b196645958fc7f47b08b39e91077b82817cdc..55db6c3d7176a9ca6cf2d0caccc1cf01aeb5ea2f 100644
> --- a/sysdeps/htl/pt-once.c
> +++ b/sysdeps/htl/pt-once.c
> @@ -33,7 +33,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
>  {
>    ASSERT_TYPE_SIZE (pthread_once_t, __SIZEOF_PTHREAD_ONCE_T);
>  
> -  atomic_full_barrier ();
> +  atomic_thread_fence_seq_cst ();
>    if (once_control->__run == 0)
>      {
>        __pthread_spin_wait (&once_control->__lock);
> @@ -44,7 +44,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
>  	  init_routine ();
>  	  pthread_cleanup_pop (0);
>  
> -	  atomic_full_barrier ();
> +	  atomic_thread_fence_seq_cst ();
>  	  once_control->__run = 1;
>  	}
>  
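
The htl __pthread_once fast path is the usual double-checked pattern,
so once the acquire/release cleanup happens the two full fences could
in principle become an acquire load of __run and a release store of
__run.  Roughly (a sketch only; the real code also has the spin lock,
the re-check under the lock, and cancellation handling):

  #include <stdatomic.h>

  static atomic_int once_done;

  static void
  run_once_sketch (void (*init_routine) (void))
  {
    /* Acquire: seeing once_done == 1 also makes every store done by
       init_routine before the release store below visible.  */
    if (atomic_load_explicit (&once_done, memory_order_acquire) != 0)
      return;

    /* ... take the lock and re-check once_done here ... */
    init_routine ();
    atomic_store_explicit (&once_done, 1, memory_order_release);
    /* ... release the lock ... */
  }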
> diff --git a/sysdeps/ia64/nptl/tls.h b/sysdeps/ia64/nptl/tls.h
> index d2411b3c1ac29733c0bb3683d83388e2e0e8e277..7709e644ee04ebc935dc659806481eebcb4129f0 100644
> --- a/sysdeps/ia64/nptl/tls.h
> +++ b/sysdeps/ia64/nptl/tls.h
> @@ -157,7 +157,7 @@ register struct pthread *__thread_self __asm__("r13");
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/m68k/nptl/tls.h b/sysdeps/m68k/nptl/tls.h
> index 742e1b6767d99fa6011ac1d207264c7b82e53787..dfba7a568016b8e10dac6c21d65c785eaab12a09 100644
> --- a/sysdeps/m68k/nptl/tls.h
> +++ b/sysdeps/m68k/nptl/tls.h
> @@ -132,7 +132,7 @@ extern void * __m68k_read_tp (void);
>    do									\
>      {									\
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	\
> -      atomic_write_barrier ();						\
> +      atomic_thread_fence_release ();					\
>      }									\
>    while (0)
>  
> diff --git a/sysdeps/mach/hurd/htl/pt-mutex-destroy.c b/sysdeps/mach/hurd/htl/pt-mutex-destroy.c
> index 71f789cbdfa20b79f9e3cef5a2523d4e243b4f19..feb9085af9499b5d954a1bf4f04d9cbcb804e8cf 100644
> --- a/sysdeps/mach/hurd/htl/pt-mutex-destroy.c
> +++ b/sysdeps/mach/hurd/htl/pt-mutex-destroy.c
> @@ -26,7 +26,7 @@
>  int
>  __pthread_mutex_destroy (pthread_mutex_t *mtxp)
>  {
> -  atomic_read_barrier ();
> +  atomic_thread_fence_acquire ();
>    if (*(volatile unsigned int *) &mtxp->__lock != 0)
>      return EBUSY;
>  
> diff --git a/sysdeps/mach/hurd/htl/pt-mutex.h b/sysdeps/mach/hurd/htl/pt-mutex.h
> index 4021e72a6e8d15316336296ff732a4e7fd1acdff..ebdf8a5fbde0f755c7625cd38ad185ee0c977b5f 100644
> --- a/sysdeps/mach/hurd/htl/pt-mutex.h
> +++ b/sysdeps/mach/hurd/htl/pt-mutex.h
> @@ -54,7 +54,7 @@
>            if (ret == EOWNERDEAD)   \
>              {   \
>                mtxp->__lock = mtxp->__lock | LLL_DEAD_OWNER;   \
> -              atomic_write_barrier ();   \
> +              atomic_thread_fence_release ();   \
>              }   \
>          }   \
>      }   \
> diff --git a/sysdeps/microblaze/nptl/tls.h b/sysdeps/microblaze/nptl/tls.h
> index 588fd1c5d63ee4e6a1b284cc19e216b6730a2091..30e5d628be8b78cf9c7b8e9386ab1b2355819f4a 100644
> --- a/sysdeps/microblaze/nptl/tls.h
> +++ b/sysdeps/microblaze/nptl/tls.h
> @@ -110,7 +110,7 @@ typedef struct
>    do                                                                        \
>      {                                                                       \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;            \
> -      atomic_write_barrier ();                                              \
> +      atomic_thread_fence_release ();                                       \
>      }                                                                       \
>    while (0)
>  
> diff --git a/sysdeps/mips/nptl/tls.h b/sysdeps/mips/nptl/tls.h
> index 2aa7cb4bb8d0b5a31889aa33d5751104ff1e4f45..e4c5d2a876db7943b38daa270f4681d17b441c58 100644
> --- a/sysdeps/mips/nptl/tls.h
> +++ b/sysdeps/mips/nptl/tls.h
> @@ -160,7 +160,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/nios2/nptl/tls.h b/sysdeps/nios2/nptl/tls.h
> index cb231e2a4bbfa52495c4c017a7d3e1c6dd7937ca..50dbbef119af30112136a9bef187c79037d0849d 100644
> --- a/sysdeps/nios2/nptl/tls.h
> +++ b/sysdeps/nios2/nptl/tls.h
> @@ -140,7 +140,7 @@ register struct pthread *__thread_self __asm__("r23");
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/or1k/nptl/tls.h b/sysdeps/or1k/nptl/tls.h
> index e82f444738de222c0e4866d5a2ab8191ce99ddc9..886c017be12d06ee63198a36917c59e24be77a60 100644
> --- a/sysdeps/or1k/nptl/tls.h
> +++ b/sysdeps/or1k/nptl/tls.h
> @@ -175,7 +175,7 @@ register tcbhead_t *__thread_self __asm__("r10");
>    do									\
>      {									\
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	\
> -      atomic_write_barrier ();						\
> +      atomic_thread_fence_release ();					\
>      }									\
>    while (0)
>  
> diff --git a/sysdeps/powerpc/nptl/tls.h b/sysdeps/powerpc/nptl/tls.h
> index e62a96238aa95c79ac1f749b4dbf03985b6e15d4..c8d233a7347f609b4cdbffb5dafa2f55e18ac18e 100644
> --- a/sysdeps/powerpc/nptl/tls.h
> +++ b/sysdeps/powerpc/nptl/tls.h
> @@ -224,7 +224,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/powerpc/powerpc32/atomic-machine.h b/sysdeps/powerpc/powerpc32/atomic-machine.h
> index f72d4be13709e38006255d236efb0e94f3976e68..6a2aae8bdb34281144e6810924377a6a62857d15 100644
> --- a/sysdeps/powerpc/powerpc32/atomic-machine.h
> +++ b/sysdeps/powerpc/powerpc32/atomic-machine.h
> @@ -36,24 +36,3 @@
>  
>  #define __HAVE_64B_ATOMICS 0
>  #define ATOMIC_EXCHANGE_USES_CAS 1
> -
> -#ifdef _ARCH_PWR4
> -/*
> - * Newer powerpc64 processors support the new "light weight" sync (lwsync)
> - * So if the build is using -mcpu=[power4,power5,power5+,970] we can
> - * safely use lwsync.
> - */
> -# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
> -/*
> - * "light weight" sync can also be used for the release barrier.
> - */
> -# define atomic_write_barrier()	__asm ("lwsync" ::: "memory")
> -#else
> -/*
> - * Older powerpc32 processors don't support the new "light weight"
> - * sync (lwsync).  So the only safe option is to use normal sync
> - * for all powerpc32 applications.
> - */
> -# define atomic_read_barrier()	__asm ("sync" ::: "memory")
> -# define atomic_write_barrier()	__asm ("sync" ::: "memory")
> -#endif
> diff --git a/sysdeps/powerpc/powerpc64/atomic-machine.h b/sysdeps/powerpc/powerpc64/atomic-machine.h
> index fcb1592be9ad6a3981f56c513deac2f5f8ac5bb7..2932f889c5bc6d0fa49d5ad36875b50c27ad07e9 100644
> --- a/sysdeps/powerpc/powerpc64/atomic-machine.h
> +++ b/sysdeps/powerpc/powerpc64/atomic-machine.h
> @@ -36,12 +36,3 @@
>  
>  #define __HAVE_64B_ATOMICS 1
>  #define ATOMIC_EXCHANGE_USES_CAS 1
> -
> -/*
> - * All powerpc64 processors support the new "light weight"  sync (lwsync).
> - */
> -#define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
> -/*
> - * "light weight" sync can also be used for the release barrier.
> - */
> -#define atomic_write_barrier()	__asm ("lwsync" ::: "memory")
> diff --git a/sysdeps/riscv/nptl/tls.h b/sysdeps/riscv/nptl/tls.h
> index 700c2f51899b0385d7ebaa4810c84de4fa6f2b45..020a986ceee89e1feb8f76c51f224a8faea71bbb 100644
> --- a/sysdeps/riscv/nptl/tls.h
> +++ b/sysdeps/riscv/nptl/tls.h
> @@ -123,7 +123,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/s390/nptl/tls.h b/sysdeps/s390/nptl/tls.h
> index 98d7870148ce6bc1d6397b1465dfabe96f7280b2..f1664d9ade6fd562db38c3dddd3fa6237a47faea 100644
> --- a/sysdeps/s390/nptl/tls.h
> +++ b/sysdeps/s390/nptl/tls.h
> @@ -167,7 +167,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/sh/nptl/tls.h b/sysdeps/sh/nptl/tls.h
> index 1530489a6ce4286bc5146e6cd83e3b463b965467..00ae1b998b9e0b1e6c347c4be4e99a90e530e924 100644
> --- a/sysdeps/sh/nptl/tls.h
> +++ b/sysdeps/sh/nptl/tls.h
> @@ -139,7 +139,7 @@ typedef struct
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/sparc/atomic-machine.h b/sysdeps/sparc/atomic-machine.h
> index a7042f1ee546b9f238153cb923409d42eb45cc03..1f0eb0a9b1171c06dc19dc21c4fe7de94adc4bce 100644
> --- a/sysdeps/sparc/atomic-machine.h
> +++ b/sysdeps/sparc/atomic-machine.h
> @@ -29,13 +29,6 @@
>  #define ATOMIC_EXCHANGE_USES_CAS     __HAVE_64B_ATOMICS
>  
>  #ifdef __sparc_v9__
> -# define atomic_full_barrier() \
> -  __asm __volatile ("membar #LoadLoad | #LoadStore"			      \
> -		    " | #StoreLoad | #StoreStore" : : : "memory")
> -# define atomic_read_barrier() \
> -  __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory")
> -# define atomic_write_barrier() \
> -  __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory")
>  
>  extern void __cpu_relax (void);
>  # define atomic_spin_nop() __cpu_relax ()
> diff --git a/sysdeps/sparc/nptl/tls.h b/sysdeps/sparc/nptl/tls.h
> index 95a69cb8249dc79c3a063637a21d976d2660c48f..bc7ada0d3fe66751506e1cf5516f14ec2c205af8 100644
> --- a/sysdeps/sparc/nptl/tls.h
> +++ b/sysdeps/sparc/nptl/tls.h
> @@ -140,7 +140,7 @@ register struct pthread *__thread_self __asm__("%g7");
>    do									     \
>      {									     \
>        THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;	     \
> -      atomic_write_barrier ();						     \
> +      atomic_thread_fence_release ();					     \
>      }									     \
>    while (0)
>  
> diff --git a/sysdeps/unix/sysv/linux/arm/atomic-machine.h b/sysdeps/unix/sysv/linux/arm/atomic-machine.h
> deleted file mode 100644
> index 20068c72f359442769f8d49e11f7e771c922ef0b..0000000000000000000000000000000000000000
> --- a/sysdeps/unix/sysv/linux/arm/atomic-machine.h
> +++ /dev/null
> @@ -1,115 +0,0 @@
> -/* Atomic operations.  ARM/Linux version.
> -   Copyright (C) 2002-2022 Free Software Foundation, Inc.
> -   This file is part of the GNU C Library.
> -
> -   The GNU C Library is free software; you can redistribute it and/or
> -   modify it under the terms of the GNU Lesser General Public
> -   License as published by the Free Software Foundation; either
> -   version 2.1 of the License, or (at your option) any later version.
> -
> -   The GNU C Library is distributed in the hope that it will be useful,
> -   but WITHOUT ANY WARRANTY; without even the implied warranty of
> -   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> -   Lesser General Public License for more details.
> -
> -   You should have received a copy of the GNU Lesser General Public
> -   License along with the GNU C Library.  If not, see
> -   <https://www.gnu.org/licenses/>.  */
> -
> -#include <stdint.h>
> -
> -/* If the compiler doesn't provide a primitive, we'll use this macro
> -   to get assistance from the kernel.  */
> -#ifdef __thumb2__
> -# define __arm_assisted_full_barrier() \
> -     __asm__ __volatile__						      \
> -	     ("movw\tip, #0x0fa0\n\t"					      \
> -	      "movt\tip, #0xffff\n\t"					      \
> -	      "blx\tip"							      \
> -	      : : : "ip", "lr", "cc", "memory");
> -#else
> -# define __arm_assisted_full_barrier() \
> -     __asm__ __volatile__						      \
> -	     ("mov\tip, #0xffff0fff\n\t"				      \
> -	      "mov\tlr, pc\n\t"						      \
> -	      "add\tpc, ip, #(0xffff0fa0 - 0xffff0fff)"			      \
> -	      : : : "ip", "lr", "cc", "memory");
> -#endif
> -
> -/* Atomic compare and exchange.  This sequence relies on the kernel to
> -   provide a compare and exchange operation which is atomic on the
> -   current architecture, either via cleverness on pre-ARMv6 or via
> -   ldrex / strex on ARMv6.
> -
> -   It doesn't matter what register is used for a_oldval2, but we must
> -   specify one to work around GCC PR rtl-optimization/21223.  Otherwise
> -   it may cause a_oldval or a_tmp to be moved to a different register.
> -
> -   We use the union trick rather than simply using __typeof (...) in the
> -   declarations of A_OLDVAL et al because when NEWVAL or OLDVAL is of the
> -   form *PTR and PTR has a 'volatile ... *' type, then __typeof (*PTR) has
> -   a 'volatile ...' type and this triggers -Wvolatile-register-var to
> -   complain about 'register volatile ... asm ("reg")'.
> -
> -   We use the same union trick in the declaration of A_PTR because when
> -   MEM is of the from *PTR and PTR has a 'const ... *' type, then __typeof
> -   (*PTR) has a 'const ...' type and this enables the compiler to substitute
> -   the variable with its initializer in asm statements, which may cause the
> -   corresponding operand to appear in a different register.  */
> -#ifdef __thumb2__
> -/* Thumb-2 has ldrex/strex.  However it does not have barrier instructions,
> -   so we still need to use the kernel helper.  */
> -# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \
> -  ({ union { __typeof (mem) a; uint32_t v; } mem_arg = { .a = (mem) };       \
> -     union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) };\
> -     union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) };\
> -     register uint32_t a_oldval asm ("r0");				      \
> -     register uint32_t a_newval asm ("r1") = newval_arg.v;		      \
> -     register uint32_t a_ptr asm ("r2") = mem_arg.v;			      \
> -     register uint32_t a_tmp asm ("r3");				      \
> -     register uint32_t a_oldval2 asm ("r4") = oldval_arg.v;		      \
> -     __asm__ __volatile__						      \
> -	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \
> -	      "cmp\t%[tmp], %[old2]\n\t"				      \
> -	      "bne\t1f\n\t"						      \
> -	      "mov\t%[old], %[old2]\n\t"				      \
> -	      "movw\t%[tmp], #0x0fc0\n\t"				      \
> -	      "movt\t%[tmp], #0xffff\n\t"				      \
> -	      "blx\t%[tmp]\n\t"						      \
> -	      "bcc\t0b\n\t"						      \
> -	      "mov\t%[tmp], %[old2]\n\t"				      \
> -	      "1:"							      \
> -	      : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)		      \
> -	      : [new] "r" (a_newval), [ptr] "r" (a_ptr),		      \
> -		[old2] "r" (a_oldval2)					      \
> -	      : "ip", "lr", "cc", "memory");				      \
> -     (__typeof (oldval)) a_tmp; })
> -#else
> -# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \
> -  ({ union { __typeof (mem) a; uint32_t v; } mem_arg = { .a = (mem) };       \
> -     union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) };\
> -     union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) };\
> -     register uint32_t a_oldval asm ("r0");				      \
> -     register uint32_t a_newval asm ("r1") = newval_arg.v;		      \
> -     register uint32_t a_ptr asm ("r2") = mem_arg.v;			      \
> -     register uint32_t a_tmp asm ("r3");				      \
> -     register uint32_t a_oldval2 asm ("r4") = oldval_arg.v;		      \
> -     __asm__ __volatile__						      \
> -	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \
> -	      "cmp\t%[tmp], %[old2]\n\t"				      \
> -	      "bne\t1f\n\t"						      \
> -	      "mov\t%[old], %[old2]\n\t"				      \
> -	      "mov\t%[tmp], #0xffff0fff\n\t"				      \
> -	      "mov\tlr, pc\n\t"						      \
> -	      "add\tpc, %[tmp], #(0xffff0fc0 - 0xffff0fff)\n\t"		      \
> -	      "bcc\t0b\n\t"						      \
> -	      "mov\t%[tmp], %[old2]\n\t"				      \
> -	      "1:"							      \
> -	      : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)		      \
> -	      : [new] "r" (a_newval), [ptr] "r" (a_ptr),		      \
> -		[old2] "r" (a_oldval2)					      \
> -	      : "ip", "lr", "cc", "memory");				      \
> -     (__typeof (oldval)) a_tmp; })
> -#endif
> -
> -#include <sysdeps/arm/atomic-machine.h>
> diff --git a/sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h b/sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h
> index 6f83fb2965bd162f0f76e0e3586472ade39af607..02e54847a42bfbc93ae1e07b7e32965be644daba 100644
> --- a/sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h
> +++ b/sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h
> @@ -25,7 +25,4 @@
>  /* XXX Is this actually correct?  */
>  #define ATOMIC_EXCHANGE_USES_CAS 1
>  
> -# define atomic_full_barrier()				\
> -  (INTERNAL_SYSCALL_CALL (atomic_barrier), (void) 0)
> -
>  #endif
> diff --git a/sysdeps/x86/atomic-machine.h b/sysdeps/x86/atomic-machine.h
> index b9be51c52d8cbef2a95a62192c8ef7011e7f2c12..98541a2d06ff5e4aa8c789ab7405215097471971 100644
> --- a/sysdeps/x86/atomic-machine.h
> +++ b/sysdeps/x86/atomic-machine.h
> @@ -32,9 +32,6 @@
>  #endif
>  #define ATOMIC_EXCHANGE_USES_CAS	0
>  
> -#define atomic_read_barrier() __asm ("" ::: "memory")
> -#define atomic_write_barrier() __asm ("" ::: "memory")
> -
>  #define atomic_spin_nop() __asm ("pause")
>  
>  #endif /* atomic-machine.h */
> 
> 


-- 
Cheers,
Carlos.


