


Re: [PATCH 5/5] Refactor PI mutexes internal definitions


On 10/30/19 4:00 PM, Adhemerval Zanella wrote:
> This patch adds the generic futex_lock_pi and futex_unlock_pi to wrap
> around the syscall machinery required to issue the futex syscalls.  It
> simplifies the futex code required to implement PI mutexes a bit.
> 

OK for master with minor typos fixed.

Reviewed-by: Carlos O'Donell <carlos@redhat.com>
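
For anyone skimming, the shape of the change at a lock call site,
condensed from the pthread_mutex_lock.c hunk below:

  /* Before: open-coded futex syscall plus the INTERNAL_SYSCALL error
     macros.  */
  INTERNAL_SYSCALL_DECL (__err);
  int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                            __lll_private_flag (FUTEX_LOCK_PI, private),
                            1, 0);
  if (INTERNAL_SYSCALL_ERROR_P (e, __err)
      && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
          || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
    ...

  /* After: the wrapper returns 0 or a positive errno value.  */
  int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
                         NULL, private);
  if (e == ESRCH || e == EDEADLK)
    ...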

> No functional changes, checked on x86_64-linux-gnu.
> ---
>  nptl/pthread_mutex_init.c         | 13 ++---
>  nptl/pthread_mutex_lock.c         | 24 +++------
>  nptl/pthread_mutex_timedlock.c    | 60 +++++++++------------
>  nptl/pthread_mutex_trylock.c      |  8 ++-
>  nptl/pthread_mutex_unlock.c       |  6 +--
>  sysdeps/nptl/futex-internal.h     | 86 +++++++++++++++++++++++++++++++
>  sysdeps/nptl/lowlevellock-futex.h |  9 ++++
>  7 files changed, 137 insertions(+), 69 deletions(-)
> 
> diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
> index fe4eeee37c..20800b80f5 100644
> --- a/nptl/pthread_mutex_init.c
> +++ b/nptl/pthread_mutex_init.c
> @@ -24,6 +24,7 @@
>  #include "pthreadP.h"
>  #include <atomic.h>
>  #include <pthread-offsets.h>
> +#include <futex-internal.h>

OK.

>  
>  #include <stap-probe.h>
>  
> @@ -37,19 +38,13 @@ static const struct pthread_mutexattr default_mutexattr =
>  static bool
>  prio_inherit_missing (void)
>  {
> -#ifdef __NR_futex
>    static int tpi_supported;
> -  if (__glibc_unlikely (tpi_supported == 0))
> +  if (__glibc_unlikely (atomic_load_relaxed (&tpi_supported) == 0))

OK.

>      {
> -      int lock = 0;
> -      INTERNAL_SYSCALL_DECL (err);
> -      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI, 0, 0);
> -      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
> -      tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
> +      int e = futex_unlock_pi (&(unsigned int){0}, 0);
> +      atomic_store_relaxed (&tpi_supported, e == ENOSYS ? -1 : 1);

OK.
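
Probing FUTEX_UNLOCK_PI on a dummy futex word is a tidy way to detect
kernel PI support.  For reference, the same probe as a standalone
Linux program (my own sketch, not part of the patch; the futex system
call has no libc wrapper, so it goes through syscall(2)):

  #include <errno.h>
  #include <linux/futex.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int
  main (void)
  {
    /* Unlocking a PI futex that nobody owns always fails: with ENOSYS
       if the kernel lacks PI-futex support, with some other error
       (typically EPERM) if support is present.  */
    unsigned int lock = 0;
    long int ret = syscall (SYS_futex, &lock, FUTEX_UNLOCK_PI, 0, NULL);
    if (ret == -1 && errno == ENOSYS)
      puts ("PI futexes not supported");
    else
      puts ("PI futexes supported");
    return 0;
  }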

>      }
>    return __glibc_unlikely (tpi_supported < 0);
> -#endif
> -  return true;
>  }
>  
>  int
> diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
> index ace436d5a6..05bba50666 100644
> --- a/nptl/pthread_mutex_lock.c
> +++ b/nptl/pthread_mutex_lock.c
> @@ -24,7 +24,7 @@
>  #include <not-cancel.h>
>  #include "pthreadP.h"
>  #include <atomic.h>
> -#include <lowlevellock.h>
> +#include <futex-internal.h>
>  #include <stap-probe.h>
>  
>  #ifndef lll_lock_elision
> @@ -416,21 +416,16 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
>  	    int private = (robust
>  			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
>  			   : PTHREAD_MUTEX_PSHARED (mutex));
> -	    INTERNAL_SYSCALL_DECL (__err);
> -	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
> -				      __lll_private_flag (FUTEX_LOCK_PI,
> -							  private), 1, 0);
> -
> -	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
> -		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
> -		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
> +	    int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
> +				   NULL, private);
> +	    if (e == ESRCH || e == EDEADLK)

OK.

>  	      {
> -		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
> +		assert (e != EDEADLK
>  			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
>  			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
>  		/* ESRCH can happen only for non-robust PI mutexes where
>  		   the owner of the lock died.  */
> -		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
> +		assert (e != ESRCH || !robust);
>  
>  		/* Delay the thread indefinitely.  */
>  		while (1)
> @@ -479,11 +474,8 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
>  	    /* This mutex is now not recoverable.  */
>  	    mutex->__data.__count = 0;
>  
> -	    INTERNAL_SYSCALL_DECL (__err);
> -	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
> -			      __lll_private_flag (FUTEX_UNLOCK_PI,
> -						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
> -			      0, 0);
> +	    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
> +			     PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

OK.

>  
>  	    /* To the kernel, this will be visible after the kernel has
>  	       acquired the mutex in the syscall.  */
> diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
> index 490064d8cf..112175eb11 100644
> --- a/nptl/pthread_mutex_timedlock.c
> +++ b/nptl/pthread_mutex_timedlock.c
> @@ -25,6 +25,7 @@
>  #include <atomic.h>
>  #include <lowlevellock.h>
>  #include <not-cancel.h>
> +#include <futex-internal.h>
>  
>  #include <stap-probe.h>
>  
> @@ -377,39 +378,29 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
>  	    int private = (robust
>  			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
>  			   : PTHREAD_MUTEX_PSHARED (mutex));
> -	    INTERNAL_SYSCALL_DECL (__err);
> -
> -	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
> -				      __lll_private_flag (FUTEX_LOCK_PI,
> -							  private), 1,
> -				      abstime);
> -	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
> +	    int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
> +				   abstime, private);
> +	    if (e == ETIMEDOUT)
> +	      return ETIMEDOUT;
> +	    else if (e == ESRCH || e == EDEADLK)

OK.

>  	      {
> -		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
> -		  return ETIMEDOUT;
> -
> -		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
> -		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
> -		  {
> -		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
> -			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
> -				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
> -		    /* ESRCH can happen only for non-robust PI mutexes where
> -		       the owner of the lock died.  */
> -		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
> -			    || !robust);
> -
> -		    /* Delay the thread until the timeout is reached.
> -		       Then return ETIMEDOUT.  */
> -		    do
> -		      e = lll_timedwait (&(int){0}, 0, clockid, abstime,
> -					 private);
> -		    while (e != ETIMEDOUT);
> -		    return ETIMEDOUT;
> -		  }
> -
> -		return INTERNAL_SYSCALL_ERRNO (e, __err);
> +		assert (e != EDEADLK
> +			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
> +			   && kind != PTHREAD_MUTEX_RECURSIVE_NP));
> +		/* ESRCH can happen only for non-robust PI mutexes where
> +		   the owner of the lock died.  */
> +		assert (e != ESRCH || !robust);
> +
> +		/* Delay the thread until the timeout is reached. Then return
> +		   ETIMEDOUT.  */
> +		do
> +		  e = lll_timedwait (&(int){0}, 0, clockid, abstime,
> +				     private);
> +		while (e != ETIMEDOUT);
> +		return ETIMEDOUT;

OK.

>  	      }
> +	    else if (e != 0)
> +	      return e;
>  
>  	    oldval = mutex->__data.__lock;
>  
> @@ -447,11 +438,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
>  	    /* This mutex is now not recoverable.  */
>  	    mutex->__data.__count = 0;
>  
> -	    INTERNAL_SYSCALL_DECL (__err);
> -	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
> -			      __lll_private_flag (FUTEX_UNLOCK_PI,
> -						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
> -			      0, 0);
> +	    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
> +			     PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

OK.

>  
>  	    /* To the kernel, this will be visible after the kernel has
>  	       acquired the mutex in the syscall.  */
> diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
> index 87e87c013a..d24bb58a8b 100644
> --- a/nptl/pthread_mutex_trylock.c
> +++ b/nptl/pthread_mutex_trylock.c
> @@ -21,6 +21,7 @@
>  #include <stdlib.h>
>  #include "pthreadP.h"
>  #include <lowlevellock.h>
> +#include <futex-internal.h>
>  
>  #ifndef lll_trylock_elision
>  #define lll_trylock_elision(a,t) lll_trylock(a)
> @@ -346,11 +347,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	    /* This mutex is now not recoverable.  */
>  	    mutex->__data.__count = 0;
>  
> -	    INTERNAL_SYSCALL_DECL (__err);
> -	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
> -			      __lll_private_flag (FUTEX_UNLOCK_PI,
> -						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
> -			      0, 0);
> +	    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
> +			     PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
>  

OK.

>  	    /* To the kernel, this will be visible after the kernel has
>  	       acquired the mutex in the syscall.  */
> diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
> index 71038f92e4..53f8b868e4 100644
> --- a/nptl/pthread_mutex_unlock.c
> +++ b/nptl/pthread_mutex_unlock.c
> @@ -22,6 +22,7 @@
>  #include "pthreadP.h"
>  #include <lowlevellock.h>
>  #include <stap-probe.h>
> +#include <futex-internal.h>
>  
>  #ifndef lll_unlock_elision
>  #define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
> @@ -277,9 +278,8 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
>  	  if (((l & FUTEX_WAITERS) != 0)
>  	      || (l != THREAD_GETMEM (THREAD_SELF, tid)))
>  	    {
> -	      INTERNAL_SYSCALL_DECL (__err);
> -	      INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
> -				__lll_private_flag (FUTEX_UNLOCK_PI, private));
> +	      futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
> +			       private);

OK.

>  	      break;
>  	    }
>  	}
> diff --git a/sysdeps/nptl/futex-internal.h b/sysdeps/nptl/futex-internal.h
> index 76921466f0..2a8e3fb323 100644
> --- a/sysdeps/nptl/futex-internal.h
> +++ b/sysdeps/nptl/futex-internal.h
> @@ -381,4 +381,90 @@ futex_wake (unsigned int* futex_word, int processes_to_wake, int private)
>      }
>  }
>  
> +/* The operation checks the value of the futer, if the value is 0, then the

s/futer/futex/g

> +   it is atomically set to the caller's thread ID.  If the futex value is
> +   nonzero, it is atomically sets the FUTEX_WAITERS bit, which signals wrt

s/it is/it/g

> +   other futex owner that it cannot unlock the futex in user space

s/space/space by/g

> +   atomically by setting its value to 0.
> +
> +   If more than one wait operation is issued, the waiters are enqueued in
> +   descending priority order.
> +
> +   The ABSTIME argument provides an absolute timeout (measured against the
> +   CLOCK_REALTIME clock).  If ABSTIME is NULL, the operation will block
> +   indefinitely.
> +
> +   Returns:
> +
> +     - 0 if woken by a PI unlock operation or spuriously.
> +     - EAGAIN if the futex owner thread ID is about to exit, but has not yet
> +       handled the state cleanup.
> +     - EDEADLK if the futex is already locked by the caller.
> +     - ESRCH if the thread ID in the futex does not exist.
> +     - EINVAL if the state is corrupted or if there is a waiter on the
> +       futex.
> +     - ETIMEDOUT if the ABSTIME expires.
> +*/
> +static __always_inline int
> +futex_lock_pi (unsigned int *futex_word, const struct timespec *abstime,
> +	       int private)
> +{
> +  int err = lll_futex_timed_lock_pi (futex_word, abstime, private);

OK.

> +  switch (err)
> +    {
> +    case 0:
> +    case -EAGAIN:
> +    case -EINTR:
> +    case -ETIMEDOUT:
> +    case -ESRCH:
> +    case -EDEADLK:
> +    case -EINVAL: /* This indicates either state corruption or that the kernel
> +		     found a waiter on futex address which is waiting via
> +		     FUTEX_WAIT or FUTEX_WAIT_BITSET.  This is reported on
> +		     some futex_lock_pi usage (pthread_mutex_timedlock for
> +		     instance).  */
> +      return -err;
> +
> +    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
> +    case -ENOSYS: /* Must have been caused by a glibc bug.  */
> +    /* No other errors are documented at this time.  */
> +    default:
> +      futex_fatal_error ();

OK.

> +    }
> +}
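
The documented contract is easy to exercise.  A minimal sketch of a
caller (my own, hypothetical helper; assumes the nptl-internal
<futex-internal.h> and that errors come back as positive errno values):

  #include <errno.h>
  #include <time.h>
  #include <futex-internal.h>

  /* Illustrative only: acquire FUTEXP as a PI futex, waiting at most
     until the absolute CLOCK_REALTIME time ABSTIME, or forever if
     ABSTIME is NULL.  */
  static int
  example_lock_pi (unsigned int *futexp, const struct timespec *abstime,
                   int private)
  {
    int e = futex_lock_pi (futexp, abstime, private);
    if (e == ETIMEDOUT)
      return ETIMEDOUT;   /* Only possible with a non-NULL ABSTIME.  */
    else if (e == ESRCH || e == EDEADLK)
      return e;           /* Owner died, or we already hold the lock.  */
    else if (e != 0)
      return e;           /* EAGAIN, EINTR, or EINVAL; caller decides.  */

    /* Success: per futex(2) the kernel stored our TID in *FUTEXP
       (possibly with FUTEX_WAITERS set) and we own the lock.  */
    return 0;
  }
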
> +
> +/* Wakes the top priority waiter that called a futex_lock_pi operation on
> +   the futex.
> +
> +   Returns the same values as futex_lock_pi under those same conditions;
> +   additionally, returns EPERM when the caller is not allowed to attach
> +   itself to the futex.  */
> +static __always_inline int
> +futex_unlock_pi (unsigned int *futex_word, int private)
> +{
> +  int err = lll_futex_timed_unlock_pi (futex_word, private);

OK.

> +  switch (err)
> +    {
> +    case 0:
> +    case -EAGAIN:
> +    case -EINTR:
> +    case -ETIMEDOUT:
> +    case -ESRCH:
> +    case -EDEADLK:
> +    case -ENOSYS:
> +    case -EPERM:  /*  The caller is not allowed to attach itself to the futex.
> +		      Used to check if PI futexes are supported by the
> +		      kernel.  */
> +      return -err;
> +
> +    case -EINVAL: /* Either due to wrong alignment or due to the timeout not
> +		     being normalized.  Must have been caused by a glibc or
> +		     application bug.  */
> +    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
> +    /* No other errors are documented at this time.  */
> +    default:
> +      futex_fatal_error ();
> +    }
> +}
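
And the consumer of the wake-up, condensed from the
pthread_mutex_unlock.c hunk above: when the futex cannot be released
purely in user space, the kernel performs the unlock and wakes the
top-priority waiter.

  /* l is the current futex value: if waiters are queued, or the value
     is not simply our TID, let the kernel do the unlock.  */
  if (((l & FUTEX_WAITERS) != 0)
      || (l != THREAD_GETMEM (THREAD_SELF, tid)))
    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock, private);
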
> +
>  #endif  /* futex-internal.h */
> diff --git a/sysdeps/nptl/lowlevellock-futex.h b/sysdeps/nptl/lowlevellock-futex.h
> index 392277e8a5..ff0fd4edc4 100644
> --- a/sysdeps/nptl/lowlevellock-futex.h
> +++ b/sysdeps/nptl/lowlevellock-futex.h
> @@ -140,6 +140,15 @@
>  
>  
>  /* Priority Inheritance support.  */
> +#define lll_futex_timed_lock_pi(futexp, abstime, private) 		\
> +  lll_futex_syscall (4, futexp,						\
> +		     __lll_private_flag (FUTEX_LOCK_PI, private),	\
> +		     0, abstime)
> +
> +#define lll_futex_timed_unlock_pi(futexp, private) 			\
> +  lll_futex_syscall (4, futexp,						\
> +		     __lll_private_flag (FUTEX_UNLOCK_PI, private),	\
> +		     0, 0)

OK.
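
One note for anyone diffing these against the removed open-coded
sites: FUTEX_LOCK_PI ignores the val argument (futex(2)), so passing 0
here where the old calls passed 1 is not a functional change.
Hand-expanded, the macros issue approximately (pseudo-code; this is
not the literal lll_futex_syscall expansion, and there is no libc
futex wrapper):

  /* lll_futex_timed_lock_pi (futexp, abstime, private)  */
  futex (futexp, __lll_private_flag (FUTEX_LOCK_PI, private),
         0 /* val, ignored */, abstime);

  /* lll_futex_timed_unlock_pi (futexp, private)  */
  futex (futexp, __lll_private_flag (FUTEX_UNLOCK_PI, private), 0, 0);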

>  
>  /* Like lll_futex_wait (FUTEXP, VAL, PRIVATE) but with the expectation
>     that lll_futex_cmp_requeue_pi (FUTEXP, _, _, MUTEX, _, PRIVATE) will
> 


-- 
Cheers,
Carlos.

