This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Re: [PATCH] Add compiler barriers around modifications of the robust mutex list for pthread_mutex_trylock.


On 2/6/19 6:25 AM, Stefan Liebler wrote:
> Hi Carlos,
> I've updated the patch with three additional comments and referenced the filed bug.
> Please review it once again before I commit it to master and cherry-pick it to the release branches.

Thank you! Reviewed.

>> Yes, I did that backport to RHEL 7.6. These fixes are just "further"
>> fixes, right? I'll work on getting this fixed in RHEL 7.7 and RHEL 8
>> for all arches.
> Sounds great.
> That's the same fix for pthread_mutex_trylock as previously done for pthread_mutex_lock and pthread_mutex_timedlock.

I've filed these:
https://bugzilla.redhat.com/show_bug.cgi?id=1672771
https://bugzilla.redhat.com/show_bug.cgi?id=1672773

Feel free to comment on them, or to verify whether they will be needed in
RHEL 7.7 or RHEL 8. I haven't analyzed the disassembly yet; if you could
have a look, that would help.

> 20190206_pthread_mutex_trylock_barriers.patch
> 
> commit b4c6ee19e804b0e90c117ec353ce67d321f0319b
> Author: Stefan Liebler <stli@linux.ibm.com>
> Date:   Wed Feb 6 11:27:03 2019 +0100
> 
>     Add compiler barriers around modifications of the robust mutex list for pthread_mutex_trylock. [BZ #24180]
>     
>     While debugging a kernel warning, Thomas Gleixner, Sebastian Sewior and
>     Heiko Carstens found a bug in pthread_mutex_trylock due to misordered
>     instructions:
>     140:   a5 1b 00 01             oill    %r1,1
>     144:   e5 48 a0 f0 00 00       mvghi   240(%r10),0   <--- THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>     14a:   e3 10 a0 e0 00 24       stg     %r1,224(%r10) <--- last THREAD_SETMEM of ENQUEUE_MUTEX_PI
>     
>     vs (with compiler barriers):
>     140:   a5 1b 00 01             oill    %r1,1
>     144:   e3 10 a0 e0 00 24       stg     %r1,224(%r10)
>     14a:   e5 48 a0 f0 00 00       mvghi   240(%r10),0
>     
>     Please have a look at the discussion:
>     "Re: WARN_ON_ONCE(!new_owner) within wake_futex_pi() triggerede"
>     (https://lore.kernel.org/lkml/20190202112006.GB3381@osiris/)
>     
>     This patch is introducing the same compiler barriers and comments
>     for pthread_mutex_trylock as introduced for pthread_mutex_lock and
>     pthread_mutex_timedlock by commit 8f9450a0b7a9e78267e8ae1ab1000ebca08e473e
>     "Add compiler barriers around modifications of the robust mutex list."
>     
>     ChangeLog:
>     
>             [BZ #24180]
>             * nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock):
>             Add compiler barriers and comments.

OK for master.

Reviewed-by: Carlos O'Donell <carlos@redhat.com>
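
For readers following the disassembly in the commit message: both writes are
plain stores issued via THREAD_SETMEM with no data dependency between them,
so the compiler is free to swap them; the empty asm with a "memory" clobber
is what pins the program order. A minimal sketch of that pattern
(illustrative only, not the glibc code; the struct and function names here
are made up):

    #include <stdint.h>

    /* Stand-ins for the thread descriptor fields written by
       ENQUEUE_MUTEX_PI and the op_pending bookkeeping.  */
    struct robust_head_sketch
    {
      void *list;             /* cf. robust_head.list             */
      void *list_op_pending;  /* cf. robust_head.list_op_pending  */
    };

    void
    enqueue_then_clear (struct robust_head_sketch *rh, void *node)
    {
      /* Publish the node on the robust list (PI mutexes set bit 0)...  */
      rh->list = (void *) ((uintptr_t) node | 1);
      /* ...and only then retract op_pending.  Without this compiler
         barrier the second store may be emitted first, which is exactly
         the mvghi/stg swap shown in the commit message.  */
      __asm ("" ::: "memory");
      rh->list_op_pending = NULL;
    }

These are compiler barriers only; the rationale for why that suffices is in
the comments at ENQUEUE_MUTEX that the new comments reference.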

> diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
> index 8fe43b8f0f..bf2869eca2 100644
> --- a/nptl/pthread_mutex_trylock.c
> +++ b/nptl/pthread_mutex_trylock.c
> @@ -94,6 +94,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>      case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
>        THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
>  		     &mutex->__data.__list.__next);
> +      /* We need to set op_pending before starting the operation.  Also
> +	 see comments at ENQUEUE_MUTEX.  */
> +      __asm ("" ::: "memory");

OK. 1/10 barriers.
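
To make the shape of the ten barriers easier to follow, here is a compressed
sketch of the ordering they enforce on the robust paths (placeholder types
and helpers, not the glibc internals):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stddef.h>

    /* Simplified stand-ins; the real code uses THREAD_SETMEM and the
       lll_* / futex primitives.  */
    struct sketch_mutex { atomic_int lock; void *list_node; };

    static _Thread_local void *op_pending;   /* cf. robust_head.list_op_pending */
    static _Thread_local void *robust_list;  /* cf. robust_head.list */

    static int
    sketch_trylock (struct sketch_mutex *m, int my_tid)
    {
      op_pending = &m->list_node;   /* announce the pending operation */
      __asm ("" ::: "memory");      /* set op_pending before starting it */

      int expected = 0;
      if (!atomic_compare_exchange_strong (&m->lock, &expected, my_tid))
        {
          op_pending = NULL;        /* nothing was enqueued; just retract */
          return EBUSY;
        }

      __asm ("" ::: "memory");      /* must not enqueue before acquiring */
      robust_list = &m->list_node;  /* stand-in for ENQUEUE_MUTEX */
      __asm ("" ::: "memory");      /* clear op_pending only after enqueuing */
      op_pending = NULL;
      return 0;
    }

Every barrier below is an instance of one of these three points; the early
return paths only clear op_pending without having enqueued anything, which
is why they get a comment but no barrier.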

>  
>        oldval = mutex->__data.__lock;
>        do
> @@ -119,7 +122,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	      /* But it is inconsistent unless marked otherwise.  */
>  	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
>  
> +	      /* We must not enqueue the mutex before we have acquired it.
> +		 Also see comments at ENQUEUE_MUTEX.  */
> +	      __asm ("" ::: "memory");

OK. 2/10 barriers.

>  	      ENQUEUE_MUTEX (mutex);
> +	      /* We need to clear op_pending after we enqueue the mutex.  */
> +	      __asm ("" ::: "memory");

OK. 3/10 barriers.

>  	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  
>  	      /* Note that we deliberately exit here.  If we fall
> @@ -135,6 +143,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	      int kind = PTHREAD_MUTEX_TYPE (mutex);
>  	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
>  		{
> +		  /* We do not need to ensure ordering wrt another memory
> +		     access.  Also see comments at ENQUEUE_MUTEX. */

OK. 1/9 comments.

>  		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
>  				 NULL);
>  		  return EDEADLK;
> @@ -142,6 +152,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  
>  	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
>  		{
> +		  /* We do not need to ensure ordering wrt another memory
> +		     access.  */

OK. 2/9 comments.

>  		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
>  				 NULL);
>  
> @@ -160,6 +172,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  							id, 0);
>  	  if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
>  	    {
> +	      /* We haven't acquired the lock as it is already acquired by
> +		 another owner.  We do not need to ensure ordering wrt another
> +		 memory access.  */

OK. 3/9 comments.

>  	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  
>  	      return EBUSY;
> @@ -173,13 +188,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	      if (oldval == id)
>  		lll_unlock (mutex->__data.__lock,
>  			    PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
> +	      /* FIXME This violates the mutex destruction requirements.  See
> +		 __pthread_mutex_unlock_full.  */

OK. 4/9 comments.

>  	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  	      return ENOTRECOVERABLE;
>  	    }
>  	}
>        while ((oldval & FUTEX_OWNER_DIED) != 0);
>  
> +      /* We must not enqueue the mutex before we have acquired it.
> +	 Also see comments at ENQUEUE_MUTEX.  */
> +      __asm ("" ::: "memory");

OK. 4/10 barriers.

>        ENQUEUE_MUTEX (mutex);
> +      /* We need to clear op_pending after we enqueue the mutex.  */
> +      __asm ("" ::: "memory");

OK. 5/10 barriers.

>        THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  
>        mutex->__data.__owner = id;
> @@ -211,10 +233,15 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	}
>  
>  	if (robust)
> -	  /* Note: robust PI futexes are signaled by setting bit 0.  */
> -	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
> -			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
> -				   | 1));
> +	  {
> +	    /* Note: robust PI futexes are signaled by setting bit 0.  */
> +	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
> +			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
> +				     | 1));
> +	    /* We need to set op_pending before starting the operation.  Also
> +	       see comments at ENQUEUE_MUTEX.  */
> +	    __asm ("" ::: "memory");

OK. 6/10 barriers.

> +	  }
>  
>  	oldval = mutex->__data.__lock;
>  
> @@ -223,12 +250,16 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	  {
>  	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
>  	      {
> +		/* We do not need to ensure ordering wrt another memory
> +		   access.  */

OK. 5/9 comments.

>  		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  		return EDEADLK;
>  	      }
>  
>  	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
>  	      {
> +		/* We do not need to ensure ordering wrt another memory
> +		   access.  */

OK. 6/9 comments.

>  		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  
>  		/* Just bump the counter.  */
> @@ -250,6 +281,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	  {
>  	    if ((oldval & FUTEX_OWNER_DIED) == 0)
>  	      {
> +		/* We haven't acquired the lock as it is already acquired by
> +		   another owner.  We do not need to ensure ordering wrt another
> +		   memory access.  */

OK. 7/9 comments.

>  		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  
>  		return EBUSY;
> @@ -270,6 +304,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
>  		&& INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
>  	      {
> +		/* The kernel has not yet finished the mutex owner death.
> +		   We do not need to ensure ordering wrt another memory
> +		   access.  */

OK. 8/9 comments.

>  		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  
>  		return EBUSY;
> @@ -287,7 +324,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  	    /* But it is inconsistent unless marked otherwise.  */
>  	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
>  
> +	    /* We must not enqueue the mutex before we have acquired it.
> +	       Also see comments at ENQUEUE_MUTEX.  */
> +	    __asm ("" ::: "memory");

OK. 7/10 barriers.

>  	    ENQUEUE_MUTEX (mutex);
> +	    /* We need to clear op_pending after we enqueue the mutex.  */
> +	    __asm ("" ::: "memory");

OK. 8/10 barriers.

>  	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  
>  	    /* Note that we deliberately exit here.  If we fall
> @@ -310,13 +352,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
>  						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
>  			      0, 0);
>  
> +	    /* To the kernel, this will be visible after the kernel has
> +	       acquired the mutex in the syscall.  */

OK. 9/9 comments.

>  	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  	    return ENOTRECOVERABLE;
>  	  }
>  
>  	if (robust)
>  	  {
> +	    /* We must not enqueue the mutex before we have acquired it.
> +	       Also see comments at ENQUEUE_MUTEX.  */
> +	    __asm ("" ::: "memory");

OK. 9/10 barriers.

>  	    ENQUEUE_MUTEX_PI (mutex);
> +	    /* We need to clear op_pending after we enqueue the mutex.  */
> +	    __asm ("" ::: "memory");

OK. 10/10 barriers.

>  	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
>  	  }
>  


-- 
Cheers,
Carlos.

