This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

Re: [PATCH] powerpc: Fix usage of elision transient failure adapt param


LGTM, thanks for checking on that.

On 20-08-2015 16:08, Paul E. Murphy wrote:
> The skip_lock_out_of_tbegin_retries adaptive parameter was
> not being used correctly, nor as described.  This prevents
> a fallback for all users of the lock if a transient abort
> occurs within the accepted number of retries.
> 
> 2015-08-20  Paul E. Murphy  <murphyp@linux.vnet.ibm.com>
> 	* sysdeps/powerpc/nptl/elide.h (__elide_lock): Fix
> 	usage of .skip_lock_out_of_tbegin_retries.
> 	* sysdeps/unix/sysv/linux/powerpc/elision-lock.c (__lll_lock_elision):
> 	Likewise, and respect a value of try_tbegin <= 0.
> ---
>  sysdeps/powerpc/nptl/elide.h                   |   10 +++++-----
>  sysdeps/unix/sysv/linux/powerpc/elision-lock.c |   15 ++++++---------
>  2 files changed, 11 insertions(+), 14 deletions(-)
> 
> diff --git a/sysdeps/powerpc/nptl/elide.h b/sysdeps/powerpc/nptl/elide.h
> index 389f5a5..38e1565 100644
> --- a/sysdeps/powerpc/nptl/elide.h
> +++ b/sysdeps/powerpc/nptl/elide.h
> @@ -55,14 +55,14 @@ __elide_lock (uint8_t *adapt_count, int is_lock_free)
>  		*adapt_count = __elision_aconf.skip_lock_internal_abort;
>  	      break;
>  	    }
> -	  /* Same logic as above, but for a number of temporary failures in a
> -	     a row.  */
> -	  else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
> -		   && __elision_aconf.try_tbegin > 0)
> -	    *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
>  	}
>       }
>  
> +  /* Fall back to locks for a bit if retries have been exhausted.  */
> +  if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
> +      && __elision_aconf.try_tbegin > 0)
> +    *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
> +
>    return false;
>  }
>  
> diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
> index 7f9bcc2..c6731ca 100644
> --- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
> +++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
> @@ -72,8 +72,7 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
>        goto use_lock;
>      }
>  
> -  int try_begin = aconf.try_tbegin;
> -  while (1)
> +  for (int i = aconf.try_tbegin; i > 0; i--)
>      {
>        if (__builtin_tbegin (0))
>  	{
> @@ -87,21 +86,19 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
>  	  /* A persistent failure indicates that a retry will probably
>  	     result in another failure.  Use normal locking now and
>  	     for the next couple of calls.  */
> -	  if (try_begin-- <= 0
> -	      || _TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
> +	  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
>  	    {
>  	      if (aconf.skip_lock_internal_abort > 0)
>  		*adapt_count = aconf.skip_lock_internal_abort;
>  	      goto use_lock;
>  	    }
> -	  /* Same logic as above, but for for a number of temporary failures
> -	     in a row.  */
> -	  else if (aconf.skip_lock_out_of_tbegin_retries > 0
> -                   && aconf.try_tbegin > 0)
> -	    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
>  	}
>       }
>  
> +  /* Fall back to locks for a bit if retries have been exhausted.  */
> +  if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
> +    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
> +
>  use_lock:
>    return LLL_LOCK ((*lock), pshared);
>  }
> 


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]