This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[RFC] [PATCH 1/2] nptl: Enable pthread mutex to use the TP futex


This patch adds a new protocol attribute PTHREAD_THROUGHPUT_NP to
the pthread mutex code that enables the use of the new TP futexes.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 nptl/pthreadP.h                              | 18 ++++++++++
 nptl/pthread_mutex_init.c                    | 27 +++++++++++++++
 nptl/pthread_mutex_lock.c                    | 49 ++++++++++++++++++++++----
 nptl/pthread_mutex_timedlock.c               | 52 ++++++++++++++++++++++++----
 nptl/pthread_mutex_trylock.c                 | 20 +++++++++--
 nptl/pthread_mutex_unlock.c                  | 20 ++++++++---
 nptl/pthread_mutexattr_setprotocol.c         |  1 +
 sysdeps/nptl/pthread.h                       |  3 ++
 sysdeps/unix/sysv/linux/hppa/pthread.h       |  3 +-
 sysdeps/unix/sysv/linux/lowlevellock-futex.h |  2 ++
 10 files changed, 173 insertions(+), 22 deletions(-)

diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index dbf46b0..e74010b 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -107,6 +107,24 @@ enum
 	  PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_ELISION_NP,
   PTHREAD_MUTEX_TIMED_NO_ELISION_NP =
 	  PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_NO_ELISION_NP,
+
+  PTHREAD_MUTEX_THROUGHPUT_NP = 8,
+  PTHREAD_MUTEX_TP_NORMAL_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_NORMAL,
+  PTHREAD_MUTEX_TP_RECURSIVE_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+  PTHREAD_MUTEX_TP_ERRORCHECK_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+  PTHREAD_MUTEX_TP_ADAPTIVE_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+  PTHREAD_MUTEX_TP_ROBUST_NORMAL_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP,
+  PTHREAD_MUTEX_TP_ROBUST_RECURSIVE_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP,
+  PTHREAD_MUTEX_TP_ROBUST_ERRORCHECK_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP,
+  PTHREAD_MUTEX_TP_ROBUST_ADAPTIVE_NP
+  = PTHREAD_MUTEX_THROUGHPUT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP,
 };
 #define PTHREAD_MUTEX_PSHARED_BIT 128
 
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index 6f2fc80..d96570f 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -51,6 +51,24 @@ prio_inherit_missing (void)
   return true;
 }
 
+static bool
+tp_futex_missing (void)
+{
+#ifdef __NR_futex
+  static int tp_futex_supported;
+  if (__glibc_unlikely (tp_futex_supported == 0))
+    {
+      int lock = 0;
+      INTERNAL_SYSCALL_DECL (err);
+      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK, 0, 0);
+      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
+      tp_futex_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
+    }
+  return __glibc_unlikely (tp_futex_supported < 0);
+#endif
+  return true;
+}
+
 int
 __pthread_mutex_init (pthread_mutex_t *mutex,
 		      const pthread_mutexattr_t *mutexattr)
@@ -76,6 +94,11 @@ __pthread_mutex_init (pthread_mutex_t *mutex,
 	return ENOTSUP;
       break;
 
+    case PTHREAD_THROUGHPUT_NP << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+      if (__glibc_unlikely (tp_futex_missing ()))
+        return ENOTSUP;
+      break;
+
     default:
       /* XXX: For now we don't support robust priority protected mutexes.  */
       if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
@@ -123,6 +146,10 @@ __pthread_mutex_init (pthread_mutex_t *mutex,
       mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
       break;
 
+    case PTHREAD_THROUGHPUT_NP << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+      mutex->__data.__kind |= PTHREAD_MUTEX_THROUGHPUT_NP;
+      break;
+
     default:
       break;
     }
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index 7f8254b..afdf058 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -339,7 +339,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
-    /* The PI support requires the Linux futex system call.  If that's not
+    /* The PI/TP support requires the Linux futex system call.  If that's not
        available, pthread_mutex_init should never have allowed the type to
        be set.  So it will get the default case for an invalid type.  */
 #ifdef __NR_futex
@@ -351,13 +351,37 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ADAPTIVE_NP:
       {
 	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
 	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	int lock_op, unlock_op, futex_val, e;
+
+	if (mutex->__data.__kind & PTHREAD_MUTEX_THROUGHPUT_NP)
+	  {
+	    /* We will try 5 userspace locking attempts before doing kernel
+	       lock.  */
+	    lock_op = FUTEX_LOCK;
+	    unlock_op = FUTEX_UNLOCK;
+	    futex_val = 5;
+	  }
+	else
+	  {
+	    lock_op = FUTEX_LOCK_PI;
+	    unlock_op = FUTEX_UNLOCK_PI;
+	    futex_val = 1;
+	  }
 
 	if (robust)
 	  {
-	    /* Note: robust PI futexes are signaled by setting bit 0.  */
+	    /* Note: robust PI/TP futexes are signaled by setting bit 0.  */
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
 				     | 1));
@@ -411,9 +435,22 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
 			   : PTHREAD_MUTEX_PSHARED (mutex));
 	    INTERNAL_SYSCALL_DECL (__err);
-	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-				      __lll_private_flag (FUTEX_LOCK_PI,
-							  private), 1, 0);
+	    do
+	      {
+		/* We need to do userspace locking for TP futexes.  */
+		e = INTERNAL_SYSCALL (futex, __err, 4,
+				&mutex->__data.__lock,
+				__lll_private_flag (lock_op, private),
+				futex_val, 0);
+		if (lock_op != FUTEX_LOCK
+		    || !INTERNAL_SYSCALL_ERROR_P (e, __err)
+		    || INTERNAL_SYSCALL_ERRNO (e, __err) != EAGAIN)
+		  break;
+		oldval = atomic_compare_and_exchange_val_acq
+				(&mutex->__data.__lock, newval, 0);
+		futex_val--;
+	      }
+	    while (oldval != 0 && (oldval & FUTEX_TID_MASK) != id);
 
 	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
 		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
@@ -474,7 +511,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 
 	    INTERNAL_SYSCALL_DECL (__err);
 	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-			      __lll_private_flag (FUTEX_UNLOCK_PI,
+			      __lll_private_flag (unlock_op,
 						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
 			      0, 0);
 
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 45f3454..7d80b83 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -337,13 +337,38 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
     case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ADAPTIVE_NP:
       {
 	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
 	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	int lock_op, unlock_op, futex_val, e;
+
+	if (mutex->__data.__kind & PTHREAD_MUTEX_THROUGHPUT_NP)
+	  {
+	    /* We will try 5 userspace locking attempts before doing kernel
+	       lock.  */
+
+	    lock_op = FUTEX_LOCK;
+	    unlock_op = FUTEX_UNLOCK;
+	    futex_val = 5;
+	  }
+	else
+	  {
+	    lock_op = FUTEX_LOCK_PI;
+	    unlock_op = FUTEX_UNLOCK_PI;
+	    futex_val = 1;
+	  }
 
 	if (robust)
 	  {
-	    /* Note: robust PI futexes are signaled by setting bit 0.  */
+	    /* Note: robust PI/TP futexes are signaled by setting bit 0.  */
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
 				     | 1));
@@ -371,7 +396,7 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
 		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
-		/* Just bump the counter.  */
+		/* Just bump the counter.  */
 		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		  /* Overflow of the counter.  */
 		  return EAGAIN;
@@ -397,10 +422,23 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
 			   : PTHREAD_MUTEX_PSHARED (mutex));
 	    INTERNAL_SYSCALL_DECL (__err);
 
-	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-				      __lll_private_flag (FUTEX_LOCK_PI,
-							  private), 1,
-				      abstime);
+	    do
+	      {
+		/* We need to do userspace locking for TP futexes.  */
+		e = INTERNAL_SYSCALL (futex, __err, 4,
+				&mutex->__data.__lock,
+				__lll_private_flag (lock_op, private),
+				futex_val, abstime);
+		if (lock_op != FUTEX_LOCK
+		    || !INTERNAL_SYSCALL_ERROR_P (e, __err)
+		    || INTERNAL_SYSCALL_ERRNO (e, __err) != EAGAIN)
+		  break;
+		oldval = atomic_compare_and_exchange_val_acq
+				(&mutex->__data.__lock, id, 0);
+		futex_val--;
+	      }
+	    while (oldval != 0 && (oldval & FUTEX_TID_MASK) != id);
+
 	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
 	      {
 		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
@@ -479,7 +517,7 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
 
 	    INTERNAL_SYSCALL_DECL (__err);
 	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-			      __lll_private_flag (FUTEX_UNLOCK_PI,
+			      __lll_private_flag (unlock_op,
 						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
 			      0, 0);
 
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index ec7da61..9f0189f 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -186,7 +186,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 
       return 0;
 
-    /* The PI support requires the Linux futex system call.  If that's not
+    /* The PI/TP support requires the Linux futex system call.  If that's not
        available, pthread_mutex_init should never have allowed the type to
        be set.  So it will get the default case for an invalid type.  */
 #ifdef __NR_futex
@@ -198,12 +198,21 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
     case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ADAPTIVE_NP:
       {
 	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
 	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+	bool tp_futex = mutex->__data.__kind & PTHREAD_MUTEX_THROUGHPUT_NP;
 
 	if (robust)
-	  /* Note: robust PI futexes are signaled by setting bit 0.  */
+	  /* Note: robust PI/TP futexes are signaled by setting bit 0.  */
 	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
 				   | 1));
@@ -247,6 +256,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 		return EBUSY;
 	      }
 
+	    if (tp_futex)
+		goto futex_dead;
+
 	    assert (robust);
 
 	    /* The mutex owner died.  The kernel will now take care of
@@ -272,6 +284,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 
 	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
 	  {
+futex_dead:
 	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
 
 	    /* We got the mutex.  */
@@ -298,7 +311,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 
 	    INTERNAL_SYSCALL_DECL (__err);
 	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-			      __lll_private_flag (FUTEX_UNLOCK_PI,
+			      __lll_private_flag (tp_futex ? FUTEX_UNLOCK
+							   : FUTEX_UNLOCK_PI,
 						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
 			      0, 0);
 
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index c7e6795..58cbffa 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -176,6 +176,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
        be set.  So it will get the default case for an invalid type.  */
 #ifdef __NR_futex
     case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_RECURSIVE_NP:
       /* Recursive mutex.  */
       if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
 	return EPERM;
@@ -183,9 +184,10 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
       if (--mutex->__data.__count != 0)
 	/* We still hold the mutex.  */
 	return 0;
-      goto continue_pi_non_robust;
+      goto continue_pi_tp_non_robust;
 
     case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_RECURSIVE_NP:
       /* Recursive mutex.  */
       if ((mutex->__data.__lock & FUTEX_TID_MASK)
 	  == THREAD_GETMEM (THREAD_SELF, tid)
@@ -206,7 +208,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
 	/* We still hold the mutex.  */
 	return 0;
 
-      goto continue_pi_robust;
+      goto continue_pi_tp_robust;
 
     case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
     case PTHREAD_MUTEX_PI_NORMAL_NP:
@@ -214,6 +216,12 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_TP_ROBUST_ADAPTIVE_NP:
       if ((mutex->__data.__lock & FUTEX_TID_MASK)
 	  != THREAD_GETMEM (THREAD_SELF, tid)
 	  || ! lll_islocked (mutex->__data.__lock))
@@ -230,7 +238,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
 
       if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
 	{
-	continue_pi_robust:
+	continue_pi_tp_robust:
 	  /* Remove mutex from the list.
 	     Note: robust PI futexes are signaled by setting bit 0.  */
 	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
@@ -242,7 +250,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
 	  DEQUEUE_MUTEX (mutex);
 	}
 
-    continue_pi_non_robust:
+    continue_pi_tp_non_robust:
       mutex->__data.__owner = newowner;
       if (decr)
 	/* One less user.  */
@@ -260,6 +268,8 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
 	 kernel take care of the situation.  Use release MO in the CAS to
 	 synchronize with acquire MO in lock acquisitions.  */
       int l = atomic_load_relaxed (&mutex->__data.__lock);
+      int unlock_op = (mutex->__data.__kind & PTHREAD_MUTEX_THROUGHPUT_NP)
+		      ? FUTEX_UNLOCK : FUTEX_UNLOCK_PI;
       do
 	{
 	  if (((l & FUTEX_WAITERS) != 0)
@@ -267,7 +277,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
 	    {
 	      INTERNAL_SYSCALL_DECL (__err);
 	      INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
-				__lll_private_flag (FUTEX_UNLOCK_PI, private));
+				__lll_private_flag (unlock_op, private));
 	      break;
 	    }
 	}
diff --git a/nptl/pthread_mutexattr_setprotocol.c b/nptl/pthread_mutexattr_setprotocol.c
index 301fd0a..52f71b9 100644
--- a/nptl/pthread_mutexattr_setprotocol.c
+++ b/nptl/pthread_mutexattr_setprotocol.c
@@ -26,6 +26,7 @@ pthread_mutexattr_setprotocol (pthread_mutexattr_t *attr, int protocol)
 {
   if (protocol != PTHREAD_PRIO_NONE
       && protocol != PTHREAD_PRIO_INHERIT
+      && protocol != PTHREAD_THROUGHPUT_NP
       && __builtin_expect (protocol != PTHREAD_PRIO_PROTECT, 0))
     return EINVAL;
 
diff --git a/sysdeps/nptl/pthread.h b/sysdeps/nptl/pthread.h
index 632ea7b..b2fe9e6 100644
--- a/sysdeps/nptl/pthread.h
+++ b/sysdeps/nptl/pthread.h
@@ -79,6 +79,9 @@ enum
   PTHREAD_PRIO_NONE,
   PTHREAD_PRIO_INHERIT,
   PTHREAD_PRIO_PROTECT
+#ifdef __USE_GNU
+  , PTHREAD_THROUGHPUT_NP
+#endif
 };
 #endif
 
diff --git a/sysdeps/unix/sysv/linux/hppa/pthread.h b/sysdeps/unix/sysv/linux/hppa/pthread.h
index d197374..37f9fdf 100644
--- a/sysdeps/unix/sysv/linux/hppa/pthread.h
+++ b/sysdeps/unix/sysv/linux/hppa/pthread.h
@@ -78,7 +78,8 @@ enum
 {
   PTHREAD_PRIO_NONE,
   PTHREAD_PRIO_INHERIT,
-  PTHREAD_PRIO_PROTECT
+  PTHREAD_PRIO_PROTECT,
+  PTHREAD_THROUGHPUT_NP
 };
 #endif
 
diff --git a/sysdeps/unix/sysv/linux/lowlevellock-futex.h b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
index bb4fbae..89b1674 100644
--- a/sysdeps/unix/sysv/linux/lowlevellock-futex.h
+++ b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
@@ -38,6 +38,8 @@
 #define FUTEX_WAKE_BITSET	10
 #define FUTEX_WAIT_REQUEUE_PI   11
 #define FUTEX_CMP_REQUEUE_PI    12
+#define FUTEX_LOCK		13
+#define FUTEX_UNLOCK		14
 #define FUTEX_PRIVATE_FLAG	128
 #define FUTEX_CLOCK_REALTIME	256
 
-- 
1.8.3.1


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]