Deprecated and removed features, and other changes affecting compatibility:

* The glibc.tune tunable namespace has been renamed to glibc.cpu and the
  tunable glibc.tune.cpu has been renamed to glibc.cpu.name.
diff --git a/nptl/Makefile b/nptl/Makefile
index 34ae830..997da5e 100644
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -145,7 +145,8 @@ libpthread-routines = nptl-init nptlfreeres vars events version pt-interp \
mtx_destroy mtx_init mtx_lock mtx_timedlock \
mtx_trylock mtx_unlock call_once cnd_broadcast \
cnd_destroy cnd_init cnd_signal cnd_timedwait cnd_wait \
- tss_create tss_delete tss_get tss_set pthread_mutex_conf
+ tss_create tss_delete tss_get tss_set pthread_mutex_conf \
+ mcs_lock
# pthread_setuid pthread_seteuid pthread_setreuid \
# pthread_setresuid \
# pthread_setgid pthread_setegid pthread_setregid \
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 04e3f08..9f47129 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -749,7 +749,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
might have happened in the kernel. */
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
- offsetof (pthread_mutex_t,
- __data.__list.__next));
+ __data.__list.__list_t.__next));
pd->robust_head.list_op_pending = NULL;
#if __PTHREAD_MUTEX_HAVE_PREV
pd->robust_prev = &pd->robust_head;
diff --git a/nptl/descr.h b/nptl/descr.h
index 9c01e1b..dc24dd8 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -184,38 +184,38 @@ struct pthread
FIXME We should use relaxed MO atomic operations here and signal fences
because this kind of concurrency is similar to synchronizing with a
signal handler. */
-# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))
+# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __list_t.__next))
# define ENQUEUE_MUTEX_BOTH(mutex, val) \
do { \
__pthread_list_t *next = (__pthread_list_t *) \
((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \
- QUEUE_PTR_ADJUST); \
- next->__prev = (void *) &mutex->__data.__list.__next; \
- mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \
+ next->__list_t.__prev = (void *) &mutex->__data.__list.__list_t.__next; \
+ mutex->__data.__list.__list_t.__next = THREAD_GETMEM (THREAD_SELF, \
robust_head.list); \
- mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
+ mutex->__data.__list.__list_t.__prev = (void *) &THREAD_SELF->robust_head; \
/* Ensure that the new list entry is ready before we insert it. */ \
__asm ("" ::: "memory"); \
THREAD_SETMEM (THREAD_SELF, robust_head.list, \
- (void *) (((uintptr_t) &mutex->__data.__list.__next) \
+ (void *) (((uintptr_t) &mutex->__data.__list.__list_t.__next) \
| val)); \
} while (0)
# define DEQUEUE_MUTEX(mutex) \
do { \
__pthread_list_t *next = (__pthread_list_t *) \
- ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \
+ ((char *) (((uintptr_t) mutex->__data.__list.__list_t.__next) & ~1ul) \
- QUEUE_PTR_ADJUST); \
- next->__prev = mutex->__data.__list.__prev; \
+ next->__list_t.__prev = mutex->__data.__list.__list_t.__prev; \
__pthread_list_t *prev = (__pthread_list_t *) \
- ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \
+ ((char *) (((uintptr_t) mutex->__data.__list.__list_t.__prev) & ~1ul) \
- QUEUE_PTR_ADJUST); \
- prev->__next = mutex->__data.__list.__next; \
+ prev->__list_t.__next = mutex->__data.__list.__list_t.__next; \
/* Ensure that we remove the entry from the list before we change the \
__next pointer of the entry, which is read by the kernel. */ \
__asm ("" ::: "memory"); \
- mutex->__data.__list.__prev = NULL; \
- mutex->__data.__list.__next = NULL; \
+ mutex->__data.__list.__list_t.__prev = NULL; \
+ mutex->__data.__list.__list_t.__next = NULL; \
} while (0)
#else
union
@@ -226,7 +226,7 @@ struct pthread
# define ENQUEUE_MUTEX_BOTH(mutex, val) \
do { \
- mutex->__data.__list.__next \
+ mutex->__data.__list.__list_t.__next \
= THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
/* Ensure that the new list entry is ready before we insert it. */ \
__asm ("" ::: "memory"); \
@@ -253,7 +253,7 @@ struct pthread
/* Ensure that we remove the entry from the list before we change the \
__next pointer of the entry, which is read by the kernel. */ \
__asm ("" ::: "memory"); \
- mutex->__data.__list.__next = NULL; \
+ mutex->__data.__list.__list_t.__next = NULL; \
} \
} while (0)
#endif
diff --git a/nptl/mcs_lock.c b/nptl/mcs_lock.c
new file mode 100644
index 0000000..43f90dc
--- /dev/null
+++ b/nptl/mcs_lock.c
@@ -0,0 +1,72 @@
+/* MCS-style lock for queuing spinners
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "pthreadP.h"
+#include <atomic.h>
+
+static __thread mcs_lock_t node = {
+ NULL,
+ 0,
+};
+
+void mcs_lock (mcs_lock_t **lock)
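(The new file is cut off here after the mcs_lock prototype; the hunk header advertises 72 lines, so the acquire and release bodies are not shown in this excerpt.  For orientation only, a minimal sketch of the conventional MCS queue-lock protocol follows, written against the mcs_lock_t type and the per-thread `node' declared above, plus glibc's <atomic.h> macros.  It illustrates the technique; it is not the patch's actual code.)

/* Sketch only: conventional MCS acquire/release, assuming mcs_lock_t,
   the per-thread `node' above, and glibc's <atomic.h> macros.  */

void
mcs_lock (mcs_lock_t **lock)
{
  mcs_lock_t *self = &node;

  self->next = NULL;
  self->locked = 0;

  /* Swing the tail pointer to our node; the previous tail (if any)
     becomes our predecessor.  */
  mcs_lock_t *prev = atomic_exchange_acquire (lock, self);
  if (prev == NULL)
    /* The queue was empty, so we own the spinner slot right away.  */
    return;

  /* Link ourselves behind the predecessor, then spin on our own flag
     until the predecessor hands the slot over.  */
  atomic_store_release (&prev->next, self);
  while (atomic_load_acquire (&self->locked) == 0)
    atomic_spin_nop ();
}

void
mcs_unlock (mcs_lock_t **lock)
{
  mcs_lock_t *self = &node;
  mcs_lock_t *successor = atomic_load_relaxed (&self->next);

  if (successor == NULL)
    {
      /* No successor is linked yet; if the tail still points at us,
	 the queue becomes empty and we are done.  */
      if (atomic_compare_and_exchange_bool_acq (lock, (mcs_lock_t *) NULL,
						self) == 0)
	return;
      /* Otherwise a successor is mid-enqueue; wait for its link.  */
      while ((successor = atomic_load_relaxed (&self->next)) == NULL)
	atomic_spin_nop ();
    }

  /* Pass the spinner slot to the next queued thread.  */
  atomic_store_release (&successor->locked, 1);
}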
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 20ff3fd..d20481a 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -289,7 +289,7 @@ __pthread_initialize_minimal_internal (void)
#ifdef __NR_set_robust_list
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
- offsetof (pthread_mutex_t,
- __data.__list.__next));
+ __data.__list.__list_t.__next));
INTERNAL_SYSCALL_DECL (err);
int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 7f16ba9..1179864 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -67,7 +67,7 @@ static inline short max_adaptive_count (void)
/* Internal mutex type value. */
enum
{
- PTHREAD_MUTEX_KIND_MASK_NP = 3,
+ PTHREAD_MUTEX_KIND_MASK_NP = 7,
PTHREAD_MUTEX_ELISION_NP = 256,
PTHREAD_MUTEX_NO_ELISION_NP = 512,
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index 5cf290c..99f1707 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -111,6 +111,11 @@ __pthread_mutex_init (pthread_mutex_t *mutex,
return ENOTSUP;
#endif
+  /* Robust mutexes do not support the PTHREAD_MUTEX_QUEUESPINNER_NP
+     GNU extension.  */
+ if ((imutexattr->mutexkind & PTHREAD_MUTEX_QUEUESPINNER_NP) != 0)
+ return ENOTSUP;
+
mutex_kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
}
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index 474b4df..7b81470 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -26,6 +26,7 @@
#include <atomic.h>
#include <lowlevellock.h>
#include <stap-probe.h>
+#include <mcs_lock.h>
#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private) ({ \
@@ -118,6 +119,35 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
mutex->__data.__count = 1;
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+ == PTHREAD_MUTEX_QUEUESPINNER_NP, 1))
+ {
+ if (! __is_smp)
+ goto simple;
+
+ if (LLL_MUTEX_TRYLOCK (mutex) != 0)
+ {
+ int cnt = 0;
+ int max_cnt = MIN (max_adaptive_count (),
+ mutex->__data.__spins * 2 + 10);
+ int val = 0;
+
+ mcs_lock ((mcs_lock_t **)&mutex->__data.__list.mcs_lock);
+
+ do
+ {
+ atomic_spin_nop ();
+ val = atomic_load_relaxed (&mutex->__data.__lock);
+ }
+ while (val != 0 && ++cnt < max_cnt);
+
+ mcs_unlock ((mcs_lock_t **)&mutex->__data.__list.mcs_lock);
+ LLL_MUTEX_LOCK (mutex);
+
+ mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
+ }
+ break;
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
- &mutex->__data.__list.__next);
+ &mutex->__data.__list.__list_t.__next);
/* We need to set op_pending before starting the operation. Also
see comments at ENQUEUE_MUTEX. */
__asm ("" ::: "memory");
@@ -353,7 +378,7 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
{
/* Note: robust PI futexes are signaled by setting bit 0. */
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
- (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ (void *) (((uintptr_t) &mutex->__data.__list.__list_t.__next)
| 1));
/* We need to set op_pending before starting the operation. Also
see comments at ENQUEUE_MUTEX. */
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index fa90c1d..a22de5e 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -78,6 +78,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
FORCE_ELISION (mutex, goto elision);
/*FALL THROUGH*/
case PTHREAD_MUTEX_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_QUEUESPINNER_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
if (lll_trylock (mutex->__data.__lock) != 0)
break;
@@ -93,7 +94,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
- &mutex->__data.__list.__next);
+ &mutex->__data.__list.__list_t.__next);
oldval = mutex->__data.__lock;
do
@@ -213,7 +214,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
if (robust)
/* Note: robust PI futexes are signaled by setting bit 0. */
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
- (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ (void *) (((uintptr_t) &mutex->__data.__list.__list_t.__next)
| 1));
oldval = mutex->__data.__lock;
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index 68d04d5..c3e8ef4 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -78,6 +78,9 @@ __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
goto normal;
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+ == PTHREAD_MUTEX_QUEUESPINNER_NP, 1))
+ goto normal;
+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
== PTHREAD_MUTEX_ADAPTIVE_NP, 1))
goto normal;
else
@@ -142,7 +145,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
robust:
/* Remove mutex from the list. */
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
- &mutex->__data.__list.__next);
+ &mutex->__data.__list.__list_t.__next);
/* We must set op_pending before we dequeue the mutex. Also see
comments at ENQUEUE_MUTEX. */
__asm ("" ::: "memory");
@@ -242,7 +245,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
/* Remove mutex from the list.
Note: robust PI futexes are signaled by setting bit 0. */
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
- (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ (void *) (((uintptr_t) &mutex->__data.__list.__list_t.__next)
| 1));
/* We must set op_pending before we dequeue the mutex. Also see
comments at ENQUEUE_MUTEX. */
diff --git a/nptl/pthread_mutexattr_settype.c b/nptl/pthread_mutexattr_settype.c
index 7d36cc3..c2382b4 100644
--- a/nptl/pthread_mutexattr_settype.c
+++ b/nptl/pthread_mutexattr_settype.c
@@ -25,7 +25,7 @@ __pthread_mutexattr_settype (pthread_mutexattr_t *attr, int kind)
{
struct pthread_mutexattr *iattr;
- if (kind < PTHREAD_MUTEX_NORMAL || kind > PTHREAD_MUTEX_ADAPTIVE_NP)
+ if (kind < PTHREAD_MUTEX_NORMAL || kind > PTHREAD_MUTEX_QUEUESPINNER_NP)
return EINVAL;
/* Cannot distinguish between DEFAULT and NORMAL. So any settype
diff --git a/sysdeps/nptl/bits/thread-shared-types.h b/sysdeps/nptl/bits/thread-shared-types.h
index 05c94e7..1cf8874 100644
--- a/sysdeps/nptl/bits/thread-shared-types.h
+++ b/sysdeps/nptl/bits/thread-shared-types.h
@@ -79,15 +79,19 @@
/* Common definition of pthread_mutex_t. */
#if !__PTHREAD_MUTEX_USE_UNION
-typedef struct __pthread_internal_list
+typedef union __pthread_internal_list
{
- struct __pthread_internal_list *__prev;
- struct __pthread_internal_list *__next;
+ struct {
+ union __pthread_internal_list *__prev;
+ union __pthread_internal_list *__next;
+  } __list_t;
+ void *mcs_lock;
} __pthread_list_t;
#else
-typedef struct __pthread_internal_slist
+typedef union __pthread_internal_slist
{
- struct __pthread_internal_slist *__next;
+ union __pthread_internal_slist *__next;
+ void *mcs_lock;
} __pthread_slist_t;
#endif
@@ -165,6 +169,13 @@ struct __pthread_mutex_s
__PTHREAD_COMPAT_PADDING_END
};
+struct mcs_lock
+{
+ struct mcs_lock *next;
+ int locked;
+};
+
+typedef struct mcs_lock mcs_lock_t;
/* Common definition of pthread_cond_t. */
diff --git a/sysdeps/nptl/pthread.h b/sysdeps/nptl/pthread.h
index df049ab..4b4b80a 100644
--- a/sysdeps/nptl/pthread.h
+++ b/sysdeps/nptl/pthread.h
@@ -45,7 +45,8 @@ enum
PTHREAD_MUTEX_TIMED_NP,
PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ERRORCHECK_NP,
- PTHREAD_MUTEX_ADAPTIVE_NP
+ PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_QUEUESPINNER_NP
#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
,
PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
@@ -85,14 +86,16 @@ enum
#if __PTHREAD_MUTEX_HAVE_PREV
# define PTHREAD_MUTEX_INITIALIZER \
- { { 0, 0, 0, 0, 0, __PTHREAD_SPINS, { 0, 0 } } }
+ { { 0, 0, 0, 0, 0, __PTHREAD_SPINS, { { 0, 0 } } } }
# ifdef __USE_GNU
# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
- { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __PTHREAD_SPINS, { 0, 0 } } }
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __PTHREAD_SPINS, { { 0, 0 } } } }
# define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
- { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __PTHREAD_SPINS, { 0, 0 } } }
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __PTHREAD_SPINS, { { 0, 0 } } } }
# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
- { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __PTHREAD_SPINS, { 0, 0 } } }
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __PTHREAD_SPINS, { { 0, 0 } } } }
+# define PTHREAD_QUEUESPINNER_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_QUEUESPINNER_NP, __PTHREAD_SPINS, { { 0, 0 } } } }
# endif
#else
@@ -105,6 +108,8 @@ enum
{ { 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, 0, { __PTHREAD_SPINS } } }
# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
{ { 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, 0, { __PTHREAD_SPINS } } }
+# define PTHREAD_QUEUESPINNER_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, PTHREAD_MUTEX_QUEUESPINNER_NP, 0, { __PTHREAD_SPINS } } }
# endif
#endif
diff --git a/sysdeps/unix/sysv/linux/hppa/pthread.h b/sysdeps/unix/sysv/linux/hppa/pthread.h
index 11a024d..57c101c 100644
--- a/sysdeps/unix/sysv/linux/hppa/pthread.h
+++ b/sysdeps/unix/sysv/linux/hppa/pthread.h
@@ -46,6 +46,7 @@ enum
PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ERRORCHECK_NP,
-  PTHREAD_MUTEX_ADAPTIVE_NP
+  PTHREAD_MUTEX_ADAPTIVE_NP,
+  PTHREAD_MUTEX_QUEUESPINNER_NP
#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
,
PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
@@ -95,6 +96,9 @@ enum
# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
{ { 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, { 0, 0, 0, 0 }, 0, \
{ __PTHREAD_SPINS }, { 0, 0 } } }
+# define PTHREAD_QUEUESPINNER_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, PTHREAD_MUTEX_QUEUESPINNER_NP, { 0, 0, 0, 0 }, 0, \
+ { __PTHREAD_SPINS }, { 0, 0 } } }
#endif