[RFC/PATCH] RT-NPTL-2.1 4/5
Hu, Boris
boris.hu@intel.com
Mon Feb 2 03:18:00 GMT 2004
 nptl/Versions                                               |    8
 nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h            |  138 +++++++
 nptl/sysdeps/unix/sysv/linux/i386/lowlevelrtlock.c          |  226 +++++++++++++
 nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.c  |   87 +++++
 nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.c     |   69 +++
 nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.c  |  204 +++++++++++
 nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.c       |  167 +++++++++
 nptl/sysdeps/unix/sysv/linux/internaltypes.h                |   11
 nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c      |   12
 sysdeps/generic/bits/confname.h                             |    4
 sysdeps/posix/sysconf.c                                     |    7
 11 files changed, 928 insertions(+), 5 deletions(-)
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h src/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
--- src.cvs/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h	2004-01-31 11:49:05.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h	2004-01-31 11:56:54.000000000 +0800
@@ -2,6 +2,9 @@
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+   Redirect lowlevellock to use Fast User SYNchronization (fusyn).
+ Boris Hu <boris.hu@intel.com>, 2003
+
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -35,7 +38,7 @@
#define SYS_futex 240
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
-
+#define FUTEX_REQUEUE 3
/* Initializer for compatibility lock. */
#define LLL_MUTEX_LOCK_INITIALIZER (0)
@@ -89,6 +92,20 @@
"i" (offsetof (tcbhead_t, sysinfo)));
\
} while (0)
+#define lll_futex_timed_wait(futex, val, timespec) \
+  ({ int ret;								      \
+     INTERNAL_SYSCALL_DECL (err);					      \
+     ret = INTERNAL_SYSCALL (futex, err, 5, futex, FUTEX_WAIT, val,	      \
+			     timespec, 0);				      \
+     INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret, err) \
+					  : ret; })
+
+#define lll_futex_requeue(futex, nr_wake, nr_move, mutex) \
+  ({ int ret;								      \
+     INTERNAL_SYSCALL_DECL (err);					      \
+     ret = INTERNAL_SYSCALL (futex, err, 5, futex, FUTEX_WAKE, INT_MAX, 0, 0);\
+     INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret, err) \
+					  : ret; })
/* Does not preserve %eax and %ecx. */
extern int __lll_mutex_lock_wait (int val, int *__futex)
@@ -351,5 +368,124 @@
#define lll_cond_broadcast(cond) \
__lll_cond_broadcast (cond)
+#include <linux/fulock.h>
+
+#ifdef USE_FUSYN_ROBUST_MUTEX
+//#define SEA_DEBUG 1 // To Enable the debug info
+#ifdef SEA_DEBUG
+/* Indicate location */
+# define SEA_L								      \
+  do {									      \
+    unsigned id = THREAD_GETMEM (THREAD_SELF, tid);			      \
+    printf ("[%d] %s:%s() %d line: \n", id, __FILE__,			      \
+	    __FUNCTION__, __LINE__);					      \
+  } while (0)
+
+/* Location-aware printf */
+# define SEA_P(fmt, args...)						      \
+  do {									      \
+    unsigned id = THREAD_GETMEM (THREAD_SELF, tid);			      \
+    printf ("[%d] %s:%s() %d line: " fmt "\n",				      \
+	    id, __FILE__, __FUNCTION__, __LINE__, args);		      \
+  } while (0)
+
+#else
+# define SEA_L
+# define SEA_P(fmt, args...)
+#endif
+
+/* Add lll_rtmutex_* to support fusyn */
+#define FUSYN_FL_RT_MASK 0x78000000
+
+#define __LK_FL(fulock)							      \
+  ({ unsigned k_flags = 0;						      \
+     unsigned flags = ((pthread_mutex_t *)(fulock))->__data.__kind	      \
+		      & FUSYN_FL_RT_MASK;				      \
+     k_flags = ((flags << 1) & FULOCK_FL_USER_MK);			      \
+     k_flags; })
+
+
+extern int is_mutex_robust (const pthread_mutex_t *mutex);
+
+extern int __lll_rtmutex_trylock (volatile unsigned *vfulock, unsigned tid);
+#define lll_rtmutex_trylock(futex, tid) \
+  __lll_rtmutex_trylock (&(futex), tid)
+
+
+extern int __lll_rtmutex_timedlock (volatile unsigned *vfulock,
+				    unsigned flags, unsigned tid,
+				    struct timespec *rel);
+#define lll_rtmutex_timedlock(futex, tid, timeout) \
+  __lll_rtmutex_timedlock (&(futex), __LK_FL (&(futex)), tid, timeout)
+
+
+extern int __lll_rtmutex_lock (volatile unsigned *vfulock, unsigned flags,
+			       unsigned tid);
+#define lll_rtmutex_lock(futex, tid) \
+  __lll_rtmutex_lock (&(futex), __LK_FL (&(futex)), tid)
+
+
+extern int __lll_rtmutex_unlock (volatile unsigned *vfulock, unsigned flags,
+				 unsigned tid);
+#define lll_rtmutex_unlock(futex, tid) \
+  __lll_rtmutex_unlock (&(futex), __LK_FL (&(futex)), tid)
+
+
+extern int __lll_rtmutex_unlock_nocheck (volatile unsigned *vfulock);
+#define lll_rtmutex_unlock_nocheck(futex) \
+  __lll_rtmutex_unlock_nocheck (&(futex))
+
+
+extern int __lll_rtmutex_set_consistency (volatile unsigned *vfulock,
+					  enum fulock_con consistency);
+#define lll_rtmutex_set_consistency(futex, state) \
+  __lll_rtmutex_set_consistency (&(futex), state)
+
+
+extern int __lll_rtmutex_get_consistency (volatile unsigned *vfulock,
+					  int *state);
+#define lll_rtmutex_get_consistency(futex, state) \
+  __lll_rtmutex_get_consistency (&(futex), state)
+
+
+#define CONDVAR_RM_FLAGS FULOCK_FL_RM
+
+#define lll_cmutex_lock(fulock, id)					      \
+  do {									      \
+    while (__lll_rtmutex_lock (&(fulock), CONDVAR_RM_FLAGS, id))	      \
+      lll_rtmutex_set_consistency (fulock,				      \
+				   PTHREAD_MUTEX_ROBUST_CONSISTENT_NP);	      \
+  } while (0)
+
+#define lll_cmutex_unlock(fulock, id) \
+  __lll_rtmutex_unlock (&(fulock), CONDVAR_RM_FLAGS, id)
+
+#define LLL_CMUTEX_LOCK(mutex, tid) \
+ lll_cmutex_lock (mutex, tid)
+
+#define LLL_CMUTEX_UNLOCK(mutex, tid) \
+ lll_cmutex_unlock (mutex, tid)
+
+#else /* Normal NPTL */
+
+#define FUSYN_FL_RT_MASK 0
+
+#define LLL_CMUTEX_LOCK(mutex, tid) \
+ lll_mutex_lock(mutex)
+
+#define LLL_CMUTEX_UNLOCK(mutex, tid) \
+ lll_mutex_unlock (mutex)
+
+#define lll_rtmutex_get_consistency(futex, state) 0
+
+#define lll_rtmutex_set_consistency(futex, state) 0
+
+#define lll_rtmutex_unlock_nocheck(futex) 0
+
+#define __LK_FL(fulock) 0
+
+
+
+#endif /* USE_FUSYN_ROBUST_MUTEX */
+
+
#endif /* lowlevellock.h */
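(Not part of the patch, added for orientation.)  The sketch below shows how a caller of __lll_rtmutex_lock, declared above and defined in lowlevelrtlock.c further down, is expected to act on the robust-mutex result codes.  The helper name and its surroundings are invented for the example; only the __lll_rtmutex_* prototypes come from the patch.

/* Hypothetical caller-side sketch, NOT part of the patch.  Assumes the
   patched <lowlevellock.h> and a fusyn-enabled kernel.  */
#include <errno.h>
#include <pthreadP.h>
#include <lowlevellock.h>

static int
example_robust_acquire (volatile unsigned *vfulock, unsigned flags)
{
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int err = __lll_rtmutex_lock (vfulock, flags, id);

  switch (err)
    {
    case 0:
      return 0;		/* Acquired normally.  */
    case EOWNERDEAD:
      /* The previous owner died while holding the lock.  The caller now
	 owns it, but must repair the protected state and mark the lock
	 consistent (see lll_rtmutex_set_consistency) before trusting it.  */
      return EOWNERDEAD;
    case ENOTRECOVERABLE:
      /* A dead owner was never cleaned up; the lock is unusable.  */
      return ENOTRECOVERABLE;
    default:
      return err;	/* Other errors are passed through.  */
    }
}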
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/i386/lowlevelrtlock.c src/nptl/sysdeps/unix/sysv/linux/i386/lowlevelrtlock.c
--- src.cvs/nptl/sysdeps/unix/sysv/linux/i386/lowlevelrtlock.c	1970-01-01 08:00:00.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/i386/lowlevelrtlock.c	2004-01-31 11:51:20.000000000 +0800
@@ -0,0 +1,226 @@
+/*
+ * (C) 2003 Intel Corporation
+ * Boris Hu <boris.hu@intel.com>
+ *
+ * Distributed under the FSF's LGPL license, v2 or later. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <sys/syscall.h>
+#include <pthread.h>
+#include <pthreadP.h>
+#include <lowlevellock.h>
+#include <sys/time.h>
+#include <atomic.h>
+
+#include <linux/fulock.h>
+
+
+inline int
+__attribute__ ((always_inline))
+is_mutex_robust (const pthread_mutex_t *mutex)
+{
+  return (mutex->__data.__kind & ((FULOCK_FL_RM | FULOCK_FL_RM_SUN) >> 1));
+}
+
+
+inline int
+__attribute__ ((always_inline))
+is_mutexattr_robust (const struct pthread_mutexattr *attr)
+{
+ return (attr->mutexkind & ((FULOCK_FL_RM | FULOCK_FL_RM_SUN) >> 1));
+}
+
+
+inline int
+__attribute__ ((always_inline))
+is_mutex_healthy (const pthread_mutex_t *mutex)
+{
+ int state;
+ pthread_mutex_getconsistency_np (mutex, &state);
+ return PTHREAD_MUTEX_ROBUST_CONSISTENT_NP == state;
+}
+
+
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_trylock (volatile unsigned *vfulock, unsigned tid)
+{
+ unsigned old_value;
+ int result;
+ unsigned flags = __LK_FL (vfulock);
+ INTERNAL_SYSCALL_DECL (err);
+
+ restart:
+ result = EBUSY;
+  old_value = atomic_compare_and_exchange_val_acq (vfulock, tid,
+						    VFULOCK_UNLOCKED);
+  if (old_value == VFULOCK_UNLOCKED) /* If it was unlocked, fulock acquired */
+ result = 0;
+ else if (old_value == VFULOCK_NR)
+ result = ENOTRECOVERABLE;
+ else if ((old_value == VFULOCK_KCO) || (flags & FULOCK_FL_RM))
+ {
+      result = INTERNAL_SYSCALL (ufulock_lock, err, 3, vfulock, flags, 0);
+ if (INTERNAL_SYSCALL_ERROR_P (result, err))
+ result = INTERNAL_SYSCALL_ERRNO (result, err);
+
+ switch (result)
+ {
+ case 0:
+ case EBUSY:
+ case ETIMEDOUT:
+ case EOWNERDEAD:
+ case ENOTRECOVERABLE:
+ return result;
+ default:
+ goto restart;
+ }
+ }
+ return result; /* Taken (waiters in kernel)! */
+}
+
+
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_timedlock (volatile unsigned *vfulock, unsigned flags,
+ unsigned pid, struct timespec *rel)
+{
+ int result;
+ struct timeval tv;
+ struct timespec rt, *p = NULL;
+ INTERNAL_SYSCALL_DECL (err);
+
+ if ((void *)-1 != rel && NULL != rel) {
+ if (rel->tv_nsec < 0 || rel->tv_nsec >= 1000000000)
+ return EINVAL;
+
+ (void) __gettimeofday (&tv, NULL);
+
+ rt.tv_sec = rel->tv_sec - tv.tv_sec;
+ rt.tv_nsec = rel->tv_nsec - tv.tv_usec * 1000;
+ if (rt.tv_nsec < 0)
+ {
+ rt.tv_nsec += 1000000000;
+ --rt.tv_sec;
+ }
+ if (rt.tv_sec < 0)
+ return ETIMEDOUT;
+ p = &rt;
+ } else if ((void *)-1 == rel)
+ p = (void *)-1;
+
+ restart:
+  if (! atomic_compare_and_exchange_bool_acq (vfulock, pid,
+					      VFULOCK_UNLOCKED))
+ return 0;
+
+ result = INTERNAL_SYSCALL (ufulock_lock, err, 3, vfulock, flags, p);
+
+ result = INTERNAL_SYSCALL_ERROR_P (result, err)
+ ? INTERNAL_SYSCALL_ERRNO (result, err) : result;
+
+ switch (result)
+ {
+ case 0:
+ case EBUSY:
+ case ETIMEDOUT:
+ case EOWNERDEAD:
+ case ENOTRECOVERABLE:
+ return result;
+ default:
+ goto restart;
+ }
+ return 0; /* Lock acquired. */
+}
+
+
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_lock (volatile unsigned *vfulock, unsigned flags,
+ unsigned tid)
+{
+ return __lll_rtmutex_timedlock (vfulock, flags, tid, (void *) -1);
+}
+
+
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_unlock (volatile unsigned *vfulock, unsigned flags,
+ unsigned tid)
+{
+ int result = EPERM;
+ unsigned old_value;
+ INTERNAL_SYSCALL_DECL (err);
+
+ while (1)
+ {
+ old_value = *vfulock;
+ if (old_value == VFULOCK_NR)
+ {
+ result = ENOTRECOVERABLE;
+ break;
+ }
+ else if (old_value >= VFULOCK_KCO)
+ {
+	  result = INTERNAL_SYSCALL (ufulock_unlock, err, 3, vfulock,
+				     flags, 0);
+ if (INTERNAL_SYSCALL_ERROR_P (result, err)) {
+ result = INTERNAL_SYSCALL_ERRNO (result, err);
+ break;
+ }
+ break;
+ }
+      else if (! atomic_compare_and_exchange_bool_acq (vfulock,
+							VFULOCK_UNLOCKED,
+							old_value)) {
+ result = 0;
+ break;
+ }
+ }
+ return result;
+}
+
+
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_unlock_nocheck (volatile unsigned *vfulock)
+{
+ unsigned flags = __LK_FL (vfulock);
+ int result;
+ INTERNAL_SYSCALL_DECL (err);
+
+  result = INTERNAL_SYSCALL (ufulock_unlock, err, 3, vfulock, flags, 0);
+ return INTERNAL_SYSCALL_ERROR_P (result, err)
+ ? INTERNAL_SYSCALL_ERRNO (result, err)
+ : result;
+}
+
+
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_set_consistency (volatile unsigned *vfulock,
+ enum fulock_con consistency)
+{
+ unsigned flags = __LK_FL (vfulock);
+ int result;
+ INTERNAL_SYSCALL_DECL (err);
+
+ result = INTERNAL_SYSCALL (ufulock_consistency, err, 3, vfulock,
+ flags, consistency);
+ return INTERNAL_SYSCALL_ERROR_P (result, err)
+ ? INTERNAL_SYSCALL_ERRNO (result, err)
+ : result;
+}
+
+
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_get_consistency (volatile unsigned *vfulock,
+ int *state)
+{
+ unsigned flags = __LK_FL (vfulock);
+ int result;
+ INTERNAL_SYSCALL_DECL (err);
+
+  result = INTERNAL_SYSCALL (ufulock_consistency, err, 3, vfulock, flags, 0);
+ if (! INTERNAL_SYSCALL_ERROR_P (result, err))
+ *state = result;
+ return 0;
+}
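(Not part of the patch, added for orientation.)  At the application level these primitives back the robust-mutex interfaces exported in the Versions hunk below (pthread_mutexattr_setrobust_np, pthread_mutex_setconsistency_np).  The recovery flow they enable looks roughly like this; the prototypes and the PTHREAD_MUTEX_ROBUST_NP attribute value are assumptions by analogy, since the corresponding headers are not in this mail, and only PTHREAD_MUTEX_ROBUST_CONSISTENT_NP appears in the patch itself.

/* Illustration only.  Assumes the RT-NPTL headers provide:
     int pthread_mutexattr_setrobust_np (pthread_mutexattr_t *, int);
     int pthread_mutex_setconsistency_np (pthread_mutex_t *, int);
   and a PTHREAD_MUTEX_ROBUST_NP attribute value -- these names are
   assumptions, not taken from this mail.  */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock;

static int
lock_and_recover (void)
{
  int err = pthread_mutex_lock (&lock);

  if (err == EOWNERDEAD)
    {
      /* The previous owner died while holding the mutex.  Repair the
	 protected data here, then mark the mutex consistent again.  */
      fprintf (stderr, "owner died, recovering\n");
      err = pthread_mutex_setconsistency_np (&lock,
					     PTHREAD_MUTEX_ROBUST_CONSISTENT_NP);
    }
  /* ENOTRECOVERABLE means a dead owner was never cleaned up.  */
  return err;
}

int
main (void)
{
  pthread_mutexattr_t attr;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setrobust_np (&attr, PTHREAD_MUTEX_ROBUST_NP);
  pthread_mutex_init (&lock, &attr);

  if (lock_and_recover () == 0)
    pthread_mutex_unlock (&lock);
  return 0;
}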
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.c src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.c
--- src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.c	1970-01-01 08:00:00.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.c	2004-01-31 11:51:20.000000000 +0800
@@ -0,0 +1,87 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   Hacked to add robust features to condvars by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+#include <kernel-features.h>
+
+
+int
+__pthread_cond_broadcast (cond)
+ pthread_cond_t *cond;
+{
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* Make sure we are alone. */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Are there any waiters to be woken? */
+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+ {
+ /* Yes. Mark them all as woken. */
+ cond->__data.__wakeup_seq = cond->__data.__total_seq;
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+ /* The futex syscall operates on a 32-bit word. That is fine,
+ we just use the low 32 bits of the sequence counter. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+ /* Do not use requeue for pshared condvars. */
+ if (cond->__data.__mutex == (void *) ~0l)
+ goto wake_all;
+
+ /* Wake everybody. */
+ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+      if (__builtin_expect (lll_futex_requeue (futex, 1, INT_MAX,
+						&mut->__data.__lock)
+			    == -EINVAL, 0))
+ {
+ /* The requeue functionality is not available. */
+ wake_all:
+ lll_futex_wake (futex, INT_MAX);
+ }
+
+ /* That's all. */
+ return 0;
+ }
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return 0;
+}
+
+versioned_symbol (libpthread, __pthread_cond_broadcast,
+		  pthread_cond_broadcast, GLIBC_2_3_2);
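(Not part of the patch, added for orientation.)  The BYTE_ORDER block above, repeated in the other three condvar files, picks the 32-bit half of the 64-bit __wakeup_seq counter that the futex syscall actually operates on: the low word comes first on little-endian machines and second on big-endian ones.  A stand-alone illustration of the address arithmetic:

/* Stand-alone illustration of selecting the low 32-bit word of a 64-bit
   sequence counter, as the condvar code above does for the futex
   syscall.  Not part of the patch.  */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t wakeup_seq = 0x1122334455667788ULL;

#if BYTE_ORDER == LITTLE_ENDIAN
  int *futex_word = (int *) &wakeup_seq;	/* low word comes first */
#elif BYTE_ORDER == BIG_ENDIAN
  int *futex_word = ((int *) &wakeup_seq) + 1;	/* low word comes second */
#else
# error "No valid byte order"
#endif

  /* Prints 55667788 on either byte order.  */
  printf ("low 32 bits: %08x\n", (unsigned int) *futex_word);
  return 0;
}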
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.c src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.c
--- src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.c	1970-01-01 08:00:00.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.c	2004-01-31 11:51:20.000000000 +0800
@@ -0,0 +1,69 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   Hacked to add robust features to condvars by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+#include <kernel-features.h>
+
+
+int
+__pthread_cond_signal (cond)
+ pthread_cond_t *cond;
+{
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* Make sure we are alone. */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Are there any waiters to be woken? */
+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+ {
+ /* Yes. Mark one of them as woken. */
+ ++cond->__data.__wakeup_seq;
+
+ /* The futex syscall operates on a 32-bit word. That is fine,
+ we just use the low 32 bits of the sequence counter. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+ /* Wake one. */
+ lll_futex_wake (futex, 1);
+ }
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return 0;
+}
+
+versioned_symbol (libpthread, __pthread_cond_signal,
+		  pthread_cond_signal, GLIBC_2_3_2);
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.c src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.c
--- src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.c	1970-01-01 08:00:00.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.c	2004-01-31 11:51:20.000000000 +0800
@@ -0,0 +1,204 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   Hacked to add robust features to condvars by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+
+
+/* Cleanup handler, defined in pthread_cond_wait.c. */
+extern void __condvar_cleanup (void *arg)
+ __attribute__ ((visibility ("hidden")));
+
+struct _condvar_cleanup_buffer
+{
+ int oldtype;
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+};
+
+int
+__pthread_cond_timedwait (cond, mutex, abstime)
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+ const struct timespec *abstime;
+{
+ struct _pthread_cleanup_buffer buffer;
+ struct _condvar_cleanup_buffer cbuffer;
+ int result = 0;
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* Catch invalid parameters. */
+ if (abstime->tv_nsec >= 1000000000)
+ return EINVAL;
+
+  /* Make sure we are alone.  */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Now we can release the mutex. */
+ int err = __pthread_mutex_unlock_usercnt (mutex, 0);
+ if (err)
+ {
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return err;
+ }
+
+ /* We have one new user of the condvar. */
+ ++cond->__data.__total_seq;
+
+ /* Remember the mutex we are using here. If there is already a
+ different address store this is a bad user bug. Do not store
+ anything for pshared condvars. */
+ if (cond->__data.__mutex != (void *) ~0l)
+ cond->__data.__mutex = mutex;
+
+ /* Prepare structure passed to cancellation handler. */
+ cbuffer.cond = cond;
+ cbuffer.mutex = mutex;
+
+ /* Before we block we enable cancellation. Therefore we have to
+ install a cancellation handler. */
+ __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);
+
+ /* The current values of the wakeup counter. The "woken" counter
+ must exceed this value. */
+ unsigned long long int val;
+ unsigned long long int seq;
+ val = seq = cond->__data.__wakeup_seq;
+
+ /* The futex syscall operates on a 32-bit word. That is fine, we
+ just use the low 32 bits of the sequence counter. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+ while (1)
+ {
+ struct timespec rt;
+ {
+#ifdef __NR_clock_gettime
+ INTERNAL_SYSCALL_DECL (err);
+ int val;
+ val = INTERNAL_SYSCALL (clock_gettime, err, 2,
+ cond->__data.__clock, &rt);
+# ifndef __ASSUME_POSIX_TIMERS
+ if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (val, err), 0))
+ {
+ struct timeval tv;
+ (void) gettimeofday (&tv, NULL);
+
+	      /* Convert the absolute timeout value to a relative timeout.  */
+ rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+ rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+ }
+ else
+# endif
+ {
+	      /* Convert the absolute timeout value to a relative timeout.  */
+ rt.tv_sec = abstime->tv_sec - rt.tv_sec;
+ rt.tv_nsec = abstime->tv_nsec - rt.tv_nsec;
+ }
+#else
+ /* Get the current time. So far we support only one clock. */
+ struct timeval tv;
+ (void) gettimeofday (&tv, NULL);
+
+ /* Convert the absolute timeout value to a relative timeout. */
+ rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+ rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+#endif
+ }
+ if (rt.tv_nsec < 0)
+ {
+ rt.tv_nsec += 1000000000;
+ --rt.tv_sec;
+ }
+ /* Did we already time out? */
+ if (rt.tv_sec < 0)
+ {
+ /* Yep. Adjust the sequence counter. */
+ ++cond->__data.__wakeup_seq;
+
+ /* The error value. */
+ result = ETIMEDOUT;
+ break;
+ }
+
+ /* Prepare to wait. Release the condvar futex. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+      /* Enable asynchronous cancellation.  Required by the standard.  */
+ cbuffer.oldtype = __pthread_enable_asynccancel ();
+
+ /* Wait until woken by signal or broadcast. Note that we
+ truncate the 'val' value to 32 bits. */
+ err = lll_futex_timed_wait (futex, (unsigned int) val, &rt);
+
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (cbuffer.oldtype);
+
+      /* We are going to look at shared data again, so get the lock.  */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Check whether we are eligible for wakeup. */
+ val = cond->__data.__wakeup_seq;
+ if (val > seq && cond->__data.__woken_seq < val)
+ break;
+
+ /* Not woken yet. Maybe the time expired? */
+ if (err == -ETIMEDOUT)
+ {
+ /* Yep. Adjust the counters. */
+ ++cond->__data.__wakeup_seq;
+
+ /* The error value. */
+ result = ETIMEDOUT;
+ break;
+ }
+ }
+
+ /* Another thread woken up. */
+ ++cond->__data.__woken_seq;
+
+ /* We are done with the condvar. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+  /* The cancellation handling is back to normal, remove the handler.  */
+ __pthread_cleanup_pop (&buffer, 0);
+
+ /* Get the mutex before returning. */
+ err = __pthread_mutex_cond_lock (mutex);
+
+ return err ?: result;
+}
+
+versioned_symbol (libpthread, __pthread_cond_timedwait,
+		  pthread_cond_timedwait, GLIBC_2_3_2);
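(Not part of the patch, added for orientation.)  The loop above re-derives a relative timeout from the caller's absolute deadline on every iteration, so spurious wakeups do not extend the total wait.  Condensed into a stand-alone helper, mirroring only the gettimeofday fallback path:

/* Stand-alone version of the absolute-to-relative timeout conversion
   used in __pthread_cond_timedwait above (gettimeofday path only).
   Returns ETIMEDOUT if the deadline already passed, EINVAL for a bad
   nanosecond field, else 0.  Not part of the patch.  */
#include <errno.h>
#include <sys/time.h>
#include <time.h>

static int
abstime_to_reltime (const struct timespec *abstime, struct timespec *rel)
{
  struct timeval tv;

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  (void) gettimeofday (&tv, NULL);

  rel->tv_sec = abstime->tv_sec - tv.tv_sec;
  rel->tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
  if (rel->tv_nsec < 0)
    {
      rel->tv_nsec += 1000000000;
      --rel->tv_sec;
    }
  return rel->tv_sec < 0 ? ETIMEDOUT : 0;
}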
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.c src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.c
--- src.cvs/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.c	1970-01-01 08:00:00.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.c	2004-01-31 11:51:20.000000000 +0800
@@ -0,0 +1,167 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   Hacked to add robust features to condvars by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+
+
+struct _condvar_cleanup_buffer
+{
+ int oldtype;
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+};
+
+
+void
+__attribute__ ((visibility ("hidden")))
+__condvar_cleanup (void *arg)
+{
+ struct _condvar_cleanup_buffer *cbuffer =
+ (struct _condvar_cleanup_buffer *) arg;
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* We are going to modify shared data. */
+ LLL_CMUTEX_LOCK (cbuffer->cond->__data.__lock, id);
+
+ /* This thread is not waiting anymore. Adjust the sequence counters
+ appropriately. */
+ ++cbuffer->cond->__data.__wakeup_seq;
+ ++cbuffer->cond->__data.__woken_seq;
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cbuffer->cond->__data.__lock, id);
+
+ /* Wake everybody to make sure no condvar signal gets lost. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cbuffer->cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cbuffer->cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+ lll_futex_wake (futex, INT_MAX);
+
+ /* Get the mutex before returning unless asynchronous cancellation
+ is in effect. */
+ __pthread_mutex_cond_lock (cbuffer->mutex);
+}
+
+
+int
+__pthread_cond_wait (cond, mutex)
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+{
+ struct _pthread_cleanup_buffer buffer;
+ struct _condvar_cleanup_buffer cbuffer;
+ int err;
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+  /* Make sure we are alone.  */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Now we can release the mutex. */
+ err = __pthread_mutex_unlock_usercnt (mutex, 0);
+ if (err)
+ {
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return err;
+ }
+
+ /* We have one new user of the condvar. */
+ ++cond->__data.__total_seq;
+
+ /* Remember the mutex we are using here. If there is already a
+ different address store this is a bad user bug. Do not store
+ anything for pshared condvars. */
+ if (cond->__data.__mutex != (void *) ~0l)
+ cond->__data.__mutex = mutex;
+
+ /* Prepare structure passed to cancellation handler. */
+ cbuffer.cond = cond;
+ cbuffer.mutex = mutex;
+
+ /* Before we block we enable cancellation. Therefore we have to
+ install a cancellation handler. */
+ __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);
+
+ /* The current values of the wakeup counter. The "woken" counter
+ must exceed this value. */
+ unsigned long long int val;
+ unsigned long long int seq;
+ val = seq = cond->__data.__wakeup_seq;
+
+ /* The futex syscall operates on a 32-bit word. That is fine, we
+ just use the low 32 bits of the sequence counter. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+ do
+ {
+ /* Prepare to wait. Release the condvar futex. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+      /* Enable asynchronous cancellation.  Required by the standard.  */
+ cbuffer.oldtype = __pthread_enable_asynccancel ();
+
+ /* Wait until woken by signal or broadcast. Note that we
+ truncate the 'val' value to 32 bits. */
+ lll_futex_wait (futex, (unsigned int) val);
+
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (cbuffer.oldtype);
+
+      /* We are going to look at shared data again, so get the lock.  */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Check whether we are eligible for wakeup. */
+ val = cond->__data.__wakeup_seq;
+ }
+ while (! (val > seq && cond->__data.__woken_seq < val));
+
+ /* Another thread woken up. */
+ ++cond->__data.__woken_seq;
+
+ /* We are done with the condvar. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+  /* The cancellation handling is back to normal, remove the handler.  */
+ __pthread_cleanup_pop (&buffer, 0);
+
+ /* Get the mutex before returning. */
+ return __pthread_mutex_cond_lock (mutex);
+}
+
+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
+ GLIBC_2_3_2);
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/internaltypes.h src/nptl/sysdeps/unix/sysv/linux/internaltypes.h
--- src.cvs/nptl/sysdeps/unix/sysv/linux/internaltypes.h	2004-01-31 11:49:05.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/internaltypes.h	2004-01-31 11:51:20.000000000 +0800
@@ -49,7 +49,8 @@
#define ATTR_FLAG_STACKADDR 0x0008
#define ATTR_FLAG_OLDATTR 0x0010
-
+#define MAX_USER_RT_PRIO 100
+#define PRIOCEILING_MASK 0x3f
/* Mutex attribute data structure. */
struct pthread_mutexattr
{
@@ -57,7 +58,13 @@
Bit 31 is set if the mutex is to be shared between processes.
- Bit 0 to 30 contain one of the PTHREAD_MUTEX_ values to identify
+     Flags for the realtime mutex extension:
+     Bits 30 and 29 hold the mutex protocol attribute.
+     Bits 28 and 27 hold the mutex robustness attribute.
+
+     Bits 0 to 6 record the priority ceiling value.
+
+     Bits 8 to 26 contain one of the PTHREAD_MUTEX_ values to identify
the type of the mutex. */
int mutexkind;
};
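(Not part of the patch, added for orientation.)  One way to read the new mutexkind layout described in the comment above is through accessor macros like the following; the bit positions and PRIOCEILING_MASK come from the patch, while the macro names are made up for illustration.

/* Hypothetical accessors for the extended mutexkind layout documented
   above.  Not part of the patch.  */
#define MK_PSHARED(kind)      (((unsigned) (kind) >> 31) & 0x1)
#define MK_PROTOCOL(kind)     (((unsigned) (kind) >> 29) & 0x3)    /* bits 30-29 */
#define MK_ROBUSTNESS(kind)   (((unsigned) (kind) >> 27) & 0x3)    /* bits 28-27 */
#define MK_TYPE(kind)         (((unsigned) (kind) >> 8) & 0x7ffff) /* bits 26-8 */
#define MK_PRIOCEILING(kind)  ((unsigned) (kind) & PRIOCEILING_MASK)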
diff -urN src.cvs/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c src/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
--- src.cvs/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c	2004-01-31 11:49:05.000000000 +0800
+++ src/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c	2004-01-31 11:51:20.000000000 +0800
@@ -1,6 +1,16 @@
#include <pthreadP.h>
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock(mutex)
+#ifdef USE_FUSYN_ROBUST_MUTEX
+# define LLL_MUTEX_LOCK(mutex, tid) \
+ do { \
+ result = lll_rtmutex_lock (mutex,tid); \
+ if (__builtin_expect (0 != result, 0)) \
+ goto out_err; \
+ } while (0)
+#else
+# define LLL_MUTEX_LOCK(mutex, tid) lll_mutex_cond_lock(mutex)
+#endif
+
#define __pthread_mutex_lock __pthread_mutex_cond_lock
#define NO_INCR
diff -urN src.cvs/nptl/Versions src/nptl/Versions
--- src.cvs/nptl/Versions 2004-01-31 11:49:05.000000000 +0800
+++ src/nptl/Versions 2004-01-31 11:51:20.000000000 +0800
@@ -212,6 +212,14 @@
# Proposed API extensions.
pthread_tryjoin_np; pthread_timedjoin_np;
+ pthread_mutexattr_getrobust_np; pthread_mutexattr_setrobust_np;
+ pthread_mutex_getconsistency_np; pthread_mutex_setconsistency_np;
+ pthread_mutexattr_getprioceiling; pthread_mutexattr_setprioceiling;
+ pthread_mutex_getprioceiling; pthread_mutex_setprioceiling;
+ pthread_mutex_lock_waiting_for_mutex_whose_owner_died_np;
+
+  # 1003.1-2001 functions (realtime)
+ pthread_mutexattr_getprotocol; pthread_mutexattr_setprotocol;
# New cancellation cleanup handling.
__pthread_register_cancel; __pthread_unregister_cancel;
diff -urN src.cvs/sysdeps/generic/bits/confname.h src/sysdeps/generic/bits/confname.h
--- src.cvs/sysdeps/generic/bits/confname.h	2004-01-31 11:49:10.000000000 +0800
+++ src/sysdeps/generic/bits/confname.h	2004-01-31 11:51:20.000000000 +0800
@@ -460,8 +460,10 @@
#define _SC_TRACE_EVENT_FILTER _SC_TRACE_EVENT_FILTER
_SC_TRACE_INHERIT,
#define _SC_TRACE_INHERIT _SC_TRACE_INHERIT
- _SC_TRACE_LOG
+ _SC_TRACE_LOG,
#define _SC_TRACE_LOG _SC_TRACE_LOG
+ _SC_THREAD_ROBUST_MUTEX_NP
+#define _SC_THREAD_ROBUST_MUTEX_NP _SC_THREAD_ROBUST_MUTEX_NP
};
/* Values for the NAME argument to `confstr'. */
diff -urN src.cvs/sysdeps/posix/sysconf.c src/sysdeps/posix/sysconf.c
--- src.cvs/sysdeps/posix/sysconf.c	2004-01-31 11:49:12.000000000 +0800
+++ src/sysdeps/posix/sysconf.c 2004-01-31 11:51:20.000000000 +0800
@@ -1115,6 +1115,13 @@
#else
return -1;
#endif
+
+ case _SC_THREAD_ROBUST_MUTEX_NP:
+#ifdef _POSIX_THREAD_ROBUST_MUTEX_NP
+ return _POSIX_THREAD_ROBUST_MUTEX_NP;
+#else
+ return -1;
+#endif
}
}
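(Not part of the patch, added for orientation.)  With the confname.h and sysconf.c hunks above, an application can probe at run time whether the robust mutex extension is available.  The probe relies only on the _SC_THREAD_ROBUST_MUTEX_NP constant added above; _POSIX_THREAD_ROBUST_MUTEX_NP is assumed to be defined elsewhere in the series when the feature is compiled in.

/* Run-time probe for the robust mutex capability.  Not part of the
   patch.  */
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  long avail = sysconf (_SC_THREAD_ROBUST_MUTEX_NP);

  if (avail > 0)
    printf ("robust mutexes supported (%ld)\n", avail);
  else
    printf ("robust mutexes not supported\n");
  return 0;
}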
Good Luck !
Boris Hu (Hu Jiangtao)
Software Engineer@ICSL
86-021-5257-4545#1277
iNET: 8-752-1277
*****************************************
There are my thoughts, not my employer's.
*****************************************
"gpg --recv-keys --keyserver wwwkeys.pgp.net 0FD7685F"
{0FD7685F:CFD6 6F5C A2CB 7881 725B CEA0 956F 9F14 0FD7 685F}