[RFC/PATCH] RT-NPTL-2.3 5/7
Hu, Boris
boris.hu@intel.com
Sat Jul 24 06:48:00 GMT 2004
i386/lowlevellock.h | 23 +++
i386/pthread_cond_broadcast.c | 82 +++++++++++++
lowlevelrtlock.h | 251
++++++++++++++++++++++++++++++++++++++++++
pthread_mutex_cond_lock.c | 16 ++
pthread_mutex_rq_lock.c | 70 +++++++++++
5 files changed, 439 insertions(+), 3 deletions(-)
--- /dev/null Fri Jul 23 03:18:17 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/lowlevelrtlock.h
Mon Jul 19 02:48:10 2004
@@ -0,0 +1,251 @@
+/*
+ * (C) 2003,2004 Intel Corporation
+ * Boris Hu <boris.hu@intel.com>
+ *
+ * Distributed under the FSF's LGPL license, v2 or later. */
+
+#ifndef _LOWLEVELRTLOCK_H
+#define _LOWLEVELRTLOCK_H 1
+
+#ifdef USE_FUSYN_ROBUST_MUTEX
+#include <linux/fuqueue.h>
+#else
+#define FUQUEUE_WAITER_GOT_LOCK 0x401449
+#define FUQUEUE_CTL_RELEASE 0
+#endif
+
+
+enum {
+ /** Timeout is relative to the current clock source time. */
+ TIMEOUT_RELATIVE = 0x2,
+};
+
+struct timeout
+{
+ int flags;
+ clockid_t clock_id; /* Currently unused */
+ struct timespec ts;
+};
+
+#ifdef USE_FUSYN_ROBUST_MUTEX
+//#define SEA_DEBUG 1 // To Enable the debug info
+#ifdef SEA_DEBUG
+/* Indicate location */
+# define SEA_L
\
+ do {
\
+ unsigned id = THREAD_GETMEM (THREAD_SELF, tid);
\
+ printf("[%d] %s:%s() %d line: \n", id, __FILE__,
\
+ __FUNCTION__, __LINE__);
\
+ } while (0)
+
+/* Location-aware printf */
+# define SEA_P(fmt, args...)
\
+ do {
\
+ unsigned id = THREAD_GETMEM (THREAD_SELF, tid);
\
+ printf("[%d] %s:%s() %d line: " fmt "\n",
\
+ id, __FILE__,__FUNCTION__,__LINE__,args);
\
+ } while (0)
+
+#else
+# define SEA_L
+# define SEA_P(fmt, args...)
+#endif
+
+/* Add lll_rtmutex_* to support fusyn */
+#define FUSYN_FL_RT_MASK (0x78000000 | FULOCK_FASTPATH_MODE |
\
+ FULOCK_UNLOCK_TYPE_MASK |
PRIOCEILING_MASK)
+#define FUSYN_FL_RT2K_MASK (0x78000000 | FULOCK_FASTPATH_MODE |
\
+ PRIOCEILING_MASK)
+/* Get fusyn flags. */
+#define __LK_FL(fulock)
\
+ ({ unsigned k_flags = 0;
\
+ unsigned flags = ((pthread_mutex_t *)(fulock))->__data.__kind
\
+ & FUSYN_FL_RT2K_MASK;
\
+ k_flags = ((flags << 1) & FULOCK_FL_USER_MK);
\
+ k_flags;})
+/* Get rtnptl flags. */
+#define __RT_FL(fulock)
\
+ ({ unsigned flags = ((pthread_mutex_t *)(fulock))->__data.__kind
\
+ & FUSYN_FL_RT_MASK;
\
+ flags;})
+
+
+extern int is_mutex_robust(const pthread_mutex_t *mutex);
+
+extern int __lll_rtmutex_trylock (volatile int *vfulock, unsigned
flags,
+ unsigned rtflags, unsigned tid);
+#define lll_rtmutex_trylock(futex, tid) \
+ __lll_rtmutex_trylock(&(futex), __LK_FL(&(futex)),
\
+ __RT_FL(&(futex)), tid)
+
+
+extern int __lll_rtmutex_timedlock (volatile int *vfulock, unsigned
flags,
+ unsigned rtflags, unsigned tid,
+ const struct timeout *timeout);
+#define lll_rtmutex_timedlock(futex, tid, abs_time) \
+ ({ struct timeout to;
\
+ int __result = 0;
\
+ if (abs_time->tv_sec < 0 || abs_time->tv_nsec >= 1000000000)
\
+ __result = EINVAL;
\
+ else {
\
+ memset(&to, 0, sizeof(to));
\
+ to.ts = *abs_time;
\
+ __result = __lll_rtmutex_timedlock(&(futex),
\
+ __LK_FL(&(futex)),
\
+ __RT_FL(&(futex)), tid, &to);
\
+ }
\
+ __result; })
+
+
+extern int __lll_rtmutex_lock (volatile int *vfulock, unsigned flags,
+ unsigned rtflags, unsigned tid);
+#define lll_rtmutex_lock(futex, tid) \
+ __lll_rtmutex_lock(&(futex), __LK_FL(&(futex)),
\
+ __RT_FL(&(futex)), tid)
+
+extern int __lll_rtmutex_unlock (volatile int *vfulock, unsigned flags,
+ unsigned rtflags, unsigned tid);
+#define lll_rtmutex_unlock(futex, tid) \
+ __lll_rtmutex_unlock(&(futex), __LK_FL(&(futex)),
\
+ __RT_FL(&(futex)), tid)
+
+
+extern int __lll_rtmutex_unlock_nocheck (volatile int *vfulock,
+ unsigned flags, unsigned
rtflags);
+#define lll_rtmutex_unlock_nocheck(futex) \
+ __lll_rtmutex_unlock_nocheck(&(futex), __LK_FL(&(futex)),
\
+ __RT_FL(&(futex)))
+
+
+extern int __lll_rtmutex_set_consistency (volatile int *vfulock,
+ unsigned flags, unsigned
rtflags,
+ enum fulock_ctl consistency);
+#define lll_rtmutex_set_consistency(futex, state) \
+ __lll_rtmutex_set_consistency(&(futex), __LK_FL(&(futex)),\
+ __RT_FL(&(futex)), state)
+
+
+extern int __lll_rtmutex_get_consistency (volatile int *vfulock,
+ unsigned flags, unsigned
rtflags,
+ int *state);
+#define lll_rtmutex_get_consistency(futex, state) \
+ __lll_rtmutex_get_consistency(&(futex), __LK_FL(&(futex)),\
+ __RT_FL(&(futex)), state)
+
+extern int __lll_rtmutex_ctl(volatile int *vfulock,
+ unsigned flags, int ctl);
+#define lll_rtmutex_ctl(futex, ctl) \
+ __lll_rtmutex_ctl(&(futex), __LK_FL(&(futex)), ctl)
+
+#define CONDVAR_RM_FLAGS FULOCK_FL_RM
+
+#define lll_cmutex_lock(fulock, id) \
+ do {
\
+ while (__lll_rtmutex_lock(&(fulock), CONDVAR_RM_FLAGS, 0, id))
\
+ lll_rtmutex_set_consistency(fulock,
\
+ PTHREAD_MUTEX_ROBUST_CONSISTENT_NP);
\
+ } while (0)
+
+#define lll_cmutex_unlock(fulock, id) \
+ __lll_rtmutex_unlock(&(fulock), CONDVAR_RM_FLAGS, 0, id)
+
+#define LLL_CMUTEX_LOCK(mutex, tid) \
+ lll_cmutex_lock (mutex, tid)
+
+#define LLL_CMUTEX_UNLOCK(mutex, tid) \
+ lll_cmutex_unlock (mutex, tid)
+
+#define ROBUST_MUTEX_FLAGS FULOCK_FL_RM
+
+#define lll_robust_mutex_lock(mutex, tid) \
+ __lll_rtmutex_lock(&(mutex), ROBUST_MUTEX_FLAGS, 0, tid)
+
+#define lll_robust_mutex_trylock(mutex, tid) \
+ __lll_rtmutex_trylock(&(mutex), ROBUST_MUTEX_FLAGS, 0, tid)
+
+#define lll_robust_mutex_unlock(mutex, tid) \
+ __lll_rtmutex_unlock(&(mutex), ROBUST_MUTEX_FLAGS, 0, tid)
+
+#define lll_fuqueue_wait(futex, val) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (ufuqueue_wait, err, 3, futex, val, (void
*)-1); \
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+#define lll_fuqueue_wake(futex, nr) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (ufuqueue_wake, err, 3, futex, nr, 0);
\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+#define lll_fuqueue_timedwait(futex, val, abs_time) \
+ ({ int ret;
\
+ struct timeout to;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ memset(&to, 0, sizeof(to));
\
+ to.ts = *abs_time;
\
+ ret = INTERNAL_SYSCALL (ufuqueue_wait, err, 3, futex, val, &to);
\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+#define lll_fuqueue_requeue(futex, nr_wake, nr_move, mutex) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (ufulock_requeue, err, 4, futex, *futex,
mutex, \
+ __LK_FL(mutex));
\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+#define lll_fuqueue_ctl(futex, code) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (ufuqueue_ctl, err, 2, futex, code);
\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+#else /* Normal NPTL */
+
+#define FUSYN_FL_RT_MASK 0
+
+#define LLL_CMUTEX_LOCK(mutex, tid) \
+ lll_mutex_lock(mutex)
+
+#define LLL_CMUTEX_UNLOCK(mutex, tid) \
+ lll_mutex_unlock (mutex)
+
+#define lll_rtmutex_get_consistency(futex, state) 0
+
+#define lll_rtmutex_set_consistency(futex, state) 0
+
+#define lll_rtmutex_ctl(futex, ctl) 0
+
+#define lll_rtmutex_unlock_nocheck(futex) 0
+
+#define __LK_FL(fulock) 0
+#define __RT_FL(fulock) 0
+
+#define lll_robust_mutex_lock(mutex, tid) 0
+#define lll_robust_mutex_trylock(mutex, tid) 0
+#define lll_robust_mutex_unlock(mutex, tid) 0
+
+#define lll_fuqueue_wait(futex, val) \
+ lll_futex_wait (futex, val)
+
+#define lll_fuqueue_wake(futex, nr) \
+ lll_futex_wake (futex, nr)
+
+#define lll_fuqueue_timedwait(futex, val, timeout) \
+ lll_futex_timed_wait (futex, val, timeout)
+
+#define lll_fuqueue_requeue(futex, nr_wake, nr_move, mutex) \
+ lll_futex_requeue(futex, nr_wake, nr_move, mutex)
+
+#define lll_fuqueue_ctl(futex, code) 0
+
+#endif /* USE_FUSYN_ROBUST_MUTEX */
+
+extern int is_mutex_robust(const pthread_mutex_t *mutex);
+
+#endif
---
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond
_lock.c:1.1.1.1.2.1 Fri Mar 26 02:41:40 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond
_lock.c Tue Mar 30 09:20:29 2004
@@ -1,7 +1,19 @@
#include <pthreadP.h>
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock(mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock(mutex)
+#ifdef USE_FUSYN_ROBUST_MUTEX
+# define LLL_MUTEX_LOCK(mutex, tid) \
+ do { \
+ result = lll_rtmutex_lock (mutex,tid); \
+ if (__builtin_expect (0 != result, 0)) \
+ goto out_err; \
+ } while (0)
+# define LLL_MUTEX_TRYLOCK(mutex, tid) \
+ lll_rtmutex_trylock(mutex, tid)
+#else
+# define LLL_MUTEX_LOCK(mutex, tid) lll_mutex_cond_lock(mutex)
+# define LLL_MUTEX_TRYLOCK(mutex, tid) lll_mutex_cond_trylock(mutex)
+#endif
+
#define __pthread_mutex_lock __pthread_mutex_cond_lock
#define NO_INCR
--- /dev/null Fri Jul 23 03:18:18 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/pthread_mutex_rq_l
ock.c Mon Jul 19 08:38:39 2004
@@ -0,0 +1,70 @@
+/*
+ * (C) 2003,2004 Intel Corporation
+ * Boris Hu <boris.hu@intel.com>
+ *
+ * Distributed under the FSF's LGPL license, v2 or later. */
+
+#include <assert.h>
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+__pthread_mutex_rq_lock (mutex)
+ pthread_mutex_t *mutex;
+{
+ assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
+
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+ int result = 0;
+ switch (__builtin_expect (mutex->__data.__kind &
~NON_MUTEX_KIND_MASK,
+ PTHREAD_MUTEX_TIMED_NP))
+ {
+ /* Recursive mutex. */
+ case PTHREAD_MUTEX_RECURSIVE_NP:
+ /* Check whether we already hold the mutex. */
+ if (mutex->__data.__owner == id)
+ {
+ /* Just bump the counter. */
+ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ /* Overflow of the counter. */
+ return EAGAIN;
+
+ ++mutex->__data.__count;
+
+ return 0;
+ }
+
+ mutex->__data.__count = 1;
+ break;
+
+ /* Error checking mutex. */
+ case PTHREAD_MUTEX_ERRORCHECK_NP:
+ /* Check whether we already hold the mutex. */
+ if (mutex->__data.__owner == id)
+ return EDEADLK;
+
+ /* FALLTHROUGH */
+
+ default:
+ /* Correct code cannot set any other type. */
+ case PTHREAD_MUTEX_TIMED_NP:
+ simple:
+ break;
+
+ case PTHREAD_MUTEX_ADAPTIVE_NP:
+ if (! __is_smp)
+ goto simple;
+ break;
+ }
+ /* Record the ownership. */
+ assert (mutex->__data.__owner == 0);
+ mutex->__data.__owner = id;
+#ifndef NO_INCR
+ ++mutex->__data.__nusers;
+#endif
+
+ return 0;
+}
+strong_alias (__pthread_mutex_rq_lock, pthread_mutex_rq_lock)
+
---
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.
h:1.1.1.1.2.2 Fri Mar 26 02:41:42 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.
h Mon Jul 19 08:38:40 2004
@@ -2,6 +2,9 @@
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+ Redirect lowlevellock to use Fast User SYNchronization (fusyn).
+ Boris Hu <boris.hu@intel.com>, 2003
+
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -35,7 +38,7 @@
#define SYS_futex 240
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
-
+#define FUTEX_REQUEUE 3
/* Initializer for compatibility lock. */
#define LLL_MUTEX_LOCK_INITIALIZER (0)
@@ -93,6 +96,20 @@
"i" (offsetof (tcbhead_t, sysinfo)));
\
} while (0)
+#define lll_futex_timed_wait(futex, val, timespec) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (futex, err, 5, futex, FUTEX_WAIT, val,
\
+ timespec, 0);
\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+#define lll_futex_requeue(futex, nr_wake, nr_move, mutex) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (futex, err, 5, futex, FUTEX_WAKE, INT_MAX,
0, 0);\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
/* Does not preserve %eax and %ecx. */
extern int __lll_mutex_lock_wait (int val, int *__futex)
@@ -370,5 +387,9 @@
#define lll_cond_broadcast(cond) \
__lll_cond_broadcast (cond)
+/* To Enable Robust Mutexes features. */
+#include <linux/fulock.h>
+#include <lowlevelrtlock.h>
+
#endif /* lowlevellock.h */
--- /dev/null Fri Jul 23 03:18:18 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_
broadcast.c Wed Jun 9 12:38:03 2004
@@ -0,0 +1,82 @@
+/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ Hacked to add robust features to condvar by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+#include <kernel-features.h>
+
+
+int
+__pthread_cond_broadcast (cond)
+ pthread_cond_t *cond;
+{
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* Make sure we are alone. */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Are there any waiters to be woken? */
+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+ {
+ /* Yes. Mark them all as woken. */
+ cond->__data.__wakeup_seq = cond->__data.__total_seq;
+ cond->__data.__woken_seq = cond->__data.__total_seq;
+ cond->__data.__futex = (unsigned int) cond->__data.__total_seq *
2;
+ /* Signal that a broadcast happened. */
+ ++cond->__data.__broadcast_seq;
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+ /* Do not use requeue for pshared condvars. */
+ if (cond->__data.__mutex == (void *) ~0l)
+ goto wake_all;
+
+ /* Wake everybody. */
+ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+ /* lll_fuqueue_requeue returns 0 for success and non-zero for
errors. */
+ if (__builtin_expect (lll_fuqueue_requeue (&cond->__data.__futex,
1,
+ INT_MAX,
+ &mut->__data.__lock),
0))
+ {
+ /* The requeue functionality is not available. */
+ wake_all:
+ lll_fuqueue_wake (&cond->__data.__futex, INT_MAX);
+ }
+
+ /* That's all. */
+ return 0;
+ }
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return 0;
+}
+
+versioned_symbol (libpthread, __pthread_cond_broadcast,
pthread_cond_broadcast,
+ GLIBC_2_3_2);
Boris Hu (Hu Jiangtao)
Intel China Software Center
86-021-5257-4545#1277
iNET: 8-752-1277
************************************
These are my thoughts, not my employer's.
************************************
"gpg --recv-keys --keyserver wwwkeys.pgp.net 0FD7685F"
{0FD7685F:CFD6 6F5C A2CB 7881 725B CEA0 956F 9F14 0FD7 685F}
-------------- next part --------------
A non-text attachment was scrubbed...
Name: rtnptl-2.3.patch-4
Type: application/octet-stream
Size: 18955 bytes
Desc: rtnptl-2.3.patch-4
URL: <http://sourceware.org/pipermail/libc-alpha/attachments/20040724/807e978d/attachment.obj>
More information about the Libc-alpha
mailing list