sourceware.org Git - glibc.git/commitdiff
* include/atomic.h (atomic_and, atomic_or): Define.
author     Ulrich Drepper <drepper@redhat.com>
           Fri, 17 Feb 2006 18:52:09 +0000 (18:52 +0000)
committer  Ulrich Drepper <drepper@redhat.com>
           Fri, 17 Feb 2006 18:52:09 +0000 (18:52 +0000)
ChangeLog
include/atomic.h
nptl/ChangeLog
nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c [new file with mode: 0644]
nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h

index df82e2b0b307332759fc05a34afa2ce676c75791..7d478859dc1a4ff71bdfee8202eee7983aa168af 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2006-02-17  Jakub Jelinek  <jakub@redhat.com>
+
+       * include/atomic.h (atomic_and, atomic_or): Define.
+
 2006-02-15  Ulrich Drepper  <drepper@redhat.com>
 
        * sysdeps/unix/sysv/linux/sparc/bits/mman.h: Define MADV_DONTFORK
index 8b76435a8168c64c57b7577eb41ff38c13a80586..a1598e3850a1a9bde707c5a8673ed0f2da00d306 100644 (file)
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -1,5 +1,5 @@
 /* Internal macros for atomic operations for GNU C Library.
-   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
      __oldval & __mask; })
 #endif
 
+/* Atomically *mem &= mask and return the old value of *mem.  */
+#ifndef atomic_and
+# define atomic_and(mem, mask) \
+  ({ __typeof (*(mem)) __oldval;                                             \
+     __typeof (mem) __memp = (mem);                                          \
+     __typeof (*(mem)) __mask = (mask);                                              \
+                                                                             \
+     do                                                                              \
+       __oldval = (*__memp);                                                 \
+     while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp,   \
+                                                                   __oldval  \
+                                                                   & __mask, \
+                                                                   __oldval),\
+                             0));                                            \
+                                                                             \
+     __oldval; })
+#endif
+
+/* Atomically *mem |= mask and return the old value of *mem.  */
+#ifndef atomic_or
+# define atomic_or(mem, mask) \
+  ({ __typeof (*(mem)) __oldval;                                             \
+     __typeof (mem) __memp = (mem);                                          \
+     __typeof (*(mem)) __mask = (mask);                                              \
+                                                                             \
+     do                                                                              \
+       __oldval = (*__memp);                                                 \
+     while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp,   \
+                                                                   __oldval  \
+                                                                   | __mask, \
+                                                                   __oldval),\
+                             0));                                            \
+                                                                             \
+     __oldval; })
+#endif
 
 #ifndef atomic_full_barrier
 # define atomic_full_barrier() __asm ("" ::: "memory")
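
The generic atomic_and and atomic_or added above fall back to a compare-and-exchange loop whenever an architecture does not supply its own definition, and both hand back the old value of *mem. A minimal standalone sketch of the same fetch-and-AND / fetch-and-OR semantics, written against GCC's __sync_val_compare_and_swap builtin rather than the glibc-internal macros (the function names here are illustrative only):

    /* Sketch of the generic fallback: retry a CAS until the masked
       update lands, then return the value that was there before.  */
    #include <stdio.h>

    static int
    fetch_and_and (int *mem, int mask)
    {
      int oldval;
      do
        oldval = *mem;                    /* snapshot the current value */
      while (__sync_val_compare_and_swap (mem, oldval, oldval & mask)
             != oldval);                  /* retry if another thread raced */
      return oldval;
    }

    static int
    fetch_and_or (int *mem, int mask)
    {
      int oldval;
      do
        oldval = *mem;
      while (__sync_val_compare_and_swap (mem, oldval, oldval | mask)
             != oldval);
      return oldval;
    }

    int
    main (void)
    {
      int word = 0x2;
      int old = fetch_and_or (&word, 0x40000000);
      printf ("old=%#x new=%#x\n", old, word);
      return 0;
    }

The robust-mutex code below relies on exactly this behaviour when it marks a lock word with FUTEX_OWNER_DIED via atomic_or.
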
index aa206ebd2f0b776d8087da3ef6f6f8ddaab248b6..00501134c03d6eae0bef8de22166046161dd1588 100644 (file)
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,15 @@
+2006-02-17  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_robust_mutex_dead,
+       lll_robust_mutex_trylock, lll_robust_mutex_lock,
+       lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
+       lll_robust_mutex_unlock): New macros.
+       (__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.c: New file.
+
 2006-02-17  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
 
        * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Add lll_robust_mutex_*
index ab325d2b061c926175b1ec768fa7deb2ae48748c..1a2e8cbb0718854bf38bb1604ded3f8711ce9a9f 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
     INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret;                 \
   })
 
+#define lll_robust_mutex_dead(futexv) \
+  do                                                                         \
+    {                                                                        \
+      int *__futexp = &(futexv);                                             \
+      atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
+      lll_futex_wake (__futexp, 1);                                          \
+    }                                                                        \
+  while (0)
+
 /* Returns non-zero if error happened, zero if success.  */
 #define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
   ({                                                                         \
@@ -106,7 +115,16 @@ __lll_mutex_cond_trylock(int *futex)
 #define lll_mutex_cond_trylock(lock)   __lll_mutex_cond_trylock (&(lock))
 
 
+static inline int __attribute__((always_inline))
+__lll_robust_mutex_trylock(int *futex, int id)
+{
+  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
+}
+#define lll_robust_mutex_trylock(lock, id) \
+  __lll_robust_mutex_trylock (&(lock), id)
+
 extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
 
 static inline void __attribute__((always_inline))
 __lll_mutex_lock(int *futex)
@@ -117,6 +135,18 @@ __lll_mutex_lock(int *futex)
 #define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
 
 
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_lock_wait (futex);
+  return result;
+}
+#define lll_robust_mutex_lock(futex, id) \
+  __lll_robust_mutex_lock (&(futex), id)
+
+
 static inline void __attribute__ ((always_inline))
 __lll_mutex_cond_lock (int *futex)
 {
@@ -126,8 +156,14 @@ __lll_mutex_cond_lock (int *futex)
 #define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
 
 
+#define lll_robust_mutex_cond_lock(futex, id) \
+  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
+
 extern int __lll_timedlock_wait (int *futex, const struct timespec *)
        attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+       attribute_hidden;
 
 static inline int __attribute__ ((always_inline))
 __lll_mutex_timedlock (int *futex, const struct timespec *abstime)
@@ -141,6 +177,19 @@ __lll_mutex_timedlock (int *futex, const struct timespec *abstime)
   __lll_mutex_timedlock (&(futex), abstime)
 
 
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+                             int id)
+{
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_timedlock_wait (futex, abstime);
+  return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+  __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
 static inline void __attribute__ ((always_inline))
 __lll_mutex_unlock (int *futex)
 {
@@ -151,6 +200,17 @@ __lll_mutex_unlock (int *futex)
 #define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
 
 
+static inline void __attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex, int mask)
+{
+  int val = atomic_exchange_rel (futex, 0);
+  if (__builtin_expect (val & mask, 0))
+    lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) \
+  __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
+
+
 static inline void __attribute__ ((always_inline))
 __lll_mutex_unlock_force (int *futex)
 {
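
The new lll_robust_mutex_* macros for Alpha (and, below, ia64, powerpc and s390) all drive the same futex-word protocol: the low bits of the lock word hold the owner's TID, FUTEX_WAITERS marks contention, and FUTEX_OWNER_DIED marks a lock whose owner exited without unlocking. A hedged, single-threaded walk through the word transitions, using the flag values from the Linux futex ABI (the program makes no futex calls and is illustrative only):

    /* Single-threaded model of the robust lock word; only the bit
       manipulation is shown, no kernel interaction.  */
    #include <stdio.h>

    #define FUTEX_WAITERS     0x80000000u
    #define FUTEX_OWNER_DIED  0x40000000u
    #define FUTEX_TID_MASK    0x3fffffffu

    int
    main (void)
    {
      unsigned int futex = 0;       /* 0 == unlocked */
      unsigned int tid = 1234;

      /* lll_robust_mutex_lock: fast path is CAS 0 -> tid.  */
      if ((futex & FUTEX_TID_MASK) == 0)
        futex = (futex & ~FUTEX_TID_MASK) | tid;

      /* A contender in __lll_robust_lock_wait ORs in FUTEX_WAITERS
         before sleeping, so the unlocker knows to wake someone.  */
      futex |= FUTEX_WAITERS;

      /* lll_robust_mutex_dead: the dying owner's lock is flagged with
         FUTEX_OWNER_DIED (via atomic_or) and one waiter is woken.  */
      futex |= FUTEX_OWNER_DIED;

      printf ("tid=%u waiters=%d owner_died=%d\n",
              futex & FUTEX_TID_MASK,
              (futex & FUTEX_WAITERS) != 0,
              (futex & FUTEX_OWNER_DIED) != 0);
      return 0;
    }

lll_robust_mutex_unlock then swaps 0 back into the word with atomic_exchange_rel and calls lll_futex_wake only when FUTEX_WAITERS was set in the old value.
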
index 4219fe2716d48ce61cbc0340dd9e779cf8595435..ece9a7fc729cf0cc008609d35ba45997b0e353f9 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
    _r10 == -1 ? -_retval : _retval;                                    \
 })
 
+#define lll_robust_mutex_dead(futexv)                                  \
+do                                                                     \
+  {                                                                    \
+    int *__futexp = &(futexv);                                         \
+    atomic_or (__futexp, FUTEX_OWNER_DIED);                            \
+    DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1);       \
+  }                                                                    \
+while (0)
+
 /* Returns non-zero if error happened, zero if success.  */
 #define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val)                \
 ({                                                                          \
 #define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
 
 
+#define __lll_robust_mutex_trylock(futex, id) \
+  (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
+#define lll_robust_mutex_trylock(futex, id) \
+  __lll_robust_mutex_trylock (&(futex), id)
+
+
 #define __lll_mutex_cond_trylock(futex) \
   (atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0)
 #define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
 
 
 extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
 
 
 #define __lll_mutex_lock(futex)                                                \
@@ -96,6 +112,18 @@ extern void __lll_lock_wait (int *futex) attribute_hidden;
 #define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
 
 
+#define __lll_robust_mutex_lock(futex, id)                             \
+  ({                                                                   \
+    int *__futex = (futex);                                            \
+    int __val = 0;                                                     \
+                                                                       \
+    if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0)    \
+      __val = __lll_robust_lock_wait (__futex);                                \
+    __val;                                                             \
+  })
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
+
 #define __lll_mutex_cond_lock(futex)                                   \
   ((void) ({                                                           \
     int *__futex = (futex);                                            \
@@ -105,8 +133,24 @@ extern void __lll_lock_wait (int *futex) attribute_hidden;
 #define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
 
 
+#define __lll_robust_mutex_cond_lock(futex, id)                                \
+  ({                                                                   \
+    int *__futex = (futex);                                            \
+    int __val = 0;                                                     \
+    int __id = (id) | FUTEX_WAITERS;                                   \
+                                                                       \
+    if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0)  \
+      __val = __lll_robust_lock_wait (__futex);                                \
+    __val;                                                             \
+  })
+#define lll_robust_mutex_cond_lock(futex, id) \
+  __lll_robust_mutex_cond_lock (&(futex), id)
+
+
 extern int __lll_timedlock_wait (int *futex, const struct timespec *)
      attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+     attribute_hidden;
 
 
 #define __lll_mutex_timedlock(futex, abstime)                          \
@@ -122,6 +166,19 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *)
   __lll_mutex_timedlock (&(futex), abstime)
 
 
+#define __lll_robust_mutex_timedlock(futex, abstime, id)               \
+  ({                                                                   \
+    int *__futex = (futex);                                            \
+    int __val = 0;                                                     \
+                                                                       \
+    if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0)    \
+      __val = __lll_robust_timedlock_wait (__futex, abstime);          \
+    __val;                                                             \
+  })
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+  __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
 #define __lll_mutex_unlock(futex)                      \
   ((void) ({                                           \
     int *__futex = (futex);                            \
@@ -134,6 +191,18 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *)
   __lll_mutex_unlock(&(futex))
 
 
+#define __lll_robust_mutex_unlock(futex)               \
+  ((void) ({                                           \
+    int *__futex = (futex);                            \
+    int __val = atomic_exchange_rel (__futex, 0);      \
+                                                       \
+    if (__builtin_expect (__val & FUTEX_WAITERS, 0))   \
+      lll_futex_wake (__futex, 1);                     \
+  }))
+#define lll_robust_mutex_unlock(futex) \
+  __lll_robust_mutex_unlock(&(futex))
+
+
 #define __lll_mutex_unlock_force(futex)                \
   ((void) ({                                   \
     int *__futex = (futex);                    \
diff --git a/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c b/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
new file mode 100644 (file)
index 0000000..b7faeaf
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
@@ -0,0 +1,95 @@
+/* Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <sys/time.h>
+#include <pthreadP.h>
+
+
+int
+__lll_robust_lock_wait (int *futex)
+{
+  int oldval = *futex;
+  int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+  do
+    {
+      if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+       return oldval;
+
+      int newval = oldval | FUTEX_WAITERS;
+      if (oldval != newval
+         && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+       continue;
+
+      lll_futex_wait (futex, newval);
+    }
+  while ((oldval = atomic_compare_and_exchange_val_acq (futex, tid, 0)) != 0);
+  return 0;
+}
+
+
+int
+__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
+{
+  /* Reject invalid timeouts.  */
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+    return EINVAL;
+
+  int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+  do
+    {
+      struct timeval tv;
+      struct timespec rt;
+
+      /* Get the current time.  */
+      (void) __gettimeofday (&tv, NULL);
+
+      /* Compute relative timeout.  */
+      rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+      rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+      if (rt.tv_nsec < 0)
+       {
+         rt.tv_nsec += 1000000000;
+         --rt.tv_sec;
+       }
+
+      /* Already timed out?  */
+      if (rt.tv_sec < 0)
+       return ETIMEDOUT;
+
+      /* Wait.  */
+      int oldval = *futex;
+      if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+       return oldval;
+
+      int newval = oldval | FUTEX_WAITERS;
+      if (oldval != newval
+         && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+       continue;
+
+      lll_futex_timed_wait (futex, newval, &rt);
+    }
+  while (atomic_compare_and_exchange_bool_acq (futex, tid, 0));
+
+  return 0;
+}
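
__lll_robust_timedlock_wait converts the caller's absolute deadline into the relative timeout that the futex wait expects, borrowing a second whenever the nanosecond difference goes negative. A small standalone sketch of just that conversion, using the public gettimeofday instead of the internal __gettimeofday (the two-second deadline is made up for illustration):

    /* Rebuild the abstime -> relative-timeout arithmetic used above.  */
    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    int
    main (void)
    {
      struct timeval tv;
      struct timespec abstime, rt;

      gettimeofday (&tv, NULL);
      abstime.tv_sec = tv.tv_sec + 2;          /* deadline ~2s from now */
      abstime.tv_nsec = tv.tv_usec * 1000;

      gettimeofday (&tv, NULL);
      rt.tv_sec = abstime.tv_sec - tv.tv_sec;
      rt.tv_nsec = abstime.tv_nsec - tv.tv_usec * 1000;
      if (rt.tv_nsec < 0)
        {
          rt.tv_nsec += 1000000000;            /* borrow one second */
          --rt.tv_sec;
        }

      if (rt.tv_sec < 0)
        puts ("already timed out -> ETIMEDOUT");
      else
        printf ("relative timeout: %ld s %ld ns\n",
                (long) rt.tv_sec, (long) rt.tv_nsec);
      return 0;
    }

As in the untimed path, a lock word carrying FUTEX_OWNER_DIED is returned to the caller instead of being retried, so the pthread layer can recover the dead owner's state.
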
index f9eaa11e9861e11d658e0ab84772a47112c0bc55..fcc1240fef3d5d0f0e03189d056a9ab506c52673 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;                \
   })
 
+#define lll_robust_mutex_dead(futexv) \
+  do                                                                         \
+    {                                                                        \
+      INTERNAL_SYSCALL_DECL (__err);                                         \
+      int *__futexp = &(futexv);                                             \
+                                                                             \
+      atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
+      INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0);        \
+    }                                                                        \
+  while (0)
+
 /* Returns non-zero if error happened, zero if success.  */
 #define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
   ({                                                                         \
 # define __lll_rel_instr       "sync"
 #endif
 
-/* Set *futex to 1 if it is 0, atomically.  Returns the old value */
-#define __lll_trylock(futex) \
+/* Set *futex to ID if it is 0, atomically.  Returns the old value */
+#define __lll_robust_trylock(futex, id) \
   ({ int __val;                                                                      \
      __asm __volatile ("1:     lwarx   %0,0,%2\n"                            \
                       "        cmpwi   0,%0,0\n"                             \
                       "        bne-    1b\n"                                 \
                       "2:      " __lll_acq_instr                             \
                       : "=&r" (__val), "=m" (*futex)                         \
-                      : "r" (futex), "r" (1), "m" (*futex)                   \
+                      : "r" (futex), "r" (id), "m" (*futex)                  \
                       : "cr0", "memory");                                    \
      __val;                                                                  \
   })
 
+#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+
+/* Set *futex to 1 if it is 0, atomically.  Returns the old value */
+#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
+
 #define lll_mutex_trylock(lock)        __lll_trylock (&(lock))
 
 /* Set *futex to 2 if it is 0, atomically.  Returns the old value */
-#define __lll_cond_trylock(futex) \
-  ({ int __val;                                                                      \
-     __asm __volatile ("1:     lwarx   %0,0,%2\n"                            \
-                      "        cmpwi   0,%0,0\n"                             \
-                      "        bne     2f\n"                                 \
-                      "        stwcx.  %3,0,%2\n"                            \
-                      "        bne-    1b\n"                                 \
-                      "2:      " __lll_acq_instr                             \
-                      : "=&r" (__val), "=m" (*futex)                         \
-                      : "r" (futex), "r" (2), "m" (*futex)                   \
-                      : "cr0", "memory");                                    \
-     __val;                                                                  \
-  })
+#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
+
 #define lll_mutex_cond_trylock(lock)   __lll_cond_trylock (&(lock))
 
 
 extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
 
 #define lll_mutex_lock(lock) \
   (void) ({                                                                  \
@@ -146,6 +152,16 @@ extern void __lll_lock_wait (int *futex) attribute_hidden;
       __lll_lock_wait (__futex);                                             \
   })
 
+#define lll_robust_mutex_lock(lock, id) \
+  ({                                                                         \
+    int *__futex = &(lock);                                                  \
+    int __val = 0;                                                           \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+                                                               0), 0))       \
+      __val = __lll_robust_lock_wait (__futex);                                      \
+    __val;                                                                   \
+  })
+
 #define lll_mutex_cond_lock(lock) \
   (void) ({                                                                  \
     int *__futex = &(lock);                                                  \
@@ -154,8 +170,22 @@ extern void __lll_lock_wait (int *futex) attribute_hidden;
       __lll_lock_wait (__futex);                                             \
   })
 
+#define lll_robust_mutex_cond_lock(lock, id) \
+  ({                                                                         \
+    int *__futex = &(lock);                                                  \
+    int __val = 0;                                                           \
+    int __id = id | FUTEX_WAITERS;                                           \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
+                                                               0), 0))       \
+      __val = __lll_robust_lock_wait (__futex);                                      \
+    __val;                                                                   \
+  })
+
+
 extern int __lll_timedlock_wait
   (int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+  (int *futex, const struct timespec *) attribute_hidden;
 
 #define lll_mutex_timedlock(lock, abstime) \
   ({                                                                         \
@@ -167,6 +197,16 @@ extern int __lll_timedlock_wait
     __val;                                                                   \
   })
 
+#define lll_robust_mutex_timedlock(lock, abstime, id) \
+  ({                                                                         \
+    int *__futex = &(lock);                                                  \
+    int __val = 0;                                                           \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+                                                               0), 0))       \
+      __val = __lll_robust_timedlock_wait (__futex, abstime);                \
+    __val;                                                                   \
+  })
+
 #define lll_mutex_unlock(lock) \
   ((void) ({                                                                 \
     int *__futex = &(lock);                                                  \
@@ -175,6 +215,14 @@ extern int __lll_timedlock_wait
       lll_futex_wake (__futex, 1);                                           \
   }))
 
+#define lll_robust_mutex_unlock(lock) \
+  ((void) ({                                                                 \
+    int *__futex = &(lock);                                                  \
+    int __val = atomic_exchange_rel (__futex, 0);                            \
+    if (__builtin_expect (__val & FUTEX_WAITERS, 0))                         \
+      lll_futex_wake (__futex, 1);                                           \
+  }))
+
 #define lll_mutex_unlock_force(lock) \
   ((void) ({                                                                 \
     int *__futex = &(lock);                                                  \
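
On powerpc the patch avoids duplicating the lwarx/stwcx. loop: __lll_trylock and __lll_cond_trylock become wrappers around a generalized __lll_robust_trylock (futex, id) that stores an arbitrary value (1, 2, or a thread ID, possibly with FUTEX_WAITERS) when the word is 0. A hedged C-level model of what that asm sequence computes, expressed with a GCC builtin rather than the actual load-reserved/store-conditional pair:

    /* C model only: store ID into *futex iff it is currently 0 and
       return the previous value, so 0 means the lock was acquired.  */
    #include <stdio.h>

    static int
    robust_trylock_model (int *futex, int id)
    {
      return __sync_val_compare_and_swap (futex, 0, id);
    }

    int
    main (void)
    {
      int futex = 0;
      printf ("first try:  old=%d futex=%d\n",
              robust_trylock_model (&futex, 1234), futex);  /* acquires */
      printf ("second try: old=%d futex=%d\n",
              robust_trylock_model (&futex, 5678), futex);  /* fails */
      return 0;
    }

lll_robust_mutex_trylock on this target reports failure as the non-zero old value; the alpha and ia64 variants above instead compare the CAS result against 0 and return a boolean.
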
index 5f20537943ca3c4e8cecacf06e16d3fc28c30252..6baab90f561fe5efafe97be3f9ffefb40426b81a 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
   })
 
 
+#define lll_robust_mutex_dead(futexv) \
+  do                                                                         \
+    {                                                                        \
+      int *__futexp = &(futexv);                                             \
+                                                                             \
+      atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
+      lll_futex_wake (__futexp, 1);                                          \
+    }                                                                        \
+  while (0)
+
+
 /* Returns non-zero if error happened, zero if success.  */
 #define lll_futex_requeue(futex, nr_wake, nr_move, mutex, val) \
   ({                                                                         \
@@ -167,7 +178,23 @@ __lll_mutex_cond_trylock (int *futex)
 #define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
 
 
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_trylock (int *futex, int id)
+{
+    unsigned int old;
+
+    __asm __volatile ("cs %0,%3,%1"
+                      : "=d" (old), "=Q" (*futex)
+                      : "0" (0), "d" (id), "m" (*futex) : "cc", "memory" );
+    return old != 0;
+}
+#define lll_robust_mutex_trylock(futex, id) \
+  __lll_robust_mutex_trylock (&(futex), id)
+
+
 extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
 
 static inline void
 __attribute__ ((always_inline))
@@ -178,6 +205,17 @@ __lll_mutex_lock (int *futex)
 }
 #define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
 
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_lock_wait (futex);
+  return result;
+}
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
 static inline void
 __attribute__ ((always_inline))
 __lll_mutex_cond_lock (int *futex)
@@ -187,8 +225,13 @@ __lll_mutex_cond_lock (int *futex)
 }
 #define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
 
+#define lll_robust_mutex_cond_lock(futex, id) \
+  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
 extern int __lll_timedlock_wait
   (int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+  (int *futex, const struct timespec *) attribute_hidden;
 
 static inline int
 __attribute__ ((always_inline))
@@ -202,6 +245,19 @@ __lll_mutex_timedlock (int *futex, const struct timespec *abstime)
 #define lll_mutex_timedlock(futex, abstime) \
   __lll_mutex_timedlock (&(futex), abstime)
 
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+                             int id)
+{
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_timedlock_wait (futex, abstime);
+  return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+  __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
 
 static inline void
 __attribute__ ((always_inline))
@@ -218,6 +274,21 @@ __lll_mutex_unlock (int *futex)
   __lll_mutex_unlock(&(futex))
 
 
+static inline void
+__attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex, int mask)
+{
+  int oldval;
+  int newval = 0;
+
+  lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
+  if (oldval & mask)
+    lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) \
+  __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
+
+
 static inline void
 __attribute__ ((always_inline))
 __lll_mutex_unlock_force (int *futex)