+2006-02-17 Jakub Jelinek <jakub@redhat.com>
+
+ * include/atomic.h (atomic_and, atomic_or): Define.
+
2006-02-15 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/sparc/bits/mman.h: Define MADV_DONTFORK
/* Internal macros for atomic operations for GNU C Library.
- Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
__oldval & __mask; })
#endif
+/* Atomically *mem &= mask and return the old value of *mem. */
+#ifndef atomic_and
+# define atomic_and(mem, mask) \
+ ({ __typeof (*(mem)) __oldval; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __mask = (mask); \
+ \
+ do \
+ __oldval = (*__memp); \
+ while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
+ __oldval \
+ & __mask, \
+ __oldval),\
+ 0)); \
+ \
+ __oldval; })
+#endif
+
+/* Atomically *mem |= mask and return the old value of *mem. */
+#ifndef atomic_or
+# define atomic_or(mem, mask) \
+ ({ __typeof (*(mem)) __oldval; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __mask = (mask); \
+ \
+ do \
+ __oldval = (*__memp); \
+ while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
+ __oldval \
+ | __mask, \
+ __oldval),\
+ 0)); \
+ \
+ __oldval; })
+#endif
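
Both fallbacks follow the usual pattern for deriving a read-modify-write from the architecture's compare-and-exchange: snapshot the word, try to install the combined value, retry if another thread got in between, and hand back the snapshot. A standalone sketch of that pattern (not part of the patch), with GCC's __sync_val_compare_and_swap standing in for the internal atomic_compare_and_exchange_val_acq and sketch_atomic_or as an illustrative name:

#include <stdio.h>

/* Same read/CAS/retry pattern as atomic_and and atomic_or above, shown
   for the OR case.  */
static int
sketch_atomic_or (int *mem, int mask)
{
  int oldval;

  do
    oldval = *mem;
  while (__sync_val_compare_and_swap (mem, oldval, oldval | mask) != oldval);

  return oldval;	/* the value *mem held before the update */
}

int
main (void)
{
  int word = 0x1;
  int prev = sketch_atomic_or (&word, 0x40000000);
  printf ("old=%#x new=%#x\n", (unsigned int) prev, (unsigned int) word);
  return 0;
}
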
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
+2006-02-17 Jakub Jelinek <jakub@redhat.com>
+
+ * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_robust_mutex_dead,
+ lll_robust_mutex_trylock, lll_robust_mutex_lock,
+ lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
+ lll_robust_mutex_unlock): New macros.
+ (__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.
+ * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+ * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+ * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+ * sysdeps/unix/sysv/linux/lowlevelrobustlock.c: New file.
+
2006-02-17 Kaz Kojima <kkojima@rr.iij4u.or.jp>
* sysdeps/unix/sysv/linux/sh/lowlevellock.h: Add lll_robust_mutex_*
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1); \
+ } \
+ while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
({ \
#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_trylock (int *futex, int id)
+{
+ return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
+}
+#define lll_robust_mutex_trylock(lock, id) \
+ __lll_robust_mutex_trylock (&(lock), id)
+
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
static inline void __attribute__((always_inline))
__lll_mutex_lock(int *futex)
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_lock_wait (futex);
+ return result;
+}
+#define lll_robust_mutex_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), id)
+
+
static inline void __attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
{
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
+
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+ attribute_hidden;
static inline int __attribute__ ((always_inline))
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
__lll_mutex_timedlock (&(futex), abstime)
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+ int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime);
+ return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
static inline void __attribute__ ((always_inline))
__lll_mutex_unlock (int *futex)
{
#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
+static inline void __attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex, int mask)
+{
+ int val = atomic_exchange_rel (futex, 0);
+ if (__builtin_expect (val & mask, 0))
+ lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
+
+
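
The robust variants added above keep the owning thread's TID in the futex word and reserve two high bits: FUTEX_WAITERS, so the unlocking thread knows it must issue a futex wake, and FUTEX_OWNER_DIED, set when the owner exits without unlocking. A single-threaded sketch of the uncontended fast paths (not part of the patch), assuming the standard Linux robust-futex bit values; the __sync builtins replace the internal atomics, the wake is only printed, and the *_sketch names are illustrative:

#include <stdio.h>

#define FUTEX_WAITERS     0x80000000
#define FUTEX_OWNER_DIED  0x40000000
#define FUTEX_TID_MASK    0x3fffffff

/* Fast path of lll_robust_mutex_lock: CAS the caller's TID into a zero
   (unowned) word; any other value would send the real code into
   __lll_robust_lock_wait.  */
static int
robust_lock_fastpath_sketch (int *futex, int tid)
{
  return __sync_bool_compare_and_swap (futex, 0, tid) ? 0 : -1;
}

/* lll_robust_mutex_unlock: exchange 0 in (here via a CAS loop standing in
   for atomic_exchange_rel) and wake one waiter only if FUTEX_WAITERS was
   set.  */
static void
robust_unlock_sketch (int *futex)
{
  int old;

  do
    old = *futex;
  while (__sync_val_compare_and_swap (futex, old, 0) != old);

  if (old & FUTEX_WAITERS)
    puts ("would call lll_futex_wake (futex, 1)");
}

int
main (void)
{
  int futex = 0;

  if (robust_lock_fastpath_sketch (&futex, 1234) == 0)
    printf ("locked by TID %d, word=%#x\n",
	    futex & FUTEX_TID_MASK, (unsigned int) futex);

  futex |= FUTEX_WAITERS;	/* pretend a second thread registered as waiter */
  robust_unlock_sketch (&futex);
  printf ("unlocked, word=%#x\n", (unsigned int) futex);
  return 0;
}
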
static inline void __attribute__ ((always_inline))
__lll_mutex_unlock_force (int *futex)
{
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
_r10 == -1 ? -_retval : _retval; \
})
+#define lll_robust_mutex_dead(futexv) \
+do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1); \
+ } \
+while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
({ \
#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define __lll_robust_mutex_trylock(futex, id) \
+ (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
+#define lll_robust_mutex_trylock(futex, id) \
+ __lll_robust_mutex_trylock (&(futex), id)
+
+
#define __lll_mutex_cond_trylock(futex) \
(atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0)
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
#define __lll_mutex_lock(futex) \
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define __lll_robust_mutex_lock(futex, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
+
#define __lll_mutex_cond_lock(futex) \
((void) ({ \
int *__futex = (futex); \
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define __lll_robust_mutex_cond_lock(futex, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ int __id = (id) | FUTEX_WAITERS; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_cond_lock (&(futex), id)
+
+
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+ attribute_hidden;
#define __lll_mutex_timedlock(futex, abstime) \
__lll_mutex_timedlock (&(futex), abstime)
+#define __lll_robust_mutex_timedlock(futex, abstime, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val; \
+ })
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
#define __lll_mutex_unlock(futex) \
((void) ({ \
int *__futex = (futex); \
__lll_mutex_unlock(&(futex))
+#define __lll_robust_mutex_unlock(futex) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1); \
+ }))
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex))
+
+
#define __lll_mutex_unlock_force(futex) \
((void) ({ \
int *__futex = (futex); \
--- /dev/null
+/* Copyright (C) 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <sys/time.h>
+#include <pthreadP.h>
+
+
+int
+__lll_robust_lock_wait (int *futex)
+{
+ int oldval = *futex;
+ int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+ do
+ {
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ return oldval;
+
+ int newval = oldval | FUTEX_WAITERS;
+ if (oldval != newval
+ && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+ continue;
+
+ lll_futex_wait (futex, newval);
+ }
+ while ((oldval = atomic_compare_and_exchange_val_acq (futex, tid, 0)) != 0);
+ return 0;
+}
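
To recap the loop above: a waiter first reports a dead previous owner straight back to its caller, otherwise makes sure FUTEX_WAITERS is set before blocking, and after waking retries to CAS its own TID into a now-free word. The standalone sketch below (not part of the patch) walks those transitions single-threaded: fake_futex_wait stands in for both lll_futex_wait and the kernel's marking of a dead owner, the __sync builtins replace the internal atomics, and the EOWNERDEAD remark only reflects how the pthread_mutex callers are expected to consume the returned value.

#include <stdio.h>

#define FUTEX_WAITERS     0x80000000
#define FUTEX_OWNER_DIED  0x40000000
#define FUTEX_TID_MASK    0x3fffffff

/* Stand-in for lll_futex_wait: instead of blocking, pretend the owner died
   while we slept, which is when the kernel sets FUTEX_OWNER_DIED.  */
static void
fake_futex_wait (int *futex, int val)
{
  (void) val;
  *futex |= FUTEX_OWNER_DIED;
}

/* Mirrors the structure of __lll_robust_lock_wait.  */
static int
robust_lock_wait_sketch (int *futex, int tid)
{
  int oldval = *futex;

  do
    {
      /* A dead previous owner is reported back to the caller.  */
      if (oldval & FUTEX_OWNER_DIED)
	return oldval;

      /* Set FUTEX_WAITERS before blocking so the unlocker knows it must
	 wake us; on a lost race fall through to the retry below.  */
      int newval = oldval | FUTEX_WAITERS;
      if (oldval != newval
	  && !__sync_bool_compare_and_swap (futex, oldval, newval))
	continue;

      fake_futex_wait (futex, newval);
    }
  /* Try to take the lock by CAS-ing our TID into a zero word; the returned
     old value feeds the next iteration while the lock stays busy.  */
  while ((oldval = __sync_val_compare_and_swap (futex, 0, tid)) != 0);

  return 0;
}

int
main (void)
{
  int futex = 4242;	/* lock word: held by (pretend) TID 4242 */
  int res = robust_lock_wait_sketch (&futex, 1111);

  if (res & FUTEX_OWNER_DIED)
    printf ("owner %d died holding the lock; callers would see EOWNERDEAD\n",
	    res & FUTEX_TID_MASK);
  else
    printf ("lock acquired, word=%#x\n", (unsigned int) futex);
  return 0;
}
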
+
+
+int
+__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
+{
+ /* Reject invalid timeouts. */
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+ return EINVAL;
+
+ int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+ do
+ {
+ struct timeval tv;
+ struct timespec rt;
+
+ /* Get the current time. */
+ (void) __gettimeofday (&tv, NULL);
+
+ /* Compute relative timeout. */
+ rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+ rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+ if (rt.tv_nsec < 0)
+ {
+ rt.tv_nsec += 1000000000;
+ --rt.tv_sec;
+ }
+
+ /* Already timed out? */
+ if (rt.tv_sec < 0)
+ return ETIMEDOUT;
+
+ /* Wait. */
+ int oldval = *futex;
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ return oldval;
+
+ int newval = oldval | FUTEX_WAITERS;
+ if (oldval != newval
+ && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+ continue;
+
+ lll_futex_timed_wait (futex, newval, &rt);
+ }
+ while (atomic_compare_and_exchange_bool_acq (futex, tid, 0));
+
+ return 0;
+}
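
FUTEX_WAIT takes a relative timeout, so the loop above recomputes one from the caller's absolute deadline on every iteration, just like the non-robust __lll_timedlock_wait. A standalone sketch of that conversion alone (not part of the patch), using the public gettimeofday in place of the internal __gettimeofday and a hypothetical helper name:

#include <errno.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

/* Convert an absolute deadline into the relative timeout FUTEX_WAIT
   expects; same arithmetic as above.  Returns 0 and fills *rt, or
   ETIMEDOUT if the deadline has already passed.  */
static int
abstime_to_reltime (const struct timespec *abstime, struct timespec *rt)
{
  struct timeval tv;

  (void) gettimeofday (&tv, NULL);

  rt->tv_sec = abstime->tv_sec - tv.tv_sec;
  rt->tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
  if (rt->tv_nsec < 0)
    {
      /* Borrow one second so tv_nsec stays in [0, 1e9).  */
      rt->tv_nsec += 1000000000;
      --rt->tv_sec;
    }

  return rt->tv_sec < 0 ? ETIMEDOUT : 0;
}

int
main (void)
{
  struct timeval now;
  (void) gettimeofday (&now, NULL);

  /* A deadline two seconds from now.  */
  struct timespec abstime = { .tv_sec = now.tv_sec + 2,
			      .tv_nsec = now.tv_usec * 1000 };
  struct timespec rt;

  if (abstime_to_reltime (&abstime, &rt) == 0)
    printf ("relative wait: %ld.%09ld s\n", (long) rt.tv_sec, rt.tv_nsec);
  else
    puts ("already timed out");
  return 0;
}
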
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ INTERNAL_SYSCALL_DECL (__err); \
+ int *__futexp = &(futexv); \
+ \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \
+ } \
+ while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
({ \
# define __lll_rel_instr "sync"
#endif
-/* Set *futex to 1 if it is 0, atomically. Returns the old value */
-#define __lll_trylock(futex) \
+/* Set *futex to ID if it is 0, atomically. Returns the old value */
+#define __lll_robust_trylock(futex, id) \
({ int __val; \
__asm __volatile ("1: lwarx %0,0,%2\n" \
" cmpwi 0,%0,0\n" \
" bne- 1b\n" \
"2: " __lll_acq_instr \
: "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (1), "m" (*futex) \
+ : "r" (futex), "r" (id), "m" (*futex) \
: "cr0", "memory"); \
__val; \
})
+#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+
+/* Set *futex to 1 if it is 0, atomically. Returns the old value */
+#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
+
#define lll_mutex_trylock(lock) __lll_trylock (&(lock))
/* Set *futex to 2 if it is 0, atomically. Returns the old value */
-#define __lll_cond_trylock(futex) \
- ({ int __val; \
- __asm __volatile ("1: lwarx %0,0,%2\n" \
- " cmpwi 0,%0,0\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%2\n" \
- " bne- 1b\n" \
- "2: " __lll_acq_instr \
- : "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (2), "m" (*futex) \
- : "cr0", "memory"); \
- __val; \
- })
+#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
+
#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock))
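
With the stored value parametrized, the plain and condvar trylocks become one-line wrappers passing 1 and 2, while the robust form passes the owner's TID. The observable behaviour of the lwarx/stwcx. loop is a compare-and-exchange: install the new value only if the word is 0 and return the previous contents, plus the acquire barrier chosen by __lll_acq_instr. A hedged C sketch of that contract (not part of the patch), with __sync_val_compare_and_swap as a stand-in and trylock_sketch as an illustrative name:

#include <stdio.h>

/* Atomically: if *futex == 0 store ID, and return the value *futex held
   beforehand (0 means the caller now owns the lock).  */
static int
trylock_sketch (int *futex, int id)
{
  return __sync_val_compare_and_swap (futex, 0, id);
}

int
main (void)
{
  int lock = 0;

  if (trylock_sketch (&lock, 1) == 0)	/* __lll_trylock stores 1 */
    puts ("acquired");
  if (trylock_sketch (&lock, 2) != 0)	/* __lll_cond_trylock sees it held */
    puts ("already held, old value nonzero");
  return 0;
}
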
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
#define lll_mutex_lock(lock) \
(void) ({ \
__lll_lock_wait (__futex); \
})
+#define lll_robust_mutex_lock(lock, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+
#define lll_mutex_cond_lock(lock) \
(void) ({ \
int *__futex = &(lock); \
__lll_lock_wait (__futex); \
})
+#define lll_robust_mutex_cond_lock(lock, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ int __id = (id) | FUTEX_WAITERS; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+
+
extern int __lll_timedlock_wait
(int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+ (int *futex, const struct timespec *) attribute_hidden;
#define lll_mutex_timedlock(lock, abstime) \
({ \
__val; \
})
+#define lll_robust_mutex_timedlock(lock, abstime, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val; \
+ })
+
#define lll_mutex_unlock(lock) \
((void) ({ \
int *__futex = &(lock); \
lll_futex_wake (__futex, 1); \
}))
+#define lll_robust_mutex_unlock(lock) \
+ ((void) ({ \
+ int *__futex = &(lock); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1); \
+ }))
+
#define lll_mutex_unlock_force(lock) \
((void) ({ \
int *__futex = &(lock); \
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1); \
+ } \
+ while (0)
+
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futex, nr_wake, nr_move, mutex, val) \
({ \
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_trylock (int *futex, int id)
+{
+ unsigned int old;
+
+ __asm __volatile ("cs %0,%3,%1"
+ : "=d" (old), "=Q" (*futex)
+ : "0" (0), "d" (id), "m" (*futex) : "cc", "memory" );
+ return old != 0;
+}
+#define lll_robust_mutex_trylock(futex, id) \
+ __lll_robust_mutex_trylock (&(futex), id)
+
+
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
static inline void
__attribute__ ((always_inline))
}
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_lock_wait (futex);
+ return result;
+}
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
static inline void
__attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
}
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
extern int __lll_timedlock_wait
(int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+ (int *futex, const struct timespec *) attribute_hidden;
static inline int
__attribute__ ((always_inline))
#define lll_mutex_timedlock(futex, abstime) \
__lll_mutex_timedlock (&(futex), abstime)
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+ int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime);
+ return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
static inline void
__attribute__ ((always_inline))
__lll_mutex_unlock(&(futex))
+static inline void
+__attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex, int mask)
+{
+ int oldval;
+ int newval = 0;
+
+ lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
+ if (oldval & mask)
+ lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
+
+
static inline void
__attribute__ ((always_inline))
__lll_mutex_unlock_force (int *futex)