This is the mail archive of the libc-hacker@sourceware.org mailing list for the glibc project.
Note that libc-hacker is a closed list. You may look at the archives of this list, but subscription and posting are not open.
Index Nav: | [Date Index] [Subject Index] [Author Index] [Thread Index] | |
---|---|---|
Message Nav: | [Date Prev] [Date Next] | [Thread Prev] [Thread Next] |
Other format: | [Raw text] |
Hi! On top of the patch I posted yesterday, done only for x86_64 and powerpc (will finish the rest of arches if you agree this is the right direction, even write a ChangeLog entry): 1) lll_lock/lll_unlock etc. now also have a private argument (LLL_PRIVATE or LLL_SHARED) to make it explicit what the type of the futex is, the LLL_PRIVATE for __builtin_constant_p argument is optimized (uses __lll_*_private helpers which have the same number of arguments as __lll_* had before and always use private futexes if supported), otherwise the helpers take an additional argument (I believe most if not all current LLL_SHARED users will in the end be variable users, except perhaps for wake_tid) 2) removed the lll_mutex_lock vs. lll_lock duplication, on all architectures that support NPTL they are defined the same anyway, similarly removed various macros and prototypes of things that have long not existed in NPTL or are never used 3) on x86_64 (later i386 too) the libc !UP optimized assembly to jump around lock is now handled through macros used in the __asm to avoid too much source duplication 4) on x86_64 (later i386 too) lowlevellock.h is now usable in __ASSEMBLER__ and defines just the few needed things for most of the *.S routines to avoid massive code duplication 5) as lll_lock etc. now has explicit private status, various always internal libpthread.so locks could be made LLL_PRIVATE, plus pthread_rwlock_* and pthread_barrier_* guard locks are now private resp. shared depending on whether the object is process private or shared I have kept LLL_PRIVATE to be 0 and LLL_SHARED 128, but it shouldn't be very hard to swap those two (in some places the current state is better, e.g. for rwlocks, in other cases the other would be better (barriers, sem_*). All futexes/locks in pthread_mutex_* and pthread_cond_* still use LLL_SHARED, that's the last big thing that needs to be handled. 
Built & tested on x86_64-linux (with both 2.6.21 and 2.6.22 kernels) and on ppc64-linux (only 2.6.18 kernel, will need to reinstall that box). --- libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c.jj 2007-07-23 19:36:30.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c 2007-07-25 20:44:47.000000000 +0200 @@ -29,11 +29,37 @@ int __new_sem_post (sem_t *sem) { + struct new_sem *isem = (struct new_sem *) sem; + + __asm __volatile (__lll_rel_instr ::: "memory"); + atomic_increment (&isem->value); + atomic_full_barrier (); + if (isem->nwaiters > 0) + { + int err = lll_futex_wake (&isem->value, 1, + isem->private ^ FUTEX_PRIVATE_FLAG); + if (__builtin_expect (err, 0) < 0) + { + __set_errno (-err); + return -1; + } + } + return 0; +} +versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1); + +#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1) + +int +attribute_compat_text_section +__old_sem_post (sem_t *sem) +{ int *futex = (int *) sem; __asm __volatile (__lll_rel_instr ::: "memory"); int nr = atomic_increment_val (futex); - int err = lll_futex_wake (futex, nr, LLL_SHARED); + /* We always have to assume it is a shared semaphore. */ + int err = lll_futex_wake (futex, 1, LLL_SHARED); if (__builtin_expect (err, 0) < 0) { __set_errno (-err); @@ -41,8 +67,6 @@ __new_sem_post (sem_t *sem) } return 0; } -versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1); -#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1) -strong_alias (__new_sem_post, __old_sem_post) + compat_symbol (libpthread, __old_sem_post, sem_post, GLIBC_2_0); #endif --- libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h.jj 2007-07-25 20:00:18.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h 2007-07-25 20:57:08.000000000 +0200 @@ -69,9 +69,6 @@ # endif #endif -/* Initializer for compatibility lock. 
*/ -#define LLL_MUTEX_LOCK_INITIALIZER (0) - #define lll_futex_wait(futexp, val, private) \ lll_futex_timed_wait (futexp, val, NULL, private) @@ -97,14 +94,15 @@ INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \ }) -#define lll_robust_mutex_dead(futexv) \ +#define lll_robust_dead(futexv, private) \ do \ { \ INTERNAL_SYSCALL_DECL (__err); \ int *__futexp = &(futexv); \ \ atomic_or (__futexp, FUTEX_OWNER_DIED); \ - INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \ + INTERNAL_SYSCALL (futex, __err, 4, __futexp, \ + __lll_private_flag (FUTEX_WAKE, private), 1, 0); \ } \ while (0) @@ -171,119 +169,111 @@ __val; \ }) -#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id) +#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id) /* Set *futex to 1 if it is 0, atomically. Returns the old value */ #define __lll_trylock(futex) __lll_robust_trylock (futex, 1) -#define lll_mutex_trylock(lock) __lll_trylock (&(lock)) +#define lll_trylock(lock) __lll_trylock (&(lock)) /* Set *futex to 2 if it is 0, atomically. 
Returns the old value */ #define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2) -#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock)) +#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock)) -extern void __lll_lock_wait (int *futex) attribute_hidden; -extern int __lll_robust_lock_wait (int *futex) attribute_hidden; +extern void __lll_lock_wait_private (int *futex) attribute_hidden; +extern void __lll_lock_wait (int *futex, int private) attribute_hidden; +extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden; -#define lll_mutex_lock(lock) \ +#define lll_lock(lock, private) \ (void) ({ \ int *__futex = &(lock); \ if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\ 0) != 0) \ - __lll_lock_wait (__futex); \ + { \ + if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ + __lll_lock_wait_private (__futex); \ + else \ + __lll_lock_wait (__futex, private); \ + } \ }) -#define lll_robust_mutex_lock(lock, id) \ +#define lll_robust_lock(lock, id, private) \ ({ \ int *__futex = &(lock); \ int __val = 0; \ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \ 0), 0)) \ - __val = __lll_robust_lock_wait (__futex); \ + __val = __lll_robust_lock_wait (__futex, private); \ __val; \ }) -#define lll_mutex_cond_lock(lock) \ +#define lll_cond_lock(lock, private) \ (void) ({ \ int *__futex = &(lock); \ if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\ 0) != 0) \ - __lll_lock_wait (__futex); \ + __lll_lock_wait (__futex, private); \ }) -#define lll_robust_mutex_cond_lock(lock, id) \ +#define lll_robust_cond_lock(lock, id, private) \ ({ \ int *__futex = &(lock); \ int __val = 0; \ int __id = id | FUTEX_WAITERS; \ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\ 0), 0)) \ - __val = __lll_robust_lock_wait (__futex); \ + __val = __lll_robust_lock_wait (__futex, private); \ __val; \ }) extern int __lll_timedlock_wait - (int *futex, 
const struct timespec *) attribute_hidden; + (int *futex, const struct timespec *, int private) attribute_hidden; extern int __lll_robust_timedlock_wait - (int *futex, const struct timespec *) attribute_hidden; + (int *futex, const struct timespec *, int private) attribute_hidden; -#define lll_mutex_timedlock(lock, abstime) \ +#define lll_timedlock(lock, abstime, private) \ ({ \ int *__futex = &(lock); \ int __val = 0; \ if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\ 0) != 0) \ - __val = __lll_timedlock_wait (__futex, abstime); \ + __val = __lll_timedlock_wait (__futex, abstime, private); \ __val; \ }) -#define lll_robust_mutex_timedlock(lock, abstime, id) \ +#define lll_robust_timedlock(lock, abstime, id, private) \ ({ \ int *__futex = &(lock); \ int __val = 0; \ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \ 0), 0)) \ - __val = __lll_robust_timedlock_wait (__futex, abstime); \ + __val = __lll_robust_timedlock_wait (__futex, abstime, private); \ __val; \ }) -#define lll_mutex_unlock(lock) \ +#define lll_unlock(lock, private) \ ((void) ({ \ int *__futex = &(lock); \ int __val = atomic_exchange_rel (__futex, 0); \ if (__builtin_expect (__val > 1, 0)) \ - lll_futex_wake (__futex, 1, LLL_SHARED); \ + lll_futex_wake (__futex, 1, private); \ })) -#define lll_robust_mutex_unlock(lock) \ +#define lll_robust_unlock(lock, private) \ ((void) ({ \ int *__futex = &(lock); \ int __val = atomic_exchange_rel (__futex, 0); \ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \ - lll_futex_wake (__futex, 1, LLL_SHARED); \ - })) - -#define lll_mutex_unlock_force(lock) \ - ((void) ({ \ - int *__futex = &(lock); \ - *__futex = 0; \ - __asm __volatile (__lll_rel_instr ::: "memory"); \ - lll_futex_wake (__futex, 1, LLL_SHARED); \ + lll_futex_wake (__futex, 1, private); \ })) -#define lll_mutex_islocked(futex) \ +#define lll_islocked(futex) \ (futex != 0) -/* Our internal lock implementation is identical to the binary-compatible - 
mutex implementation. */ - -/* Type for lock object. */ -typedef int lll_lock_t; - /* Initializers for lock. */ #define LLL_LOCK_INITIALIZER (0) #define LLL_LOCK_INITIALIZER_LOCKED (1) @@ -293,11 +283,6 @@ typedef int lll_lock_t; 1 - taken by one user >1 - taken by more users */ -#define lll_trylock(lock) lll_mutex_trylock (lock) -#define lll_lock(lock) lll_mutex_lock (lock) -#define lll_unlock(lock) lll_mutex_unlock (lock) -#define lll_islocked(lock) lll_mutex_islocked (lock) - /* The kernel notifies a process which uses CLONE_CLEARTID via futex wakeup when the clone terminates. The memory location contains the thread ID while the clone is running and is reset to zero @@ -320,26 +305,4 @@ extern int __lll_timedwait_tid (int *, c __res; \ }) - -/* Conditional variable handling. */ - -extern void __lll_cond_wait (pthread_cond_t *cond) - attribute_hidden; -extern int __lll_cond_timedwait (pthread_cond_t *cond, - const struct timespec *abstime) - attribute_hidden; -extern void __lll_cond_wake (pthread_cond_t *cond) - attribute_hidden; -extern void __lll_cond_broadcast (pthread_cond_t *cond) - attribute_hidden; - -#define lll_cond_wait(cond) \ - __lll_cond_wait (cond) -#define lll_cond_timedwait(cond, abstime) \ - __lll_cond_timedwait (cond, abstime) -#define lll_cond_wake(cond) \ - __lll_cond_wake (cond) -#define lll_cond_broadcast(cond) \ - __lll_cond_broadcast (cond) - #endif /* lowlevellock.h */ --- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c 2007-07-25 20:00:24.000000000 +0200 @@ -39,7 +39,7 @@ pthread_barrier_wait (barrier) int result = 0; /* Make sure we are alone. */ - lll_lock (ibarrier->b.lock); + lll_lock (ibarrier->b.lock, /* XYZ */ LLL_SHARED); /* One more arrival. 
*/ --ibarrier->b.left; @@ -66,7 +66,7 @@ pthread_barrier_wait (barrier) unsigned int event = ibarrier->b.curr_event; /* Before suspending, make the barrier available to others. */ - lll_unlock (ibarrier->b.lock); + lll_unlock (ibarrier->b.lock, /* XYZ */ LLL_SHARED); /* Wait for the event counter of the barrier to change. */ do @@ -84,7 +84,7 @@ pthread_barrier_wait (barrier) { if (atomic_increment_val (&ibarrier->b.left) == init_count) /* We are done. */ - lll_unlock (ibarrier->b.lock); + lll_unlock (ibarrier->b.lock, /* XYZ */ LLL_SHARED); } else { @@ -97,7 +97,7 @@ pthread_barrier_wait (barrier) __sparc32_atomic_do_unlock24 (&ibarrier->left_lock); if (left == init_count) /* We are done. */ - lll_unlock (ibarrier->b.lock); + lll_unlock (ibarrier->b.lock, /* XYZ */ LLL_SHARED); } return result; --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S.jj 2007-05-28 13:45:24.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,19 +18,11 @@ 02111-1307 USA. 
*/ #include <sysdep.h> +#include <lowlevellock.h> #include <shlib-compat.h> #include <pthread-errnos.h> #include <structsem.h> -#ifndef UP -# define LOCK lock -#else -# define -#endif - -#define SYS_futex 202 -#define FUTEX_WAKE 1 - .text --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S 2007-07-25 20:00:24.000000000 +0200 @@ -19,19 +19,10 @@ #include <sysdep.h> #include <shlib-compat.h> +#include <lowlevellock.h> #include <lowlevelcond.h> #include <tcb-offsets.h> -#ifdef UP -# define LOCK -#else -# define LOCK lock -#endif - -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 - .text @@ -58,7 +49,9 @@ __condvar_cleanup: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_lock_wait + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_lock_wait #if cond_lock != 0 subq $cond_lock, %rdi #endif @@ -105,7 +98,9 @@ __condvar_cleanup: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake /* Wake up all waiters to make sure no signal gets lost. */ 2: testq %r12, %r12 @@ -307,7 +302,9 @@ __pthread_cond_wait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_lock_wait + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_lock_wait jmp 2b /* Unlock in loop requires wakeup. */ @@ -315,7 +312,9 @@ __pthread_cond_wait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 4b /* Locking in loop failed. 
*/ @@ -323,7 +322,9 @@ __pthread_cond_wait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_lock_wait + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_lock_wait #if cond_lock != 0 subq $cond_lock, %rdi #endif @@ -334,7 +335,9 @@ __pthread_cond_wait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 11b /* The initial unlocking of the mutex failed. */ @@ -351,7 +354,9 @@ __pthread_cond_wait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake 13: movq %r10, %rax jmp 14b --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S.jj 2007-05-28 13:45:24.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,23 +18,15 @@ 02111-1307 USA. */ #include <sysdep.h> +#include <lowlevellock.h> #include <shlib-compat.h> #include <pthread-errnos.h> #include <structsem.h> -#ifndef UP -# define LOCK lock -#else -# define -#endif - -#define SYS_futex 202 -#define FUTEX_WAIT 0 /* For the calculation see asm/vsyscall.h. */ #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 - .text .globl sem_timedwait --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S.jj 2007-05-28 13:45:24.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,19 +18,11 @@ 02111-1307 USA. */ #include <sysdep.h> +#include <lowlevellock.h> #include <shlib-compat.h> #include <pthread-errnos.h> #include <structsem.h> -#ifndef UP -# define LOCK lock -#else -# define -#endif - -#define SYS_futex 202 -#define FUTEX_WAIT 0 - .text --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S.jj 2007-07-23 19:36:30.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,23 +18,12 @@ 02111-1307 USA. 
*/ #include <sysdep.h> +#include <lowlevellock.h> #include <lowlevelrwlock.h> #include <pthread-errnos.h> #include <kernel-features.h> -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_PRIVATE_FLAG 128 - -#ifndef UP -# define LOCK lock -#else -# define LOCK -#endif - - .text .globl __pthread_rwlock_rdlock @@ -123,11 +112,11 @@ __pthread_rwlock_rdlock: movq %rdx, %rax retq -1: +1: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait #if MUTEX != 0 subq $MUTEX, %rdi #endif @@ -139,11 +128,11 @@ __pthread_rwlock_rdlock: movl $EDEADLK, %edx jmp 9b -6: +6: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake #if MUTEX != 0 subq $MUTEX, %rdi #endif @@ -159,21 +148,21 @@ __pthread_rwlock_rdlock: movl $EAGAIN, %edx jmp 9b -10: +10: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake #if MUTEX != 0 subq $MUTEX, %rdi #endif jmp 11b -12: +12: movl PSHARED(%rdi), %esi #if MUTEX == 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait #if MUTEX != 0 subq $MUTEX, %rdi #endif --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h.jj 2007-07-25 20:00:18.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h 2007-07-25 20:00:24.000000000 +0200 @@ -20,17 +20,27 @@ #ifndef _LOWLEVELLOCK_H #define _LOWLEVELLOCK_H 1 -#include <time.h> -#include <sys/param.h> -#include <bits/pthreadtypes.h> -#include <kernel-features.h> -#include <tcb-offsets.h> - -#ifndef LOCK_INSTR -# ifdef UP -# define LOCK_INSTR /* nothing */ -# else -# define LOCK_INSTR "lock;" +#ifndef __ASSEMBLER__ +# include <time.h> +# include <sys/param.h> +# include <bits/pthreadtypes.h> +# include <kernel-features.h> +# include <tcb-offsets.h> + +# ifndef LOCK_INSTR +# ifdef UP +# define LOCK_INSTR /* nothing */ +# else +# define 
LOCK_INSTR "lock;" +# endif +# endif +#else +# ifndef LOCK +# ifdef UP +# define LOCK +# else +# define LOCK lock +# endif # endif #endif @@ -38,11 +48,13 @@ #define FUTEX_WAIT 0 #define FUTEX_WAKE 1 #define FUTEX_CMP_REQUEUE 4 +#define FUTEX_WAKE_OP 5 #define FUTEX_LOCK_PI 6 #define FUTEX_UNLOCK_PI 7 #define FUTEX_TRYLOCK_PI 8 #define FUTEX_PRIVATE_FLAG 128 +#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1) /* Values for 'private' parameter of locking macros. Yes, the definition seems to be backwards. But it is not. The bit will be @@ -50,6 +62,8 @@ #define LLL_PRIVATE 0 #define LLL_SHARED FUTEX_PRIVATE_FLAG +#ifndef __ASSEMBLER__ + #if !defined NOT_IN_libc || defined IS_IN_rtld /* In libc.so or ld.so all futexes are private. */ # ifdef __ASSUME_PRIVATE_FUTEX @@ -76,13 +90,13 @@ # endif #endif -/* Initializer for compatibility lock. */ -#define LLL_MUTEX_LOCK_INITIALIZER (0) -#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1) -#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2) +/* Initializer for lock. */ +#define LLL_LOCK_INITIALIZER (0) +#define LLL_LOCK_INITIALIZER_LOCKED (1) +#define LLL_LOCK_INITIALIZER_WAITERS (2) /* Delay in spinlock loop. */ -#define BUSY_WAIT_NOP asm ("rep; nop") +#define BUSY_WAIT_NOP asm ("rep; nop") #define LLL_STUB_UNWIND_INFO_START \ @@ -196,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END : "=a" (__status) \ : "0" (SYS_futex), "D" (futex), \ "S" (__lll_private_flag (FUTEX_WAIT, private)), \ - "d" (_val), "r" (__to) \ + "d" (_val), "r" (__to) \ : "memory", "cc", "r11", "cx"); \ __status; \ }) @@ -217,240 +231,320 @@ LLL_STUB_UNWIND_INFO_END /* Does not preserve %eax and %ecx. */ -extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden; -/* Does not preserver %eax, %ecx, and %edx. 
*/ -extern int __lll_mutex_timedlock_wait (int *__futex, int __val, - const struct timespec *__abstime) +extern int __lll_lock_wait_private (int *__futex, int __val) attribute_hidden; +extern int __lll_lock_wait (int *__futex, int __val, int private) + attribute_hidden; +/* Does not preserve %eax, %ecx, and %edx. */ +extern int __lll_timedlock_wait (int *__futex, int __val, + const struct timespec *__abstime, int private) attribute_hidden; /* Preserves all registers but %eax. */ -extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden; +extern int __lll_unlock_wake_private (int *__futex) attribute_hidden; +extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden; -/* NB: in the lll_mutex_trylock macro we simply return the value in %eax +/* NB: in the lll_trylock macro we simply return the value in %eax after the cmpxchg instruction. In case the operation succeded this value is zero. In case the operation failed, the cmpxchg instruction has loaded the current value of the memory work which is guaranteed to be nonzero. 
*/ -#define lll_mutex_trylock(futex) \ +#if defined NOT_IN_libc || defined UP +# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1" +#else +# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ + "je 0f\n\t" \ + "lock; cmpxchgl %2, %1\n\t" \ + "jmp 1f\n\t" \ + "0:\tcmpxchgl %2, %1\n\t" \ + "1:" +#endif + +#define lll_trylock(futex) \ ({ int ret; \ - __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ + __asm __volatile (__lll_trylock_asm \ : "=a" (ret), "=m" (futex) \ - : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\ - "0" (LLL_MUTEX_LOCK_INITIALIZER) \ + : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \ + "0" (LLL_LOCK_INITIALIZER) \ : "memory"); \ ret; }) - -#define lll_robust_mutex_trylock(futex, id) \ +#define lll_robust_trylock(futex, id) \ ({ int ret; \ __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ : "=a" (ret), "=m" (futex) \ - : "r" (id), "m" (futex), \ - "0" (LLL_MUTEX_LOCK_INITIALIZER) \ + : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \ : "memory"); \ ret; }) - -#define lll_mutex_cond_trylock(futex) \ +#define lll_cond_trylock(futex) \ ({ int ret; \ __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \ : "=a" (ret), "=m" (futex) \ - : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \ - "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \ + : "r" (LLL_LOCK_INITIALIZER_WAITERS), \ + "m" (futex), "0" (LLL_LOCK_INITIALIZER) \ : "memory"); \ ret; }) - -#define lll_mutex_lock(futex) \ - (void) ({ int ignore1, ignore2, ignore3; \ - __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ +#if defined NOT_IN_libc || defined UP +# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \ + "jnz 1f\n\t" +#else +# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ + "je 0f\n\t" \ + "lock; cmpxchgl %4, %2\n\t" \ "jnz 1f\n\t" \ - ".subsection 1\n\t" \ - ".type _L_mutex_lock_%=, @function\n" \ - "_L_mutex_lock_%=:\n" \ - "1:\tleaq %2, %%rdi\n" \ - "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_mutex_lock_wait\n" \ - 
"4:\taddq $128, %%rsp\n" \ - "5:\tjmp 24f\n" \ - "6:\t.size _L_mutex_lock_%=, 6b-1b\n\t" \ - ".previous\n" \ - LLL_STUB_UNWIND_INFO_5 \ - "24:" \ - : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\ - "=a" (ignore3) \ - : "0" (1), "m" (futex), "3" (0) \ - : "cx", "r11", "cc", "memory"); }) + "jmp 24f\n" \ + "0:\tcmpxchgl %4, %2\n\t" \ + "jnz 1f\n\t" +#endif +#define lll_lock(futex, private) \ + (void) \ + ({ int ignore1, ignore2, ignore3; \ + if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ + __asm __volatile (__lll_lock_asm_start \ + ".subsection 1\n\t" \ + ".type _L_lock_%=, @function\n" \ + "_L_lock_%=:\n" \ + "1:\tleaq %2, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ + "3:\tcallq __lll_lock_wait_private\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ + "6:\t.size _L_lock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_5 \ + "24:" \ + : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \ + "=a" (ignore3) \ + : "0" (1), "m" (futex), "3" (0) \ + : "cx", "r11", "cc", "memory"); \ + else \ + __asm __volatile (__lll_lock_asm_start \ + ".subsection 1\n\t" \ + ".type _L_lock_%=, @function\n" \ + "_L_lock_%=:\n" \ + "1:\tleaq %2, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ + "3:\tcallq __lll_lock_wait\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ + "6:\t.size _L_lock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_5 \ + "24:" \ + : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ + "=a" (ignore3) \ + : "1" (1), "m" (futex), "3" (0), "0" (private) \ + : "cx", "r11", "cc", "memory"); \ + }) \ -#define lll_robust_mutex_lock(futex, id) \ +#define lll_robust_lock(futex, id, private) \ ({ int result, ignore1, ignore2; \ - __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ + __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \ "jnz 1f\n\t" \ ".subsection 1\n\t" \ - ".type _L_robust_mutex_lock_%=, @function\n" \ - "_L_robust_mutex_lock_%=:\n" \ + ".type _L_robust_lock_%=, @function\n" \ + "_L_robust_lock_%=:\n" \ "1:\tleaq %2, %%rdi\n" \ 
"2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_robust_mutex_lock_wait\n" \ + "3:\tcallq __lll_robust_lock_wait\n" \ "4:\taddq $128, %%rsp\n" \ "5:\tjmp 24f\n" \ - "6:\t.size _L_robust_mutex_lock_%=, 6b-1b\n\t" \ + "6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \ ".previous\n" \ LLL_STUB_UNWIND_INFO_5 \ "24:" \ - : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \ + : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ "=a" (result) \ - : "0" (id), "m" (futex), "3" (0) \ + : "1" (id), "m" (futex), "3" (0), "0" (private) \ : "cx", "r11", "cc", "memory"); \ result; }) +#define lll_cond_lock(futex, private) \ + (void) \ + ({ int ignore1, ignore2, ignore3; \ + __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \ + "jnz 1f\n\t" \ + ".subsection 1\n\t" \ + ".type _L_cond_lock_%=, @function\n" \ + "_L_cond_lock_%=:\n" \ + "1:\tleaq %2, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ + "3:\tcallq __lll_lock_wait\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ + "6:\t.size _L_cond_lock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_5 \ + "24:" \ + : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ + "=a" (ignore3) \ + : "1" (2), "m" (futex), "3" (0), "0" (private) \ + : "cx", "r11", "cc", "memory"); \ + }) -#define lll_mutex_cond_lock(futex) \ - (void) ({ int ignore1, ignore2, ignore3; \ - __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ - "jnz 1f\n\t" \ - ".subsection 1\n\t" \ - ".type _L_mutex_cond_lock_%=, @function\n" \ - "_L_mutex_cond_lock_%=:\n" \ - "1:\tleaq %2, %%rdi\n" \ - "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_mutex_lock_wait\n" \ - "4:\taddq $128, %%rsp\n" \ - "5:\tjmp 24f\n" \ - "6:\t.size _L_mutex_cond_lock_%=, 6b-1b\n\t" \ - ".previous\n" \ - LLL_STUB_UNWIND_INFO_5 \ - "24:" \ - : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\ - "=a" (ignore3) \ - : "0" (2), "m" (futex), "3" (0) \ - : "cx", "r11", "cc", "memory"); }) - - -#define lll_robust_mutex_cond_lock(futex, id) \ +#define lll_robust_cond_lock(futex, id, private) \ ({ int result, ignore1, ignore2; 
\ - __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \ + __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \ "jnz 1f\n\t" \ ".subsection 1\n\t" \ - ".type _L_robust_mutex_cond_lock_%=, @function\n" \ - "_L_robust_mutex_cond_lock_%=:\n" \ + ".type _L_robust_cond_lock_%=, @function\n" \ + "_L_robust_cond_lock_%=:\n" \ "1:\tleaq %2, %%rdi\n" \ "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_robust_mutex_lock_wait\n" \ + "3:\tcallq __lll_robust_lock_wait\n" \ "4:\taddq $128, %%rsp\n" \ "5:\tjmp 24f\n" \ - "6:\t.size _L_robust_mutex_cond_lock_%=, 6b-1b\n\t" \ + "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t" \ ".previous\n" \ LLL_STUB_UNWIND_INFO_5 \ "24:" \ - : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \ + : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \ "=a" (result) \ - : "0" (id | FUTEX_WAITERS), "m" (futex), "3" (0) \ + : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \ + "0" (private) \ : "cx", "r11", "cc", "memory"); \ result; }) - -#define lll_mutex_timedlock(futex, timeout) \ +#define lll_timedlock(futex, timeout, private) \ ({ int result, ignore1, ignore2, ignore3; \ - __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \ + __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \ "jnz 1f\n\t" \ ".subsection 1\n\t" \ - ".type _L_mutex_timedlock_%=, @function\n" \ - "_L_mutex_timedlock_%=:\n" \ + ".type _L_timedlock_%=, @function\n" \ + "_L_timedlock_%=:\n" \ "1:\tleaq %4, %%rdi\n" \ "0:\tmovq %8, %%rdx\n" \ "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_mutex_timedlock_wait\n" \ + "3:\tcallq __lll_timedlock_wait\n" \ "4:\taddq $128, %%rsp\n" \ "5:\tjmp 24f\n" \ - "6:\t.size _L_mutex_timedlock_%=, 6b-1b\n\t" \ + "6:\t.size _L_timedlock_%=, 6b-1b\n\t" \ ".previous\n" \ LLL_STUB_UNWIND_INFO_6 \ "24:" \ - : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \ + : "=a" (result), "=D" (ignore1), "=S" (ignore2), \ "=&d" (ignore3), "=m" (futex) \ - : "0" (0), "2" (1), "m" (futex), "m" (timeout) \ + : "0" (0), "1" (1), "m" (futex), "m" (timeout), \ + "2" (private) \ : 
"memory", "cx", "cc", "r10", "r11"); \ result; }) - -#define lll_robust_mutex_timedlock(futex, timeout, id) \ +#define lll_robust_timedlock(futex, timeout, id, private) \ ({ int result, ignore1, ignore2, ignore3; \ - __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \ + __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \ "jnz 1f\n\t" \ ".subsection 1\n\t" \ - ".type _L_robust_mutex_timedlock_%=, @function\n" \ - "_L_robust_mutex_timedlock_%=:\n" \ + ".type _L_robust_timedlock_%=, @function\n" \ + "_L_robust_timedlock_%=:\n" \ "1:\tleaq %4, %%rdi\n" \ "0:\tmovq %8, %%rdx\n" \ "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_robust_mutex_timedlock_wait\n" \ + "3:\tcallq __lll_robust_timedlock_wait\n" \ "4:\taddq $128, %%rsp\n" \ "5:\tjmp 24f\n" \ - "6:\t.size _L_robust_mutex_timedlock_%=, 6b-1b\n\t" \ + "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t" \ ".previous\n" \ LLL_STUB_UNWIND_INFO_6 \ "24:" \ - : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \ + : "=a" (result), "=D" (ignore1), "=S" (ignore2), \ "=&d" (ignore3), "=m" (futex) \ - : "0" (0), "2" (id), "m" (futex), "m" (timeout) \ + : "0" (0), "1" (id), "m" (futex), "m" (timeout), \ + "2" (private) \ : "memory", "cx", "cc", "r10", "r11"); \ result; }) +#if defined NOT_IN_libc || defined UP +# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \ + "jne 1f\n\t" +#else +# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ + "je 0f\n\t" \ + "lock; decl %0\n\t" \ + "jne 1f\n\t" \ + "jmp 24f\n\t" \ + "0:\tdecl %0\n\t" \ + "jne 1f\n\t" +#endif -#define lll_mutex_unlock(futex) \ - (void) ({ int ignore; \ - __asm __volatile (LOCK_INSTR "decl %0\n\t" \ - "jne 1f\n\t" \ - ".subsection 1\n\t" \ - ".type _L_mutex_unlock_%=, @function\n" \ - "_L_mutex_unlock_%=:\n" \ - "1:\tleaq %0, %%rdi\n" \ - "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_mutex_unlock_wake\n" \ - "4:\taddq $128, %%rsp\n" \ - "5:\tjmp 24f\n" \ - "6:\t.size _L_mutex_unlock_%=, 6b-1b\n\t" \ - ".previous\n" \ - 
LLL_STUB_UNWIND_INFO_5 \ - "24:" \ - : "=m" (futex), "=&D" (ignore) \ - : "m" (futex) \ - : "ax", "cx", "r11", "cc", "memory"); }) - - -#define lll_robust_mutex_unlock(futex) \ - (void) ({ int ignore; \ - __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \ - "jne 1f\n\t" \ - ".subsection 1\n\t" \ - ".type _L_robust_mutex_unlock_%=, @function\n" \ - "_L_robust_mutex_unlock_%=:\n" \ - "1:\tleaq %0, %%rdi\n" \ - "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_mutex_unlock_wake\n" \ - "4:\taddq $128, %%rsp\n" \ - "5:\tjmp 24f\n" \ - "6:\t.size _L_robust_mutex_unlock_%=, 6b-1b\n\t"\ - ".previous\n" \ - LLL_STUB_UNWIND_INFO_5 \ - "24:" \ - : "=m" (futex), "=&D" (ignore) \ - : "i" (FUTEX_WAITERS), "m" (futex) \ - : "ax", "cx", "r11", "cc", "memory"); }) - - -#define lll_robust_mutex_dead(futex) \ - (void) ({ int ignore; \ - __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \ - "syscall" \ - : "=m" (futex), "=a" (ignore) \ - : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \ - "S" (FUTEX_WAKE), "1" (__NR_futex), \ - "d" (1) \ - : "cx", "r11", "cc", "memory"); }) - +#define lll_unlock(futex, private) \ + (void) \ + ({ int ignore; \ + if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \ + __asm __volatile (__lll_unlock_asm_start \ + ".subsection 1\n\t" \ + ".type _L_unlock_%=, @function\n" \ + "_L_unlock_%=:\n" \ + "1:\tleaq %0, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ + "3:\tcallq __lll_unlock_wake_private\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ + "6:\t.size _L_unlock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_5 \ + "24:" \ + : "=m" (futex), "=&D" (ignore) \ + : "m" (futex) \ + : "ax", "cx", "r11", "cc", "memory"); \ + else \ + __asm __volatile (__lll_unlock_asm_start \ + ".subsection 1\n\t" \ + ".type _L_unlock_%=, @function\n" \ + "_L_unlock_%=:\n" \ + "1:\tleaq %0, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ + "3:\tcallq __lll_unlock_wake\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ + "6:\t.size _L_unlock_%=, 6b-1b\n\t" \ + ".previous\n" \ 
+ LLL_STUB_UNWIND_INFO_5 \ + "24:" \ + : "=m" (futex), "=&D" (ignore) \ + : "m" (futex), "S" (private) \ + : "ax", "cx", "r11", "cc", "memory"); \ + }) + +#define lll_robust_unlock(futex, private) \ + do \ + { \ + int ignore; \ + __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \ + "jne 1f\n\t" \ + ".subsection 1\n\t" \ + ".type _L_robust_unlock_%=, @function\n" \ + "_L_robust_unlock_%=:\n" \ + "1:\tleaq %0, %%rdi\n" \ + "2:\tsubq $128, %%rsp\n" \ + "3:\tcallq __lll_unlock_wake\n" \ + "4:\taddq $128, %%rsp\n" \ + "5:\tjmp 24f\n" \ + "6:\t.size _L_robust_unlock_%=, 6b-1b\n\t" \ + ".previous\n" \ + LLL_STUB_UNWIND_INFO_5 \ + "24:" \ + : "=m" (futex), "=&D" (ignore) \ + : "i" (FUTEX_WAITERS), "m" (futex), \ + "S" (private) \ + : "ax", "cx", "r11", "cc", "memory"); \ + } \ + while (0) + +#define lll_robust_dead(futex, private) \ + do \ + { \ + int ignore; \ + __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \ + "syscall" \ + : "=m" (futex), "=a" (ignore) \ + : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \ + "S" (__lll_private_flag (FUTEX_WAKE, private)), \ + "1" (__NR_futex), "d" (1) \ + : "cx", "r11", "cc", "memory"); \ + } \ + while (0) /* Returns non-zero if error happened, zero if success. */ #define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \ @@ -461,117 +555,13 @@ extern int __lll_mutex_unlock_wait (int __asm __volatile ("syscall" \ : "=a" (__res) \ : "0" (__NR_futex), "D" ((void *) ftx), \ - "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \ - "r" (__nr_move), "r" (__mutex), "r" (__val) \ + "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \ + "r" (__nr_move), "r" (__mutex), "r" (__val) \ : "cx", "r11", "cc", "memory"); \ __res < 0; }) - -#define lll_mutex_islocked(futex) \ - (futex != LLL_MUTEX_LOCK_INITIALIZER) - - -/* We have a separate internal lock implementation which is not tied - to binary compatibility. */ - -/* Type for lock object. */ -typedef int lll_lock_t; - -/* Initializers for lock. 
*/ -#define LLL_LOCK_INITIALIZER (0) -#define LLL_LOCK_INITIALIZER_LOCKED (1) - - -/* The states of a lock are: - 0 - untaken - 1 - taken by one user - 2 - taken by more users */ - - -#if defined NOT_IN_libc || defined UP -# define lll_trylock(futex) lll_mutex_trylock (futex) -# define lll_lock(futex) lll_mutex_lock (futex) -# define lll_unlock(futex) lll_mutex_unlock (futex) -#else -/* Special versions of the macros for use in libc itself. They avoid - the lock prefix when the thread library is not used. - - The code sequence to avoid unnecessary lock prefixes is what the AMD - guys suggested. If you do not like it, bring it up with AMD. - - XXX In future we might even want to avoid it on UP machines. */ - -# define lll_trylock(futex) \ - ({ unsigned char ret; \ - __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ - "je 0f\n\t" \ - "lock; cmpxchgl %2, %1\n\t" \ - "jmp 1f\n" \ - "0:\tcmpxchgl %2, %1\n\t" \ - "1:setne %0" \ - : "=a" (ret), "=m" (futex) \ - : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\ - "0" (LLL_MUTEX_LOCK_INITIALIZER) \ - : "memory"); \ - ret; }) - - -# define lll_lock(futex) \ - (void) ({ int ignore1, ignore2, ignore3; \ - __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ - "je 0f\n\t" \ - "lock; cmpxchgl %0, %2\n\t" \ - "jnz 1f\n\t" \ - "jmp 24f\n" \ - "0:\tcmpxchgl %0, %2\n\t" \ - "jnz 1f\n\t" \ - ".subsection 1\n\t" \ - ".type _L_lock_%=, @function\n" \ - "_L_lock_%=:\n" \ - "1:\tleaq %2, %%rdi\n" \ - "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_mutex_lock_wait\n" \ - "4:\taddq $128, %%rsp\n" \ - "5:\tjmp 24f\n" \ - "6:\t.size _L_lock_%=, 6b-1b\n\t" \ - ".previous\n" \ - LLL_STUB_UNWIND_INFO_5 \ - "24:" \ - : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\ - "=a" (ignore3) \ - : "0" (1), "m" (futex), "3" (0) \ - : "cx", "r11", "cc", "memory"); }) - - -# define lll_unlock(futex) \ - (void) ({ int ignore; \ - __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \ - "je 0f\n\t" \ - "lock; decl 
%0\n\t" \ - "jne 1f\n\t" \ - "jmp 24f\n" \ - "0:\tdecl %0\n\t" \ - "jne 1f\n\t" \ - ".subsection 1\n\t" \ - ".type _L_unlock_%=, @function\n" \ - "_L_unlock_%=:\n" \ - "1:\tleaq %0, %%rdi\n" \ - "2:\tsubq $128, %%rsp\n" \ - "3:\tcallq __lll_mutex_unlock_wake\n" \ - "4:\taddq $128, %%rsp\n" \ - "5:\tjmp 24f\n" \ - "6:\t.size _L_unlock_%=, 6b-1b\n\t" \ - ".previous\n" \ - LLL_STUB_UNWIND_INFO_5 \ - "24:" \ - : "=m" (futex), "=&D" (ignore) \ - : "m" (futex) \ - : "ax", "cx", "r11", "cc", "memory"); }) -#endif - - #define lll_islocked(futex) \ - (futex != LLL_MUTEX_LOCK_INITIALIZER) + (futex != LLL_LOCK_INITIALIZER) /* The kernel notifies a process with uses CLONE_CLEARTID via futex @@ -610,25 +600,6 @@ extern int __lll_timedwait_tid (int *tid } \ __result; }) - -/* Conditional variable handling. */ - -extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden; -extern int __lll_cond_timedwait (pthread_cond_t *cond, - const struct timespec *abstime) - attribute_hidden; -extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden; -extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden; - - -#define lll_cond_wait(cond) \ - __lll_cond_wait (cond) -#define lll_cond_timedwait(cond, abstime) \ - __lll_cond_timedwait (cond, abstime) -#define lll_cond_wake(cond) \ - __lll_cond_wake (cond) -#define lll_cond_broadcast(cond) \ - __lll_cond_broadcast (cond) - +#endif /* !__ASSEMBLER__ */ #endif /* lowlevellock.h */ --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S.jj 2006-09-08 13:57:52.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,5 @@ -/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. 
@@ -19,33 +20,46 @@ #include <sysdep.h> #include <pthread-errnos.h> +#include <lowlevellock.h> #include <lowlevelrobustlock.h> +#include <kernel-features.h> .text -#ifndef LOCK -# ifdef UP -# define LOCK +#define FUTEX_WAITERS 0x80000000 +#define FUTEX_OWNER_DIED 0x40000000 + +#ifdef __ASSUME_PRIVATE_FUTEX +# define LOAD_FUTEX_WAIT(reg) \ + xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg +# define LOAD_FUTEX_WAKE(reg) \ + xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg +#else +# if FUTEX_WAIT == 0 +# define LOAD_FUTEX_WAIT(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %fs:PRIVATE_FUTEX, reg # else -# define LOCK lock +# define LOAD_FUTEX_WAIT(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %fs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAIT, reg # endif +# define LOAD_FUTEX_WAKE(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %fs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAKE, reg #endif -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_WAITERS 0x80000000 -#define FUTEX_OWNER_DIED 0x40000000 - /* For the calculation see asm/vsyscall.h. */ #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 - .globl __lll_robust_mutex_lock_wait - .type __lll_robust_mutex_lock_wait,@function - .hidden __lll_robust_mutex_lock_wait + .globl __lll_robust_lock_wait + .type __lll_robust_lock_wait,@function + .hidden __lll_robust_lock_wait .align 16 -__lll_robust_mutex_lock_wait: +__lll_robust_lock_wait: cfi_startproc pushq %r10 cfi_adjust_cfa_offset(8) @@ -55,11 +69,7 @@ __lll_robust_mutex_lock_wait: cfi_offset(%rdx, -24) xorq %r10, %r10 /* No timeout. 
*/ -#if FUTEX_WAIT == 0 - xorl %esi, %esi -#else - movl $FUTEX_WAIT, %esi -#endif + LOAD_FUTEX_WAIT (%esi) 4: movl %eax, %edx orl $FUTEX_WAITERS, %edx @@ -97,14 +107,14 @@ __lll_robust_mutex_lock_wait: cfi_restore(%r10) retq cfi_endproc - .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait + .size __lll_robust_lock_wait,.-__lll_robust_lock_wait - .globl __lll_robust_mutex_timedlock_wait - .type __lll_robust_mutex_timedlock_wait,@function - .hidden __lll_robust_mutex_timedlock_wait + .globl __lll_robust_timedlock_wait + .type __lll_robust_timedlock_wait,@function + .hidden __lll_robust_timedlock_wait .align 16 -__lll_robust_mutex_timedlock_wait: +__lll_robust_timedlock_wait: cfi_startproc /* Check for a valid timeout value. */ cmpq $1000000000, 8(%rdx) @@ -122,10 +132,12 @@ __lll_robust_mutex_timedlock_wait: cfi_offset(%r9, -24) cfi_offset(%r12, -32) cfi_offset(%r13, -40) + pushq %rsi + cfi_adjust_cfa_offset(8) /* Stack frame for the timespec and timeval structs. */ - subq $24, %rsp - cfi_adjust_cfa_offset(24) + subq $32, %rsp + cfi_adjust_cfa_offset(32) movq %rdi, %r12 movq %rdx, %r13 @@ -174,11 +186,8 @@ __lll_robust_mutex_timedlock_wait: jnz 5f 2: movq %rsp, %r10 -#if FUTEX_WAIT == 0 - xorl %esi, %esi -#else - movl $FUTEX_WAIT, %esi -#endif + movl 32(%rsp), %esi + LOAD_FUTEX_WAIT (%esi) movq %r12, %rdi movl $SYS_futex, %eax syscall @@ -195,8 +204,8 @@ __lll_robust_mutex_timedlock_wait: cmpxchgl %edx, (%r12) jnz 7f -6: addq $24, %rsp - cfi_adjust_cfa_offset(-24) +6: addq $40, %rsp + cfi_adjust_cfa_offset(-40) popq %r13 cfi_adjust_cfa_offset(-8) cfi_restore(%r13) @@ -214,7 +223,7 @@ __lll_robust_mutex_timedlock_wait: 3: movl $EINVAL, %eax retq - cfi_adjust_cfa_offset(56) + cfi_adjust_cfa_offset(72) cfi_offset(%r8, -16) cfi_offset(%r9, -24) cfi_offset(%r12, -32) @@ -226,4 +235,4 @@ __lll_robust_mutex_timedlock_wait: 8: movl $ETIMEDOUT, %eax jmp 6b cfi_endproc - .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait + .size 
__lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S.jj 2007-07-25 19:23:33.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,26 +18,15 @@ 02111-1307 USA. */ #include <sysdep.h> +#include <lowlevellock.h> #include <lowlevelrwlock.h> #include <pthread-errnos.h> #include <kernel-features.h> -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_PRIVATE_FLAG 128 - /* For the calculation see asm/vsyscall.h. */ #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 -#ifndef UP -# define LOCK lock -#else -# define LOCK -#endif - - .text .globl pthread_rwlock_timedwrlock @@ -168,11 +157,11 @@ pthread_rwlock_timedwrlock: popq %r12 retq -1: +1: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait jmp 2b 14: cmpl %fs:TID, %eax @@ -180,13 +169,13 @@ pthread_rwlock_timedwrlock: 20: movl $EDEADLK, %edx jmp 9b -6: +6: movl PSHARED(%r12), %esi #if MUTEX == 0 movq %r12, %rdi #else leal MUTEX(%r12), %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake jmp 7b /* Overflow. */ @@ -194,22 +183,22 @@ pthread_rwlock_timedwrlock: movl $EAGAIN, %edx jmp 9b -10: +10: movl PSHARED(%r12), %esi #if MUTEX == 0 movq %r12, %rdi #else leaq MUTEX(%r12), %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake jmp 11b -12: +12: movl PSHARED(%r12), %esi #if MUTEX == 0 movq %r12, %rdi #else leaq MUTEX(%r12), %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait jmp 13b 16: movq $-ETIMEDOUT, %rdx --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S.jj 2007-05-28 13:45:24.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,18 +18,9 @@ 02111-1307 USA. 
*/ #include <sysdep.h> +#include <lowlevellock.h> #include <lowlevelbarrier.h> -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 - -#ifndef UP -# define LOCK lock -#else -# define LOCK -#endif - .text @@ -142,21 +133,29 @@ pthread_barrier_wait: retq -1: addq $MUTEX, %rdi - callq __lll_mutex_lock_wait +1: movl PRIVATE(%rdi), %esi + addq $MUTEX, %rdi + xorl $LLL_SHARED, %esi + callq __lll_lock_wait subq $MUTEX, %rdi jmp 2b -4: addq $MUTEX, %rdi - callq __lll_mutex_unlock_wake +4: movl PRIVATE(%rdi), %esi + addq $MUTEX, %rdi + xorl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 5b -6: addq $MUTEX, %rdi - callq __lll_mutex_unlock_wake +6: movl PRIVATE(%rdi), %esi + addq $MUTEX, %rdi + xorl $LLL_SHARED, %esi + callq __lll_unlock_wake subq $MUTEX, %rdi jmp 7b -9: addq $MUTEX, %rdi - callq __lll_mutex_unlock_wake +9: movl PRIVATE(%rdi), %esi + addq $MUTEX, %rdi + xorl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 10b .size pthread_barrier_wait,.-pthread_barrier_wait --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S 2007-07-25 20:00:24.000000000 +0200 @@ -19,33 +19,46 @@ #include <sysdep.h> #include <pthread-errnos.h> +#include <kernel-features.h> +#include <lowlevellock.h> .text -#ifndef LOCK -# ifdef UP -# define LOCK +#ifdef __ASSUME_PRIVATE_FUTEX +# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ + movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg +# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ + movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg +# define LOAD_FUTEX_WAIT(reg) \ + xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg +# define LOAD_FUTEX_WAKE(reg) \ + xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg +#else +# if FUTEX_WAIT == 0 +# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ + movl %fs:PRIVATE_FUTEX, reg # else -# define LOCK lock +# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ + movl %fs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAIT, reg # endif -#endif - -#define SYS_futex 202 
-#ifndef FUTEX_WAIT -# define FUTEX_WAIT 0 -# define FUTEX_WAKE 1 -#endif - -#ifndef LOAD_FUTEX_WAIT +# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ + movl %fs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAKE, reg # if FUTEX_WAIT == 0 # define LOAD_FUTEX_WAIT(reg) \ - xorl reg, reg + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %fs:PRIVATE_FUTEX, reg # else # define LOAD_FUTEX_WAIT(reg) \ - movl $FUTEX_WAIT, reg + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %fs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAIT, reg # endif # define LOAD_FUTEX_WAKE(reg) \ - movl $FUTEX_WAKE, reg + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %fs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAKE, reg #endif @@ -53,11 +66,11 @@ #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 - .globl __lll_mutex_lock_wait - .type __lll_mutex_lock_wait,@function - .hidden __lll_mutex_lock_wait + .globl __lll_lock_wait_private + .type __lll_lock_wait_private,@function + .hidden __lll_lock_wait_private .align 16 -__lll_mutex_lock_wait: +__lll_lock_wait_private: cfi_startproc pushq %r10 cfi_adjust_cfa_offset(8) @@ -67,7 +80,7 @@ __lll_mutex_lock_wait: cfi_offset(%rdx, -24) xorq %r10, %r10 /* No timeout. */ movl $2, %edx - LOAD_FUTEX_WAIT (%esi) + LOAD_PRIVATE_FUTEX_WAIT (%esi) cmpl %edx, %eax /* NB: %edx == 2 */ jne 2f @@ -89,15 +102,52 @@ __lll_mutex_lock_wait: cfi_restore(%r10) retq cfi_endproc - .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait - + .size __lll_lock_wait_private,.-__lll_lock_wait_private #ifdef NOT_IN_libc - .globl __lll_mutex_timedlock_wait - .type __lll_mutex_timedlock_wait,@function - .hidden __lll_mutex_timedlock_wait + .globl __lll_lock_wait + .type __lll_lock_wait,@function + .hidden __lll_lock_wait .align 16 -__lll_mutex_timedlock_wait: +__lll_lock_wait: + cfi_startproc + pushq %r10 + cfi_adjust_cfa_offset(8) + pushq %rdx + cfi_adjust_cfa_offset(8) + cfi_offset(%r10, -16) + cfi_offset(%rdx, -24) + xorq %r10, %r10 /* No timeout. 
*/ + movl $2, %edx + LOAD_FUTEX_WAIT (%esi) + + cmpl %edx, %eax /* NB: %edx == 2 */ + jne 2f + +1: movl $SYS_futex, %eax + syscall + +2: movl %edx, %eax + xchgl %eax, (%rdi) /* NB: lock is implied */ + + testl %eax, %eax + jnz 1b + + popq %rdx + cfi_adjust_cfa_offset(-8) + cfi_restore(%rdx) + popq %r10 + cfi_adjust_cfa_offset(-8) + cfi_restore(%r10) + retq + cfi_endproc + .size __lll_lock_wait,.-__lll_lock_wait + + .globl __lll_timedlock_wait + .type __lll_timedlock_wait,@function + .hidden __lll_timedlock_wait + .align 16 +__lll_timedlock_wait: cfi_startproc /* Check for a valid timeout value. */ cmpq $1000000000, 8(%rdx) @@ -118,10 +168,12 @@ __lll_mutex_timedlock_wait: cfi_offset(%r12, -32) cfi_offset(%r13, -40) cfi_offset(%r14, -48) + pushq %rsi + cfi_adjust_cfa_offset(8) /* Stack frame for the timespec and timeval structs. */ - subq $16, %rsp - cfi_adjust_cfa_offset(16) + subq $24, %rsp + cfi_adjust_cfa_offset(24) movq %rdi, %r12 movq %rdx, %r13 @@ -162,6 +214,7 @@ __lll_mutex_timedlock_wait: je 8f movq %rsp, %r10 + movl 24(%rsp), %esi LOAD_FUTEX_WAIT (%esi) movq %r12, %rdi movl $SYS_futex, %eax @@ -174,8 +227,8 @@ __lll_mutex_timedlock_wait: cmpxchgl %edx, (%r12) jnz 7f -6: addq $16, %rsp - cfi_adjust_cfa_offset(-16) +6: addq $32, %rsp + cfi_adjust_cfa_offset(-32) popq %r14 cfi_adjust_cfa_offset(-8) cfi_restore(%r14) @@ -196,7 +249,7 @@ __lll_mutex_timedlock_wait: 3: movl $EINVAL, %eax retq - cfi_adjust_cfa_offset(56) + cfi_adjust_cfa_offset(72) cfi_offset(%r8, -16) cfi_offset(%r9, -24) cfi_offset(%r12, -32) @@ -216,15 +269,15 @@ __lll_mutex_timedlock_wait: 5: movl $ETIMEDOUT, %eax jmp 6b cfi_endproc - .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait + .size __lll_timedlock_wait,.-__lll_timedlock_wait #endif - .globl __lll_mutex_unlock_wake - .type __lll_mutex_unlock_wake,@function - .hidden __lll_mutex_unlock_wake + .globl __lll_unlock_wake_private + .type __lll_unlock_wake_private,@function + .hidden __lll_unlock_wake_private .align 16 
-__lll_mutex_unlock_wake: +__lll_unlock_wake_private: cfi_startproc pushq %rsi cfi_adjust_cfa_offset(8) @@ -234,7 +287,7 @@ __lll_mutex_unlock_wake: cfi_offset(%rdx, -24) movl $0, (%rdi) - LOAD_FUTEX_WAKE (%esi) + LOAD_PRIVATE_FUTEX_WAKE (%esi) movl $1, %edx /* Wake one thread. */ movl $SYS_futex, %eax syscall @@ -247,10 +300,38 @@ __lll_mutex_unlock_wake: cfi_restore(%rsi) retq cfi_endproc - .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake - + .size __lll_unlock_wake_private,.-__lll_unlock_wake_private #ifdef NOT_IN_libc + .globl __lll_unlock_wake + .type __lll_unlock_wake,@function + .hidden __lll_unlock_wake + .align 16 +__lll_unlock_wake: + cfi_startproc + pushq %rsi + cfi_adjust_cfa_offset(8) + pushq %rdx + cfi_adjust_cfa_offset(8) + cfi_offset(%rsi, -16) + cfi_offset(%rdx, -24) + + movl $0, (%rdi) + LOAD_FUTEX_WAKE (%esi) + movl $1, %edx /* Wake one thread. */ + movl $SYS_futex, %eax + syscall + + popq %rdx + cfi_adjust_cfa_offset(-8) + cfi_restore(%rdx) + popq %rsi + cfi_adjust_cfa_offset(-8) + cfi_restore(%rsi) + retq + cfi_endproc + .size __lll_unlock_wake,.-__lll_unlock_wake + .globl __lll_timedwait_tid .type __lll_timedwait_tid,@function .hidden __lll_timedwait_tid --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S.jj 2007-07-23 19:36:30.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,22 +18,11 @@ 02111-1307 USA. 
*/ #include <sysdep.h> +#include <lowlevellock.h> #include <lowlevelrwlock.h> #include <kernel-features.h> -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_PRIVATE_FLAG 128 - -#ifndef UP -# define LOCK lock -#else -# define LOCK -#endif - - .text .globl __pthread_rwlock_unlock @@ -107,28 +96,28 @@ __pthread_rwlock_unlock: 4: xorl %eax, %eax retq -1: +1: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait #if MUTEX != 0 subq $MUTEX, %rdi #endif jmp 2b -3: +3: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake jmp 4b -7: +7: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake jmp 8b .size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S.jj 2007-07-23 19:36:30.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,23 +18,12 @@ 02111-1307 USA. 
*/ #include <sysdep.h> +#include <lowlevellock.h> #include <lowlevelrwlock.h> #include <pthread-errnos.h> #include <kernel-features.h> -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_PRIVATE_FLAG 128 - -#ifndef UP -# define LOCK lock -#else -# define LOCK -#endif - - .text .globl __pthread_rwlock_wrlock @@ -121,11 +110,11 @@ __pthread_rwlock_wrlock: movq %rdx, %rax retq -1: +1: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait #if MUTEX != 0 subq $MUTEX, %rdi #endif @@ -136,32 +125,32 @@ __pthread_rwlock_wrlock: movl $EDEADLK, %edx jmp 9b -6: +6: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake jmp 7b 4: decl WRITERS_QUEUED(%rdi) movl $EAGAIN, %edx jmp 9b -10: +10: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake #if MUTEX != 0 subq $MUTEX, %rdi #endif jmp 11b -12: +12: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait #if MUTEX != 0 subq $MUTEX, %rdi #endif --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S.jj 2007-07-25 19:23:33.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,27 +18,15 @@ 02111-1307 USA. */ #include <sysdep.h> +#include <lowlevellock.h> #include <lowlevelrwlock.h> #include <pthread-errnos.h> #include <kernel-features.h> -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_PRIVATE_FLAG 128 - /* For the calculation see asm/vsyscall.h. 
*/ #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 - -#ifndef UP -# define LOCK lock -#else -# define LOCK -#endif - - .text .globl pthread_rwlock_timedrdlock @@ -172,11 +160,11 @@ pthread_rwlock_timedrdlock: popq %r12 retq -1: +1: movl PSHARED(%rdi), %esi #if MUTEX != 0 addq $MUTEX, %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait jmp 2b 14: cmpl %fs:TID, %eax @@ -184,13 +172,13 @@ pthread_rwlock_timedrdlock: movl $EDEADLK, %edx jmp 9b -6: +6: movl PSHARED(%r12), %esi #if MUTEX == 0 movq %r12, %rdi #else leal MUTEX(%r12), %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake jmp 7b /* Overflow. */ @@ -203,22 +191,22 @@ pthread_rwlock_timedrdlock: movl $EAGAIN, %edx jmp 9b -10: +10: movl PSHARED(%r12), %esi #if MUTEX == 0 movq %r12, %rdi #else leaq MUTEX(%r12), %rdi #endif - callq __lll_mutex_unlock_wake + callq __lll_unlock_wake jmp 11b -12: +12: movl PSHARED(%r12), %esi #if MUTEX == 0 movq %r12, %rdi #else leaq MUTEX(%r12), %rdi #endif - callq __lll_mutex_lock_wait + callq __lll_lock_wait jmp 13b 16: movq $-ETIMEDOUT, %rdx --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S.jj 2006-08-03 19:36:25.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,5 @@ -/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. 
@@ -19,23 +20,11 @@ #include <sysdep.h> #include <shlib-compat.h> +#include <lowlevellock.h> #include <lowlevelcond.h> #include <kernel-features.h> #include <pthread-pi-defines.h> - -#ifdef UP -# define LOCK -#else -# define LOCK lock -#endif - -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_REQUEUE 3 -#define FUTEX_CMP_REQUEUE 4 - -#define EINVAL 22 +#include <pthread-errnos.h> .text @@ -115,7 +104,9 @@ __pthread_cond_broadcast: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_lock_wait + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_lock_wait #if cond_lock != 0 subq $cond_lock, %rdi #endif @@ -123,12 +114,16 @@ __pthread_cond_broadcast: /* Unlock in loop requires wakeup. */ 5: addq $cond_lock-cond_futex, %rdi - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 6b /* Unlock in loop requires wakeup. */ 7: addq $cond_lock-cond_futex, %rdi - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake subq $cond_lock-cond_futex, %rdi jmp 8b --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S.jj 2007-05-28 13:45:24.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S 2007-07-25 20:00:24.000000000 +0200 @@ -18,15 +18,10 @@ 02111-1307 USA. */ #include <sysdep.h> +#include <lowlevellock.h> #include <shlib-compat.h> #include <pthread-errnos.h> -#ifndef UP -# define LOCK lock -#else -# define -#endif - .text .globl sem_trywait --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S.jj 2005-09-09 12:58:42.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. 
@@ -19,23 +19,10 @@ #include <sysdep.h> #include <shlib-compat.h> +#include <lowlevellock.h> #include <lowlevelcond.h> #include <kernel-features.h> - -#ifdef UP -# define LOCK -#else -# define LOCK lock -#endif - -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_WAKE_OP 5 - -#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1) - -#define EINVAL 22 +#include <pthread-errnos.h> .text @@ -111,7 +98,9 @@ __pthread_cond_signal: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_lock_wait + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_lock_wait #if cond_lock != 0 subq $cond_lock, %rdi #endif @@ -120,7 +109,9 @@ __pthread_cond_signal: /* Unlock in loop requires wakeup. */ 5: movq %r8, %rdi - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 6b .size __pthread_cond_signal, .-__pthread_cond_signal versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S.jj 2007-05-24 16:41:08.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S 2007-07-25 20:00:24.000000000 +0200 @@ -19,17 +19,8 @@ #include <kernel-features.h> #include <tcb-offsets.h> +#include <lowlevellock.h> -#ifndef UP -# define LOCK lock -#else -# define LOCK -#endif - -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 -#define FUTEX_PRIVATE_FLAG 128 .comm __fork_generation, 4, 4 --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S 2007-07-25 20:00:24.000000000 +0200 @@ -17,19 +17,4 @@ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ -#include <kernel-features.h> - -/* All locks in libc are private. Use the kernel feature if possible. 
*/ -#define FUTEX_PRIVATE_FLAG 128 -#ifdef __ASSUME_PRIVATE_FUTEX -# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG) -# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG) -#else -# define LOAD_FUTEX_WAIT(reg) \ - movl %fs:PRIVATE_FUTEX, reg -# define LOAD_FUTEX_WAKE(reg) \ - movl %fs:PRIVATE_FUTEX, reg ; \ - orl $FUTEX_WAKE, reg -#endif - #include "lowlevellock.S" --- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S 2007-07-25 20:00:24.000000000 +0200 @@ -19,19 +19,10 @@ #include <sysdep.h> #include <shlib-compat.h> +#include <lowlevellock.h> #include <lowlevelcond.h> #include <pthread-errnos.h> -#ifdef UP -# define LOCK -#else -# define LOCK lock -#endif - -#define SYS_futex 202 -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 - /* For the calculation see asm/vsyscall.h. */ #define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000 @@ -301,7 +292,9 @@ __pthread_cond_timedwait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_lock_wait + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_lock_wait jmp 2b /* Unlock in loop requires wakeup. */ @@ -309,7 +302,9 @@ __pthread_cond_timedwait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 4b /* Locking in loop failed. */ @@ -317,7 +312,9 @@ __pthread_cond_timedwait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_lock_wait + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_lock_wait #if cond_lock != 0 subq $cond_lock, %rdi #endif @@ -328,7 +325,9 @@ __pthread_cond_timedwait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake jmp 11b /* The initial unlocking of the mutex failed. 
*/ @@ -345,7 +344,9 @@ __pthread_cond_timedwait: #if cond_lock != 0 addq $cond_lock, %rdi #endif - callq __lll_mutex_unlock_wake + /* XYZ */ + movl $LLL_SHARED, %esi + callq __lll_unlock_wake 17: movq (%rsp), %rax jmp 18b --- libc/nptl/sysdeps/unix/sysv/linux/fork.c.jj 2007-07-25 20:00:18.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/fork.c 2007-07-25 20:00:24.000000000 +0200 @@ -183,7 +183,7 @@ __libc_fork (void) } /* Initialize the fork lock. */ - __fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER; + __fork_lock = LLL_LOCK_INITIALIZER; } else { --- libc/nptl/sysdeps/unix/sysv/linux/sem_post.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/sem_post.c 2007-07-25 20:00:24.000000000 +0200 @@ -36,8 +36,7 @@ __new_sem_post (sem_t *sem) if (isem->nwaiters > 0) { int err = lll_futex_wake (&isem->value, 1, - // XYZ check mutex flag - LLL_SHARED); + isem->private ^ FUTEX_PRIVATE_FLAG); if (__builtin_expect (err, 0) < 0) { __set_errno (-err); --- libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c 2007-07-25 20:00:24.000000000 +0200 @@ -25,22 +25,35 @@ void -__lll_lock_wait (int *futex) +__lll_lock_wait_private (int *futex) { do { int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); if (oldval != 0) - lll_futex_wait (futex, 2, - // XYZ check mutex flag - LLL_SHARED); + lll_futex_wait (futex, 2, LLL_PRIVATE); + } + while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); +} + + +/* These functions doesn't get included in libc.so */ +#ifdef IS_IN_libpthread +void +__lll_lock_wait (int *futex, int private) +{ + do + { + int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); + if (oldval != 0) + lll_futex_wait (futex, 2, private); } while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); } int -__lll_timedlock_wait (int *futex, const struct timespec *abstime) +__lll_timedlock_wait (int *futex, const struct timespec 
*abstime, int private) { /* Reject invalid timeouts. */ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) @@ -70,9 +83,7 @@ __lll_timedlock_wait (int *futex, const /* Wait. */ int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); if (oldval != 0) - lll_futex_timed_wait (futex, 2, &rt, - // XYZ check mutex flag - LLL_SHARED); + lll_futex_timed_wait (futex, 2, &rt, private); } while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); @@ -80,8 +91,6 @@ __lll_timedlock_wait (int *futex, const } -/* This function doesn't get included in libc.so */ -#ifdef IS_IN_libpthread int __lll_timedwait_tid (int *tidp, const struct timespec *abstime) { --- libc/nptl/sysdeps/unix/sysv/linux/fork.h.jj 2006-06-21 17:36:39.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/fork.h 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -26,7 +26,7 @@ extern unsigned long int __fork_generati extern unsigned long int *__fork_generation_pointer attribute_hidden; /* Lock to protect allocation and deallocation of fork handlers. */ -extern lll_lock_t __fork_lock attribute_hidden; +extern int __fork_lock attribute_hidden; /* Elements of the fork handler lists. */ struct fork_handler --- libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c 2007-07-25 20:00:24.000000000 +0200 @@ -57,8 +57,7 @@ __new_sem_wait (sem_t *sem) int oldtype = __pthread_enable_asynccancel (); err = lll_futex_wait (&isem->value, 0, - // XYZ check mutex flag - LLL_SHARED); + isem->private ^ FUTEX_PRIVATE_FLAG); /* Disable asynchronous cancellation. 
*/ __pthread_disable_asynccancel (oldtype); --- libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c.jj 2005-12-30 09:04:04.000000000 +0100 +++ libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -24,7 +24,7 @@ /* Lock to protect allocation and deallocation of fork handlers. */ -lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER; +int __fork_lock = LLL_LOCK_INITIALIZER; /* Number of pre-allocated handler entries. */ @@ -85,7 +85,7 @@ __register_atfork (prepare, parent, chil void *dso_handle; { /* Get the lock to not conflict with other allocations. */ - lll_lock (__fork_lock); + lll_lock (__fork_lock, LLL_PRIVATE); struct fork_handler *newp = fork_handler_alloc (); @@ -102,7 +102,7 @@ __register_atfork (prepare, parent, chil } /* Release the lock. */ - lll_unlock (__fork_lock); + lll_unlock (__fork_lock, LLL_PRIVATE); return newp == NULL ? ENOMEM : 0; } @@ -112,7 +112,7 @@ libc_hidden_def (__register_atfork) libc_freeres_fn (free_mem) { /* Get the lock to not conflict with running forks. */ - lll_lock (__fork_lock); + lll_lock (__fork_lock, LLL_PRIVATE); /* No more fork handlers. */ __fork_handlers = NULL; @@ -123,7 +123,7 @@ libc_freeres_fn (free_mem) memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool)); /* Release the lock. */ - lll_unlock (__fork_lock); + lll_unlock (__fork_lock, LLL_PRIVATE); /* We can free the memory after releasing the lock. 
*/ while (runp != NULL) --- libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c 2007-07-25 20:00:24.000000000 +0200 @@ -85,8 +85,7 @@ sem_timedwait (sem_t *sem, const struct int oldtype = __pthread_enable_asynccancel (); err = lll_futex_timed_wait (&isem->value, 0, &rt, - // XYZ check mutex flag - LLL_SHARED); + isem->private ^ FUTEX_PRIVATE_FLAG); /* Disable asynchronous cancellation. */ __pthread_disable_asynccancel (oldtype); --- libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -25,7 +25,7 @@ int -__lll_robust_lock_wait (int *futex) +__lll_robust_lock_wait (int *futex, int private) { int oldval = *futex; int tid = THREAD_GETMEM (THREAD_SELF, tid); @@ -44,9 +44,7 @@ __lll_robust_lock_wait (int *futex) && atomic_compare_and_exchange_bool_acq (futex, newval, oldval)) continue; - lll_futex_wait (futex, newval, - // XYZ check mutex flag - LLL_SHARED); + lll_futex_wait (futex, newval, private); try: ; @@ -59,7 +57,8 @@ __lll_robust_lock_wait (int *futex) int -__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime) +__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime, + int private) { /* Reject invalid timeouts. 
*/ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) @@ -102,9 +101,7 @@ __lll_robust_timedlock_wait (int *futex, && atomic_compare_and_exchange_bool_acq (futex, newval, oldval)) continue; - lll_futex_timed_wait (futex, newval, &rt, - // XYZ check mutex flag - LLL_SHARED); + lll_futex_timed_wait (futex, newval, &rt, private); try: ; --- libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c.jj 2006-02-17 09:09:45.000000000 +0100 +++ libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,8 +1,8 @@ #include <pthreadP.h> -#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex) -#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex) -#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_cond_lock (mutex, id) +#define LLL_MUTEX_LOCK(mutex) lll_cond_lock (mutex, /* XYZ */ LLL_SHARED) +#define LLL_MUTEX_TRYLOCK(mutex) lll_cond_trylock (mutex) +#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED) #define __pthread_mutex_lock __pthread_mutex_cond_lock #define NO_INCR --- libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c.jj 2007-07-25 20:00:18.000000000 +0200 +++ libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c 2007-07-25 20:00:24.000000000 +0200 @@ -54,7 +54,7 @@ __unregister_atfork (dso_handle) that there couldn't have been another thread deleting something. The __unregister_atfork function is only called from the dlclose() code which itself serializes the operations. */ - lll_lock (__fork_lock); + lll_lock (__fork_lock, LLL_PRIVATE); /* We have to create a new list with all the entries we don't remove. */ struct deleted_handler @@ -89,7 +89,7 @@ __unregister_atfork (dso_handle) while (runp != NULL); /* Release the lock. */ - lll_unlock (__fork_lock); + lll_unlock (__fork_lock, LLL_PRIVATE); /* Walk the list of all entries which have to be deleted. 
*/ while (deleted != NULL) --- libc/nptl/sysdeps/pthread/createthread.c.jj 2006-09-08 13:57:51.000000000 +0200 +++ libc/nptl/sysdeps/pthread/createthread.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -60,7 +60,7 @@ do_clone (struct pthread *pd, const stru /* We Make sure the thread does not run far by forcing it to get a lock. We lock it here too so that the new thread cannot continue until we tell it to. */ - lll_lock (pd->lock); + lll_lock (pd->lock, LLL_PRIVATE); /* One more thread. We cannot have the thread do this itself, since it might exist but not have been scheduled yet by the time we've returned @@ -223,7 +223,7 @@ create_thread (struct pthread *pd, const __nptl_create_event (); /* And finally restart the new thread. */ - lll_unlock (pd->lock); + lll_unlock (pd->lock, LLL_PRIVATE); } return res; @@ -250,7 +250,7 @@ create_thread (struct pthread *pd, const if (res == 0 && stopped) /* And finally restart the new thread. 
*/ - lll_unlock (pd->lock); + lll_unlock (pd->lock, LLL_PRIVATE); return res; } --- libc/nptl/sysdeps/pthread/bits/stdio-lock.h.jj 2007-07-23 16:21:17.000000000 +0200 +++ libc/nptl/sysdeps/pthread/bits/stdio-lock.h 2007-07-25 20:00:24.000000000 +0200 @@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void void *__self = THREAD_SELF; \ if ((_name).owner != __self) \ { \ - lll_lock ((_name).lock); \ + lll_lock ((_name).lock, LLL_PRIVATE); \ (_name).owner = __self; \ } \ ++(_name).cnt; \ @@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void if (--(_name).cnt == 0) \ { \ (_name).owner = NULL; \ - lll_unlock ((_name).lock); \ + lll_unlock ((_name).lock, LLL_PRIVATE); \ } \ } while (0) --- libc/nptl/sysdeps/pthread/bits/libc-lock.h.jj 2007-03-19 17:43:11.000000000 +0100 +++ libc/nptl/sysdeps/pthread/bits/libc-lock.h 2007-07-25 20:00:24.000000000 +0200 @@ -228,7 +228,7 @@ typedef pthread_key_t __libc_key_t; /* Lock the named lock variable. */ #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread) # define __libc_lock_lock(NAME) \ - ({ lll_lock (NAME); 0; }) + ({ lll_lock (NAME, LLL_PRIVATE); 0; }) #else # define __libc_lock_lock(NAME) \ __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0) @@ -245,7 +245,7 @@ typedef pthread_key_t __libc_key_t; void *self = THREAD_SELF; \ if ((NAME).owner != self) \ { \ - lll_lock ((NAME).lock); \ + lll_lock ((NAME).lock, LLL_PRIVATE); \ (NAME).owner = self; \ } \ ++(NAME).cnt; \ @@ -299,7 +299,7 @@ typedef pthread_key_t __libc_key_t; /* Unlock the named lock variable. 
*/ #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread) # define __libc_lock_unlock(NAME) \ - lll_unlock (NAME) + lll_unlock (NAME, LLL_PRIVATE) #else # define __libc_lock_unlock(NAME) \ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0) @@ -315,7 +315,7 @@ typedef pthread_key_t __libc_key_t; if (--(NAME).cnt == 0) \ { \ (NAME).owner = NULL; \ - lll_unlock ((NAME).lock); \ + lll_unlock ((NAME).lock, LLL_PRIVATE); \ } \ } while (0) #else --- libc/nptl/pthread_mutex_unlock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_mutex_unlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -47,7 +47,7 @@ __pthread_mutex_unlock_usercnt (mutex, d case PTHREAD_MUTEX_ERRORCHECK_NP: /* Error checking mutex. */ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid) - || ! lll_mutex_islocked (mutex->__data.__lock)) + || ! lll_islocked (mutex->__data.__lock)) return EPERM; /* FALLTHROUGH */ @@ -61,7 +61,7 @@ __pthread_mutex_unlock_usercnt (mutex, d --mutex->__data.__nusers; /* Unlock. */ - lll_mutex_unlock (mutex->__data.__lock); + lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); break; case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: @@ -92,7 +92,7 @@ __pthread_mutex_unlock_usercnt (mutex, d case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: if ((mutex->__data.__lock & FUTEX_TID_MASK) != THREAD_GETMEM (THREAD_SELF, tid) - || ! lll_mutex_islocked (mutex->__data.__lock)) + || ! lll_islocked (mutex->__data.__lock)) return EPERM; /* If the previous owner died and the caller did not succeed in @@ -115,7 +115,7 @@ __pthread_mutex_unlock_usercnt (mutex, d --mutex->__data.__nusers; /* Unlock. */ - lll_robust_mutex_unlock (mutex->__data.__lock); + lll_robust_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); break; @@ -161,7 +161,7 @@ __pthread_mutex_unlock_usercnt (mutex, d case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: if ((mutex->__data.__lock & FUTEX_TID_MASK) != THREAD_GETMEM (THREAD_SELF, tid) - || ! 
lll_mutex_islocked (mutex->__data.__lock)) + || ! lll_islocked (mutex->__data.__lock)) return EPERM; /* If the previous owner died and the caller did not succeed in --- libc/nptl/pthread_rwlock_unlock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_rwlock_unlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -27,7 +27,7 @@ int __pthread_rwlock_unlock (pthread_rwlock_t *rwlock) { - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer) rwlock->__data.__writer = 0; else @@ -37,23 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_ if (rwlock->__data.__nr_writers_queued) { ++rwlock->__data.__writer_wakeup; - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); lll_futex_wake (&rwlock->__data.__writer_wakeup, 1, - // XYZ check mutex flag - LLL_SHARED); + rwlock->__data.__shared); return 0; } else if (rwlock->__data.__nr_readers_queued) { ++rwlock->__data.__readers_wakeup; - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, - // XYZ check mutex flag - LLL_SHARED); + rwlock->__data.__shared); return 0; } } - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return 0; } --- libc/nptl/pthread_rwlock_timedrdlock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_rwlock_timedrdlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (rwlock, abst int result = 0; /* Make sure we are along. */ - lll_mutex_lock(rwlock->__data.__lock); + lll_lock(rwlock->__data.__lock, rwlock->__data.__shared); while (1) { @@ -110,16 +110,14 @@ pthread_rwlock_timedrdlock (rwlock, abst int waitval = rwlock->__data.__readers_wakeup; /* Free the lock. 
*/ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); /* Wait for the writer to finish. */ err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup, - waitval, &rt, - // XYZ check mutex flag - LLL_SHARED); + waitval, &rt, rwlock->__data.__shared); /* Get the lock. */ - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); --rwlock->__data.__nr_readers_queued; @@ -133,7 +131,7 @@ pthread_rwlock_timedrdlock (rwlock, abst } /* We are done, free the lock. */ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; } --- libc/nptl/old_pthread_cond_signal.c.jj 2003-03-21 09:02:07.000000000 +0100 +++ libc/nptl/old_pthread_cond_signal.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -33,7 +33,7 @@ __pthread_cond_signal_2_0 (cond) { pthread_cond_t *newcond; -#if LLL_MUTEX_LOCK_INITIALIZER == 0 +#if LLL_LOCK_INITIALIZER == 0 newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); if (newcond == NULL) return ENOMEM; --- libc/nptl/pthread_cond_timedwait.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_cond_timedwait.c 2007-07-25 20:00:24.000000000 +0200 @@ -54,13 +54,13 @@ __pthread_cond_timedwait (cond, mutex, a return EINVAL; /* Make sure we are along. */ - lll_mutex_lock (cond->__data.__lock); + lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Now we can release the mutex. 
*/ int err = __pthread_mutex_unlock_usercnt (mutex, 0); if (err) { - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); return err; } @@ -146,7 +146,7 @@ __pthread_cond_timedwait (cond, mutex, a unsigned int futex_val = cond->__data.__futex; /* Prepare to wait. Release the condvar futex. */ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Enable asynchronous cancellation. Required by the standard. */ cbuffer.oldtype = __pthread_enable_asynccancel (); @@ -161,7 +161,7 @@ __pthread_cond_timedwait (cond, mutex, a __pthread_disable_asynccancel (cbuffer.oldtype); /* We are going to look at shared data again, so get the lock. */ - lll_mutex_lock(cond->__data.__lock); + lll_lock(cond->__data.__lock, /* XYZ */ LLL_SHARED); /* If a broadcast happened, we are done. */ if (cbuffer.bc_seq != cond->__data.__broadcast_seq) @@ -203,7 +203,7 @@ __pthread_cond_timedwait (cond, mutex, a LLL_SHARED); /* We are done with the condvar. */ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* The cancellation handling is back to normal, remove the handler. */ __pthread_cleanup_pop (&buffer, 0); --- libc/nptl/pthread_cond_broadcast.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_cond_broadcast.c 2007-07-25 20:00:24.000000000 +0200 @@ -33,7 +33,7 @@ __pthread_cond_broadcast (cond) pthread_cond_t *cond; { /* Make sure we are alone. */ - lll_mutex_lock (cond->__data.__lock); + lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Are there any waiters to be woken? */ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) @@ -47,7 +47,7 @@ __pthread_cond_broadcast (cond) ++cond->__data.__broadcast_seq; /* We are done. */ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Do not use requeue for pshared condvars. 
*/ if (cond->__data.__mutex == (void *) ~0l) @@ -79,7 +79,7 @@ __pthread_cond_broadcast (cond) } /* We are done. */ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); return 0; } --- libc/nptl/sem_open.c.jj 2007-05-28 13:45:23.000000000 +0200 +++ libc/nptl/sem_open.c 2007-07-25 20:00:24.000000000 +0200 @@ -147,7 +147,7 @@ __sem_search (const void *a, const void void *__sem_mappings attribute_hidden; /* Lock to protect the search tree. */ -lll_lock_t __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER; +int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER; /* Search for existing mapping and if possible add the one provided. */ @@ -161,7 +161,7 @@ check_add_mapping (const char *name, siz if (__fxstat64 (_STAT_VER, fd, &st) == 0) { /* Get the lock. */ - lll_lock (__sem_mappings_lock); + lll_lock (__sem_mappings_lock, LLL_PRIVATE); /* Search for an existing mapping given the information we have. */ struct inuse_sem *fake; @@ -210,7 +210,7 @@ check_add_mapping (const char *name, siz } /* Release the lock. 
*/ - lll_unlock (__sem_mappings_lock); + lll_unlock (__sem_mappings_lock, LLL_PRIVATE); } if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED) --- libc/nptl/pthread_rwlock_tryrdlock.c.jj 2007-05-28 13:45:23.000000000 +0200 +++ libc/nptl/pthread_rwlock_tryrdlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -28,7 +28,7 @@ __pthread_rwlock_tryrdlock (rwlock) { int result = EBUSY; - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer == 0 && (rwlock->__data.__nr_writers_queued == 0 @@ -43,7 +43,7 @@ __pthread_rwlock_tryrdlock (rwlock) result = 0; } - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; } --- libc/nptl/pthread_rwlock_trywrlock.c.jj 2006-09-04 16:42:01.000000000 +0200 +++ libc/nptl/pthread_rwlock_trywrlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -28,7 +28,7 @@ __pthread_rwlock_trywrlock (rwlock) { int result = EBUSY; - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0) { @@ -36,7 +36,7 @@ __pthread_rwlock_trywrlock (rwlock) result = 0; } - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; } --- libc/nptl/pthread_once.c.jj 2006-10-28 07:09:12.000000000 +0200 +++ libc/nptl/pthread_once.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. 
@@ -22,7 +22,7 @@ -static lll_lock_t once_lock = LLL_LOCK_INITIALIZER; +static int once_lock = LLL_LOCK_INITIALIZER; int @@ -35,7 +35,7 @@ __pthread_once (once_control, init_routi object. */ if (*once_control == PTHREAD_ONCE_INIT) { - lll_lock (once_lock); + lll_lock (once_lock, LLL_PRIVATE); /* XXX This implementation is not complete. It doesn't take cancelation and fork into account. */ @@ -46,7 +46,7 @@ __pthread_once (once_control, init_routi *once_control = !PTHREAD_ONCE_INIT; } - lll_unlock (once_lock); + lll_unlock (once_lock, LLL_PRIVATE); } return 0; --- libc/nptl/pthread_getschedparam.c.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/pthread_getschedparam.c 2007-07-25 20:00:24.000000000 +0200 @@ -38,7 +38,7 @@ __pthread_getschedparam (threadid, polic int result = 0; - lll_lock (pd->lock); + lll_lock (pd->lock, LLL_PRIVATE); /* The library is responsible for maintaining the values at all times. If the user uses a interface other than @@ -68,7 +68,7 @@ __pthread_getschedparam (threadid, polic memcpy (param, &pd->schedparam, sizeof (struct sched_param)); } - lll_unlock (pd->lock); + lll_unlock (pd->lock, LLL_PRIVATE); return result; } --- libc/nptl/pthread_barrier_init.c.jj 2007-05-28 13:45:23.000000000 +0200 +++ libc/nptl/pthread_barrier_init.c 2007-07-25 20:00:24.000000000 +0200 @@ -40,7 +40,7 @@ pthread_barrier_init (barrier, attr, cou if (__builtin_expect (count == 0, 0)) return EINVAL; - struct pthread_barrierattr *iattr + const struct pthread_barrierattr *iattr = (attr != NULL ? iattr = (struct pthread_barrierattr *) attr : &default_attr); --- libc/nptl/semaphoreP.h.jj 2007-05-28 13:45:23.000000000 +0200 +++ libc/nptl/semaphoreP.h 2007-07-25 20:00:24.000000000 +0200 @@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once at extern void *__sem_mappings attribute_hidden; /* Lock to protect the search tree. */ -extern lll_lock_t __sem_mappings_lock attribute_hidden; +extern int __sem_mappings_lock attribute_hidden; /* Initializer for mountpoint. 
*/ --- libc/nptl/pthread_setschedparam.c.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/pthread_setschedparam.c 2007-07-25 20:00:24.000000000 +0200 @@ -39,7 +39,7 @@ __pthread_setschedparam (threadid, polic int result = 0; - lll_lock (pd->lock); + lll_lock (pd->lock, LLL_PRIVATE); struct sched_param p; const struct sched_param *orig_param = param; @@ -67,7 +67,7 @@ __pthread_setschedparam (threadid, polic pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET; } - lll_unlock (pd->lock); + lll_unlock (pd->lock, LLL_PRIVATE); return result; } --- libc/nptl/pthread_cond_init.c.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/pthread_cond_init.c 2007-07-25 20:00:24.000000000 +0200 @@ -28,7 +28,7 @@ __pthread_cond_init (cond, cond_attr) { struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr; - cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER; + cond->__data.__lock = LLL_LOCK_INITIALIZER; cond->__data.__futex = 0; cond->__data.__nwaiters = (icond_attr != NULL && ((icond_attr->value --- libc/nptl/pthread_getattr_np.c.jj 2007-07-23 16:21:17.000000000 +0200 +++ libc/nptl/pthread_getattr_np.c 2007-07-25 20:00:24.000000000 +0200 @@ -39,7 +39,7 @@ pthread_getattr_np (thread_id, attr) struct pthread_attr *iattr = (struct pthread_attr *) attr; int ret = 0; - lll_lock (thread->lock); + lll_lock (thread->lock, LLL_PRIVATE); /* The thread library is responsible for keeping the values in the thread desriptor up-to-date in case the user changes them. */ @@ -173,7 +173,7 @@ pthread_getattr_np (thread_id, attr) } } - lll_unlock (thread->lock); + lll_unlock (thread->lock, LLL_PRIVATE); return ret; } --- libc/nptl/pthread_barrier_wait.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_barrier_wait.c 2007-07-25 20:00:24.000000000 +0200 @@ -32,7 +32,7 @@ pthread_barrier_wait (barrier) int result = 0; /* Make sure we are alone. 
*/ - lll_lock (ibarrier->lock); + lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); /* One more arrival. */ --ibarrier->left; @@ -46,8 +46,7 @@ pthread_barrier_wait (barrier) /* Wake up everybody. */ lll_futex_wake (&ibarrier->curr_event, INT_MAX, - // XYZ check mutex flag - LLL_SHARED); + ibarrier->private ^ FUTEX_PRIVATE_FLAG); /* This is the thread which finished the serialization. */ result = PTHREAD_BARRIER_SERIAL_THREAD; @@ -59,13 +58,12 @@ pthread_barrier_wait (barrier) unsigned int event = ibarrier->curr_event; /* Before suspending, make the barrier available to others. */ - lll_unlock (ibarrier->lock); + lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); /* Wait for the event counter of the barrier to change. */ do lll_futex_wait (&ibarrier->curr_event, event, - // XYZ check mutex flag - LLL_SHARED); + ibarrier->private ^ FUTEX_PRIVATE_FLAG); while (event == ibarrier->curr_event); } @@ -75,7 +73,7 @@ pthread_barrier_wait (barrier) /* If this was the last woken thread, unlock. */ if (atomic_increment_val (&ibarrier->left) == init_count) /* We are done. */ - lll_unlock (ibarrier->lock); + lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); return result; } --- libc/nptl/pthread_barrier_destroy.c.jj 2002-11-26 23:49:50.000000000 +0100 +++ libc/nptl/pthread_barrier_destroy.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -31,14 +31,14 @@ pthread_barrier_destroy (barrier) ibarrier = (struct pthread_barrier *) barrier; - lll_lock (ibarrier->lock); + lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1)) /* The barrier is not used anymore. */ result = 0; else /* Still used, return with an error. 
*/ - lll_unlock (ibarrier->lock); + lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); return result; } --- libc/nptl/descr.h.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/descr.h 2007-07-25 20:00:24.000000000 +0200 @@ -309,10 +309,10 @@ struct pthread int parent_cancelhandling; /* Lock to synchronize access to the descriptor. */ - lll_lock_t lock; + int lock; /* Lock for synchronizing setxid calls. */ - lll_lock_t setxid_futex; + int setxid_futex; #if HP_TIMING_AVAIL /* Offset of the CPU clock at start thread start time. */ --- libc/nptl/pthread_rwlock_wrlock.c.jj 2007-07-23 19:36:30.000000000 +0200 +++ libc/nptl/pthread_rwlock_wrlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -32,7 +32,7 @@ __pthread_rwlock_wrlock (rwlock) int result = 0; /* Make sure we are along. */ - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); while (1) { @@ -65,22 +65,21 @@ __pthread_rwlock_wrlock (rwlock) int waitval = rwlock->__data.__writer_wakeup; /* Free the lock. */ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); /* Wait for the writer or reader(s) to finish. */ lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval, - // XYZ check mutex flag - LLL_SHARED); + rwlock->__data.__shared); /* Get the lock. */ - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); /* To start over again, remove the thread from the writer list. */ --rwlock->__data.__nr_writers_queued; } /* We are done, free the lock. */ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; } --- libc/nptl/sem_close.c.jj 2003-05-17 22:49:02.000000000 +0200 +++ libc/nptl/sem_close.c 2007-07-25 20:00:24.000000000 +0200 @@ -47,7 +47,7 @@ sem_close (sem) int result = 0; /* Get the lock. 
*/ - lll_lock (__sem_mappings_lock); + lll_lock (__sem_mappings_lock, LLL_PRIVATE); /* Locate the entry for the mapping the caller provided. */ rec = NULL; @@ -75,7 +75,7 @@ sem_close (sem) } /* Release the lock. */ - lll_unlock (__sem_mappings_lock); + lll_unlock (__sem_mappings_lock, LLL_PRIVATE); return result; } --- libc/nptl/pthread_rwlock_rdlock.c.jj 2007-07-23 19:36:30.000000000 +0200 +++ libc/nptl/pthread_rwlock_rdlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -32,7 +32,7 @@ __pthread_rwlock_rdlock (rwlock) int result = 0; /* Make sure we are along. */ - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); while (1) { @@ -74,21 +74,20 @@ __pthread_rwlock_rdlock (rwlock) int waitval = rwlock->__data.__readers_wakeup; /* Free the lock. */ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); /* Wait for the writer to finish. */ - lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval, - // XYZ check mutex flag - LLL_SHARED); + lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval, + rwlock->__data.__shared); /* Get the lock. */ - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); --rwlock->__data.__nr_readers_queued; } /* We are done, free the lock. */ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; } --- libc/nptl/pthread_mutex_timedlock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_mutex_timedlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -56,7 +56,8 @@ pthread_mutex_timedlock (mutex, abstime) } /* We have to get the mutex. */ - result = lll_mutex_timedlock (mutex->__data.__lock, abstime); + result = lll_timedlock (mutex->__data.__lock, abstime, + /* XYZ */ LLL_SHARED); if (result != 0) goto out; @@ -76,14 +77,15 @@ pthread_mutex_timedlock (mutex, abstime) case PTHREAD_MUTEX_TIMED_NP: simple: /* Normal mutex. 
*/ - result = lll_mutex_timedlock (mutex->__data.__lock, abstime); + result = lll_timedlock (mutex->__data.__lock, abstime, + /* XYZ */ LLL_SHARED); break; case PTHREAD_MUTEX_ADAPTIVE_NP: if (! __is_smp) goto simple; - if (lll_mutex_trylock (mutex->__data.__lock) != 0) + if (lll_trylock (mutex->__data.__lock) != 0) { int cnt = 0; int max_cnt = MIN (MAX_ADAPTIVE_COUNT, @@ -92,7 +94,8 @@ pthread_mutex_timedlock (mutex, abstime) { if (cnt++ >= max_cnt) { - result = lll_mutex_timedlock (mutex->__data.__lock, abstime); + result = lll_timedlock (mutex->__data.__lock, abstime, + /* XYZ */ LLL_SHARED); break; } @@ -100,7 +103,7 @@ pthread_mutex_timedlock (mutex, abstime) BUSY_WAIT_NOP; #endif } - while (lll_mutex_trylock (mutex->__data.__lock) != 0); + while (lll_trylock (mutex->__data.__lock) != 0); mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8; } @@ -174,15 +177,15 @@ pthread_mutex_timedlock (mutex, abstime) } } - result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime, - id); + result = lll_robust_timedlock (mutex->__data.__lock, abstime, id, + /* XYZ */ LLL_SHARED); if (__builtin_expect (mutex->__data.__owner == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) { /* This mutex is now not recoverable. */ mutex->__data.__count = 0; - lll_mutex_unlock (mutex->__data.__lock); + lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); return ENOTRECOVERABLE; } --- libc/nptl/pthread_cond_destroy.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_cond_destroy.c 2007-07-25 20:00:24.000000000 +0200 @@ -27,13 +27,13 @@ __pthread_cond_destroy (cond) pthread_cond_t *cond; { /* Make sure we are alone. */ - lll_mutex_lock (cond->__data.__lock); + lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); if (cond->__data.__total_seq > cond->__data.__wakeup_seq) { /* If there are still some waiters which have not been woken up, this is an application bug. 
*/ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); return EBUSY; } @@ -66,13 +66,13 @@ __pthread_cond_destroy (cond) do { - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); lll_futex_wait (&cond->__data.__nwaiters, nwaiters, // XYZ check mutex flag LLL_SHARED); - lll_mutex_lock (cond->__data.__lock); + lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); nwaiters = cond->__data.__nwaiters; } --- libc/nptl/old_pthread_cond_broadcast.c.jj 2003-03-21 09:02:07.000000000 +0100 +++ libc/nptl/old_pthread_cond_broadcast.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -33,7 +33,7 @@ __pthread_cond_broadcast_2_0 (cond) { pthread_cond_t *newcond; -#if LLL_MUTEX_LOCK_INITIALIZER == 0 +#if LLL_LOCK_INITIALIZER == 0 newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); if (newcond == NULL) return ENOMEM; --- libc/nptl/pthread_mutex_trylock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_mutex_trylock.c 2007-07-25 20:00:24.000000000 +0200 @@ -48,7 +48,7 @@ __pthread_mutex_trylock (mutex) return 0; } - if (lll_mutex_trylock (mutex->__data.__lock) == 0) + if (lll_trylock (mutex->__data.__lock) == 0) { /* Record the ownership. */ mutex->__data.__owner = id; @@ -62,7 +62,7 @@ __pthread_mutex_trylock (mutex) case PTHREAD_MUTEX_TIMED_NP: case PTHREAD_MUTEX_ADAPTIVE_NP: /* Normal mutex. */ - if (lll_mutex_trylock (mutex->__data.__lock) != 0) + if (lll_trylock (mutex->__data.__lock) != 0) break; /* Record the ownership. 
*/ @@ -140,7 +140,7 @@ __pthread_mutex_trylock (mutex) } } - oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id); + oldval = lll_robust_trylock (mutex->__data.__lock, id); if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0) { THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); @@ -154,7 +154,7 @@ __pthread_mutex_trylock (mutex) /* This mutex is now not recoverable. */ mutex->__data.__count = 0; if (oldval == id) - lll_mutex_unlock (mutex->__data.__lock); + lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); return ENOTRECOVERABLE; } --- libc/nptl/pthread_cond_wait.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_cond_wait.c 2007-07-25 20:00:24.000000000 +0200 @@ -45,7 +45,7 @@ __condvar_cleanup (void *arg) unsigned int destroying; /* We are going to modify shared data. */ - lll_mutex_lock (cbuffer->cond->__data.__lock); + lll_lock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED); if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq) { @@ -78,7 +78,7 @@ __condvar_cleanup (void *arg) } /* We are done. */ - lll_mutex_unlock (cbuffer->cond->__data.__lock); + lll_unlock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Wake everybody to make sure no condvar signal gets lost. */ if (! destroying) @@ -102,13 +102,13 @@ __pthread_cond_wait (cond, mutex) int err; /* Make sure we are along. */ - lll_mutex_lock (cond->__data.__lock); + lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Now we can release the mutex. */ err = __pthread_mutex_unlock_usercnt (mutex, 0); if (__builtin_expect (err, 0)) { - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); return err; } @@ -144,7 +144,7 @@ __pthread_cond_wait (cond, mutex) unsigned int futex_val = cond->__data.__futex; /* Prepare to wait. Release the condvar futex. 
*/ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Enable asynchronous cancellation. Required by the standard. */ cbuffer.oldtype = __pthread_enable_asynccancel (); @@ -158,7 +158,7 @@ __pthread_cond_wait (cond, mutex) __pthread_disable_asynccancel (cbuffer.oldtype); /* We are going to look at shared data again, so get the lock. */ - lll_mutex_lock (cond->__data.__lock); + lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* If a broadcast happened, we are done. */ if (cbuffer.bc_seq != cond->__data.__broadcast_seq) @@ -186,7 +186,7 @@ __pthread_cond_wait (cond, mutex) LLL_SHARED); /* We are done with the condvar. */ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* The cancellation handling is back to normal, remove the handler. */ __pthread_cleanup_pop (&buffer, 0); --- libc/nptl/old_pthread_cond_wait.c.jj 2003-03-21 09:02:07.000000000 +0100 +++ libc/nptl/old_pthread_cond_wait.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -34,7 +34,7 @@ __pthread_cond_wait_2_0 (cond, mutex) { pthread_cond_t *newcond; -#if LLL_MUTEX_LOCK_INITIALIZER == 0 +#if LLL_LOCK_INITIALIZER == 0 newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); if (newcond == NULL) return ENOMEM; --- libc/nptl/pthread_cond_signal.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_cond_signal.c 2007-07-25 20:00:24.000000000 +0200 @@ -33,7 +33,7 @@ __pthread_cond_signal (cond) pthread_cond_t *cond; { /* Make sure we are alone. */ - lll_mutex_lock (cond->__data.__lock); + lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED); /* Are there any waiters to be woken? 
*/ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) @@ -56,7 +56,7 @@ __pthread_cond_signal (cond) } /* We are done. */ - lll_mutex_unlock (cond->__data.__lock); + lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED); return 0; } --- libc/nptl/pthreadP.h.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthreadP.h 2007-07-25 20:00:24.000000000 +0200 @@ -151,7 +151,7 @@ hidden_proto (__stack_user) /* Attribute handling. */ extern struct pthread_attr *__attr_list attribute_hidden; -extern lll_lock_t __attr_list_lock attribute_hidden; +extern int __attr_list_lock attribute_hidden; /* First available RT signal. */ extern int __current_sigrtmin attribute_hidden; --- libc/nptl/pthread_mutex_lock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_mutex_lock.c 2007-07-25 20:00:24.000000000 +0200 @@ -27,9 +27,9 @@ #ifndef LLL_MUTEX_LOCK -# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex) -# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex) -# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id) +# define LLL_MUTEX_LOCK(mutex) lll_lock (mutex, /* XYZ */ LLL_SHARED) +# define LLL_MUTEX_TRYLOCK(mutex) lll_trylock (mutex) +# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED) #endif @@ -198,7 +198,7 @@ __pthread_mutex_lock (mutex) { /* This mutex is now not recoverable. */ mutex->__data.__count = 0; - lll_mutex_unlock (mutex->__data.__lock); + lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); return ENOTRECOVERABLE; } --- libc/nptl/pthread_rwlock_timedwrlock.c.jj 2007-06-19 13:10:21.000000000 +0200 +++ libc/nptl/pthread_rwlock_timedwrlock.c 2007-07-25 20:00:24.000000000 +0200 @@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (rwlock, abst int result = 0; /* Make sure we are along. 
*/ - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); while (1) { @@ -100,16 +100,14 @@ pthread_rwlock_timedwrlock (rwlock, abst int waitval = rwlock->__data.__writer_wakeup; /* Free the lock. */ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); /* Wait for the writer or reader(s) to finish. */ err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup, - waitval, &rt, - // XYZ check mutex flag - LLL_SHARED); + waitval, &rt, rwlock->__data.__shared); /* Get the lock. */ - lll_mutex_lock (rwlock->__data.__lock); + lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); /* To start over again, remove the thread from the writer list. */ --rwlock->__data.__nr_writers_queued; @@ -123,7 +121,7 @@ pthread_rwlock_timedwrlock (rwlock, abst } /* We are done, free the lock. */ - lll_mutex_unlock (rwlock->__data.__lock); + lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; } --- libc/nptl/allocatestack.c.jj 2007-07-25 20:00:18.000000000 +0200 +++ libc/nptl/allocatestack.c 2007-07-25 20:00:24.000000000 +0200 @@ -103,7 +103,7 @@ static size_t stack_cache_maxsize = 40 * static size_t stack_cache_actsize; /* Mutex protecting this variable. */ -static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER; +static int stack_cache_lock = LLL_LOCK_INITIALIZER; /* List of queued stack frames. */ static LIST_HEAD (stack_cache); @@ -139,7 +139,7 @@ get_cached_stack (size_t *sizep, void ** struct pthread *result = NULL; list_t *entry; - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); /* Search the cache for a matching entry. We search for the smallest stack which has at least the required size. Note that @@ -172,7 +172,7 @@ get_cached_stack (size_t *sizep, void ** || __builtin_expect (result->stackblock_size > 4 * size, 0)) { /* Release the lock. 
*/ - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); return NULL; } @@ -187,7 +187,7 @@ get_cached_stack (size_t *sizep, void ** stack_cache_actsize -= result->stackblock_size; /* Release the lock early. */ - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); /* Report size and location of the stack to the caller. */ *sizep = result->stackblock_size; @@ -400,12 +400,12 @@ allocate_stack (const struct pthread_att /* Prepare to modify global data. */ - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); /* And add to the list of stacks in use. */ list_add (&pd->list, &__stack_user); - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); } else { @@ -544,12 +544,12 @@ allocate_stack (const struct pthread_att /* Prepare to modify global data. */ - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); /* And add to the list of stacks in use. */ list_add (&pd->list, &stack_used); - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); /* There might have been a race. Another thread might have @@ -598,12 +598,12 @@ allocate_stack (const struct pthread_att mprot_error: err = errno; - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); /* Remove the thread from the list. */ list_del (&pd->list); - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); /* Get rid of the TLS block we allocated. */ _dl_deallocate_tls (TLS_TPADJ (pd), false); @@ -699,7 +699,7 @@ void internal_function __deallocate_stack (struct pthread *pd) { - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); /* Remove the thread from the list of threads with user defined stacks. */ @@ -715,7 +715,7 @@ __deallocate_stack (struct pthread *pd) /* Free the memory associated with the ELF TLS. 
*/ _dl_deallocate_tls (TLS_TPADJ (pd), false); - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); } @@ -732,7 +732,7 @@ __make_stacks_executable (void **stack_e const size_t pagemask = ~(__getpagesize () - 1); #endif - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); list_t *runp; list_for_each (runp, &stack_used) @@ -761,7 +761,7 @@ __make_stacks_executable (void **stack_e break; } - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); return err; } @@ -837,7 +837,7 @@ __find_thread_by_id (pid_t tid) { struct pthread *result = NULL; - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); /* Iterate over the list with system-allocated threads first. */ list_t *runp; @@ -869,7 +869,7 @@ __find_thread_by_id (pid_t tid) } out: - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); return result; } @@ -920,7 +920,7 @@ attribute_hidden __nptl_setxid (struct xid_command *cmdp) { int result; - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); __xidcmd = cmdp; cmdp->cntr = 0; @@ -966,7 +966,7 @@ __nptl_setxid (struct xid_command *cmdp) result = -1; } - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); return result; } @@ -995,7 +995,7 @@ void attribute_hidden __pthread_init_static_tls (struct link_map *map) { - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); /* Iterate over the list with system-allocated threads first. 
*/ list_t *runp; @@ -1006,7 +1006,7 @@ __pthread_init_static_tls (struct link_m list_for_each (runp, &__stack_user) init_one_static_tls (list_entry (runp, struct pthread, list), map); - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); } @@ -1014,7 +1014,7 @@ void attribute_hidden __wait_lookup_done (void) { - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); struct pthread *self = THREAD_SELF; @@ -1063,5 +1063,5 @@ __wait_lookup_done (void) while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT); } - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); } --- libc/nptl/tpp.c.jj 2006-08-15 01:02:29.000000000 +0200 +++ libc/nptl/tpp.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,5 +1,5 @@ /* Thread Priority Protect helpers. - Copyright (C) 2006 Free Software Foundation, Inc. + Copyright (C) 2006, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Jakub Jelinek <jakub@redhat.com>, 2006. @@ -93,7 +93,7 @@ __pthread_tpp_change_priority (int previ if (priomax == newpriomax) return 0; - lll_lock (self->lock); + lll_lock (self->lock, LLL_PRIVATE); tpp->priomax = newpriomax; @@ -129,7 +129,7 @@ __pthread_tpp_change_priority (int previ } } - lll_unlock (self->lock); + lll_unlock (self->lock, LLL_PRIVATE); return result; } @@ -144,7 +144,7 @@ __pthread_current_priority (void) int result = 0; - lll_lock (self->lock); + lll_lock (self->lock, LLL_PRIVATE); if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) { @@ -166,7 +166,7 @@ __pthread_current_priority (void) if (result != -1) result = self->schedparam.sched_priority; - lll_unlock (self->lock); + lll_unlock (self->lock, LLL_PRIVATE); return result; } --- libc/nptl/pthread_setschedprio.c.jj 2007-06-01 12:07:58.000000000 +0200 +++ libc/nptl/pthread_setschedprio.c 2007-07-25 20:00:24.000000000 +0200 @@ -41,7 +41,7 @@ pthread_setschedprio (threadid, prio) struct sched_param param; param.sched_priority = prio; - lll_lock (pd->lock); + 
lll_lock (pd->lock, LLL_PRIVATE); /* If the thread should have higher priority because of some PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority. */ @@ -60,7 +60,7 @@ pthread_setschedprio (threadid, prio) pd->flags |= ATTR_FLAG_SCHED_SET; } - lll_unlock (pd->lock); + lll_unlock (pd->lock, LLL_PRIVATE); return result; } --- libc/nptl/pthread_attr_init.c.jj 2004-03-22 14:45:55.000000000 +0100 +++ libc/nptl/pthread_attr_init.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -27,7 +27,7 @@ struct pthread_attr *__attr_list; -lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER; +int __attr_list_lock = LLL_LOCK_INITIALIZER; int --- libc/nptl/old_pthread_cond_timedwait.c.jj 2003-03-21 09:02:07.000000000 +0100 +++ libc/nptl/old_pthread_cond_timedwait.c 2007-07-25 20:00:24.000000000 +0200 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. 
@@ -35,7 +35,7 @@ __pthread_cond_timedwait_2_0 (cond, mute { pthread_cond_t *newcond; -#if LLL_MUTEX_LOCK_INITIALIZER == 0 +#if LLL_LOCK_INITIALIZER == 0 newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1); if (newcond == NULL) return ENOMEM; --- libc/nptl/pthread_create.c.jj 2007-07-25 20:00:18.000000000 +0200 +++ libc/nptl/pthread_create.c 2007-07-25 20:00:24.000000000 +0200 @@ -63,7 +63,7 @@ __find_in_stack_list (pd) list_t *entry; struct pthread *result = NULL; - lll_lock (stack_cache_lock); + lll_lock (stack_cache_lock, LLL_PRIVATE); list_for_each (entry, &stack_used) { @@ -90,7 +90,7 @@ __find_in_stack_list (pd) } } - lll_unlock (stack_cache_lock); + lll_unlock (stack_cache_lock, LLL_PRIVATE); return result; } @@ -284,9 +284,9 @@ start_thread (void *arg) int oldtype = CANCEL_ASYNC (); /* Get the lock the parent locked to force synchronization. */ - lll_lock (pd->lock); + lll_lock (pd->lock, LLL_PRIVATE); /* And give it up right away. */ - lll_unlock (pd->lock); + lll_unlock (pd->lock, LLL_PRIVATE); CANCEL_RESET (oldtype); } @@ -370,7 +370,7 @@ start_thread (void *arg) # endif this->__list.__next = NULL; - lll_robust_mutex_dead (this->__lock); + lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED); } while (robust != (void *) &pd->robust_head); } Jakub
Index Nav: | [Date Index] [Subject Index] [Author Index] [Thread Index] | |
---|---|---|
Message Nav: | [Date Prev] [Date Next] | [Thread Prev] [Thread Next] |