From: Adhemerval Zanella
Date: Mon, 6 Oct 2014 21:08:32 +0000 (-0500)
Subject: powerpc: remove linux lowlevellock.h
X-Git-Tag: glibc-2.21~517
X-Git-Url: https://sourceware.org/git/?a=commitdiff_plain;h=47d4623785559a031ce33b266d1f9b5a2fd24d47;p=glibc.git

powerpc: remove linux lowlevellock.h

This patch removes the powerpc-specific lowlevellock.h and adjusts the
implementations that rely on the __lll_[rel/acq]_instr defines.
---
diff --git a/ChangeLog b/ChangeLog
index e127a08c27..0d37530437 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,14 @@
-2014-10-02  Chris Metcalf
+2014-10-06  Adhemerval Zanella
+
+	* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Remove file.
+	* sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
+	(pthread_spin_unlock): Use __ARCH_REL_INSTR instead of
+	__lll_rel_instr and __ARCH_ACQ_INSTR instead of __lll_acq_instr.
+	* sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post):
+	Likewise.
+	(__old_sem_post): Likewise.
+
+2014-10-06  Chris Metcalf
 
 	* sysdeps/unix/sysv/linux/tile/sysdep.h (INLINE_VSYSCALL):
 	Define INLINE_VSYSCALL, INTERNAL_VSYSCALL, and
diff --git a/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
deleted file mode 100644
index a651d23c50..0000000000
--- a/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Paul Mackerras, 2003.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>. */
-
-#ifndef _LOWLEVELLOCK_H
-#define _LOWLEVELLOCK_H 1
-
-#include
-#include
-#include
-#include
-#include
-
-#ifndef __NR_futex
-# define __NR_futex 221
-#endif
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-#define FUTEX_WAKE_OP 5
-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
-#define FUTEX_LOCK_PI 6
-#define FUTEX_UNLOCK_PI 7
-#define FUTEX_TRYLOCK_PI 8
-#define FUTEX_WAIT_BITSET 9
-#define FUTEX_WAKE_BITSET 10
-#define FUTEX_WAIT_REQUEUE_PI 11
-#define FUTEX_CMP_REQUEUE_PI 12
-#define FUTEX_PRIVATE_FLAG 128
-#define FUTEX_CLOCK_REALTIME 256
-
-#define FUTEX_BITSET_MATCH_ANY 0xffffffff
-
-/* Values for 'private' parameter of locking macros. Yes, the
-   definition seems to be backwards. But it is not. The bit will be
-   reversed before passing to the system call. */
-#define LLL_PRIVATE 0
-#define LLL_SHARED FUTEX_PRIVATE_FLAG
-
-#if !defined NOT_IN_libc || defined IS_IN_rtld
-/* In libc.so or ld.so all futexes are private. */
-# ifdef __ASSUME_PRIVATE_FUTEX
-# define __lll_private_flag(fl, private) \
-  ((fl) | FUTEX_PRIVATE_FLAG)
-# else
-# define __lll_private_flag(fl, private) \
-  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
-# endif
-#else
-# ifdef __ASSUME_PRIVATE_FUTEX
-# define __lll_private_flag(fl, private) \
-  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
-# else
-# define __lll_private_flag(fl, private) \
-  (__builtin_constant_p (private) \
-   ? ((private) == 0 \
-      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
-      : (fl)) \
-   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
-              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
-# endif
-#endif
-
-#define lll_futex_wait(futexp, val, private) \
-  lll_futex_timed_wait (futexp, val, NULL, private)
-
-#define lll_futex_timed_wait(futexp, val, timespec, private) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (__err); \
-    long int __ret; \
- \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
-                              __lll_private_flag (FUTEX_WAIT, private), \
-                              (val), (timespec)); \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
-  })
-
-#define lll_futex_timed_wait_bitset(futexp, val, timespec, clockbit, private) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (__err); \
-    long int __ret; \
-    int __op = FUTEX_WAIT_BITSET | clockbit; \
- \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
-                              __lll_private_flag (__op, private), \
-                              (val), (timespec), NULL /* Unused. */, \
-                              FUTEX_BITSET_MATCH_ANY); \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
-  })
-
-#define lll_futex_wake(futexp, nr, private) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (__err); \
-    long int __ret; \
- \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
-                              __lll_private_flag (FUTEX_WAKE, private), \
-                              (nr), 0); \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
-  })
-
-/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (__err); \
-    long int __ret; \
- \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
-                              __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
-                              (nr_wake), (nr_move), (mutex), (val)); \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
-  })
-
-/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (__err); \
-    long int __ret; \
- \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
-                              __lll_private_flag (FUTEX_WAKE_OP, private), \
-                              (nr_wake), (nr_wake2), (futexp2), \
-                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
-  })
-
-/* Priority Inheritance support. */
-#define lll_futex_wait_requeue_pi(futexp, val, mutex, private) \
-  lll_futex_timed_wait_requeue_pi (futexp, val, NULL, 0, mutex, private)
-
-#define lll_futex_timed_wait_requeue_pi(futexp, val, timespec, clockbit, \
-                                        mutex, private) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (__err); \
-    long int __ret; \
-    int __op = FUTEX_WAIT_REQUEUE_PI | clockbit; \
- \
-    __ret = INTERNAL_SYSCALL (futex, __err, 5, (futexp), \
-                              __lll_private_flag (__op, private), \
-                              (val), (timespec), mutex); \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
-  })
-
-#define lll_futex_cmp_requeue_pi(futexp, nr_wake, nr_move, mutex, val, priv) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (__err); \
-    long int __ret; \
- \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
-                              __lll_private_flag (FUTEX_CMP_REQUEUE_PI, priv),\
-                              (nr_wake), (nr_move), (mutex), (val)); \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
-  })
-
-
-#ifdef UP
-# define __lll_acq_instr ""
-# define __lll_rel_instr ""
-#else
-# define __lll_acq_instr "isync"
-# ifdef _ARCH_PWR4
-/*
- * Newer powerpc64 processors support the new "light weight" sync (lwsync)
- * So if the build is using -mcpu=[power4,power5,power5+,970] we can
- * safely use lwsync.
- */
-# define __lll_rel_instr "lwsync"
-# else
-/*
- * Older powerpc32 processors don't support the new "light weight"
- * sync (lwsync). So the only safe option is to use normal sync
- * for all powerpc32 applications.
- */
-# define __lll_rel_instr "sync"
-# endif
-#endif
-
-/* Set *futex to ID if it is 0, atomically. Returns the old value */
-#define __lll_base_trylock(futex, id) \
-  ({ int __val; \
-     __asm __volatile ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
-                       " cmpwi 0,%0,0\n" \
-                       " bne 2f\n" \
-                       " stwcx. %3,0,%2\n" \
-                       " bne- 1b\n" \
-                       "2: " __lll_acq_instr \
-                       : "=&r" (__val), "=m" (*futex) \
-                       : "r" (futex), "r" (id), "m" (*futex) \
-                       : "cr0", "memory"); \
-     __val; \
-  })
-
-/* Set *futex to 1 if it is 0, atomically. Returns the old value */
-#define __lll_trylock(futex) __lll_base_trylock (futex, 1)
-
-#define lll_trylock(lock) __lll_trylock (&(lock))
-
-/* Set *futex to 2 if it is 0, atomically. Returns the old value */
-#define __lll_cond_trylock(futex) __lll_base_trylock (futex, 2)
-
-#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
-
-
-extern void __lll_lock_wait_private (int *futex) attribute_hidden;
-extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
-
-#define lll_lock(lock, private) \
-  (void) ({ \
-    int *__futex = &(lock); \
-    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
-                          0) != 0) \
-      { \
-        if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
-          __lll_lock_wait_private (__futex); \
-        else \
-          __lll_lock_wait (__futex, private); \
-      } \
-  })
-
-#define lll_robust_lock(lock, id, private) \
-  ({ \
-    int *__futex = &(lock); \
-    int __val = 0; \
-    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
-                                                                0), 0)) \
-      __val = __lll_robust_lock_wait (__futex, private); \
-    __val; \
-  })
-
-#define lll_cond_lock(lock, private) \
-  (void) ({ \
-    int *__futex = &(lock); \
-    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
-                          0) != 0) \
-      __lll_lock_wait (__futex, private); \
-  })
-
-#define lll_robust_cond_lock(lock, id, private) \
-  ({ \
-    int *__futex = &(lock); \
-    int __val = 0; \
-    int __id = id | FUTEX_WAITERS; \
-    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
-                                                                0), 0)) \
-      __val = __lll_robust_lock_wait (__futex, private); \
-    __val; \
-  })
-
-
-extern int __lll_timedlock_wait
-  (int *futex, const struct timespec *, int private) attribute_hidden;
-extern int __lll_robust_timedlock_wait
-  (int *futex, const struct timespec *, int private) attribute_hidden;
-
-#define lll_timedlock(lock, abstime, private) \
-  ({ \
-    int *__futex = &(lock); \
-    int __val = 0; \
-    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
-                          0) != 0) \
-      __val = __lll_timedlock_wait (__futex, abstime, private); \
-    __val; \
-  })
-
-#define lll_robust_timedlock(lock, abstime, id, private) \
-  ({ \
-    int *__futex = &(lock); \
-    int __val = 0; \
-    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
-                                                                0), 0)) \
-      __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
-    __val; \
-  })
-
-#define lll_unlock(lock, private) \
-  ((void) ({ \
-    int *__futex = &(lock); \
-    int __val = atomic_exchange_rel (__futex, 0); \
-    if (__glibc_unlikely (__val > 1)) \
-      lll_futex_wake (__futex, 1, private); \
-  }))
-
-#define lll_robust_unlock(lock, private) \
-  ((void) ({ \
-    int *__futex = &(lock); \
-    int __val = atomic_exchange_rel (__futex, 0); \
-    if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
-      lll_futex_wake (__futex, 1, private); \
-  }))
-
-#define lll_islocked(futex) \
-  (futex != 0)
-
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
-/* The states of a lock are:
-    0 - untaken
-    1 - taken by one user
-   >1 - taken by more users */
-
-/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
-   wakeup when the clone terminates. The memory location contains the
-   thread ID while the clone is running and is reset to zero
-   afterwards. */
-#define lll_wait_tid(tid) \
-  do { \
-    __typeof (tid) __tid; \
-    while ((__tid = (tid)) != 0) \
-      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
-  } while (0)
-
-extern int __lll_timedwait_tid (int *, const struct timespec *)
-  attribute_hidden;
-
-#define lll_timedwait_tid(tid, abstime) \
-  ({ \
-    int __res = 0; \
-    if ((tid) != 0) \
-      __res = __lll_timedwait_tid (&(tid), (abstime)); \
-    __res; \
-  })
-
-#endif /* lowlevellock.h */
diff --git a/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c b/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
index 2b8c84d756..4906205ce0 100644
--- a/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
@@ -22,7 +22,7 @@
 int
 pthread_spin_unlock (pthread_spinlock_t *lock)
 {
-  __asm __volatile (__lll_rel_instr ::: "memory");
+  __asm __volatile (__ARCH_REL_INSTR ::: "memory");
   *lock = 0;
   return 0;
 }
diff --git a/sysdeps/unix/sysv/linux/powerpc/sem_post.c b/sysdeps/unix/sysv/linux/powerpc/sem_post.c
index f222d9a69c..831a8dd4de 100644
--- a/sysdeps/unix/sysv/linux/powerpc/sem_post.c
+++ b/sysdeps/unix/sysv/linux/powerpc/sem_post.c
@@ -30,9 +30,9 @@ __new_sem_post (sem_t *sem)
 {
   struct new_sem *isem = (struct new_sem *) sem;
 
-  __asm __volatile (__lll_rel_instr ::: "memory");
+  __asm __volatile (__ARCH_REL_INSTR ::: "memory");
   atomic_increment (&isem->value);
-  __asm __volatile (__lll_acq_instr ::: "memory");
+  __asm __volatile (__ARCH_ACQ_INSTR ::: "memory");
   if (isem->nwaiters > 0)
     {
       int err = lll_futex_wake (&isem->value, 1,
@@ -55,7 +55,7 @@ __old_sem_post (sem_t *sem)
 {
   int *futex = (int *) sem;
 
-  __asm __volatile (__lll_rel_instr ::: "memory");
+  __asm __volatile (__ARCH_REL_INSTR ::: "memory");
   (void) atomic_increment_val (futex);
   /* We always have to assume it is a shared semaphore. */
   int err = lll_futex_wake (futex, 1, LLL_SHARED);
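
The __ARCH_REL_INSTR/__ARCH_ACQ_INSTR macros the patch switches to come from the powerpc sysdep headers and are not shown in this diff. The following is a minimal sketch of the barrier selection that the removed __lll_rel_instr/__lll_acq_instr block performed, and of how a release barrier is used before an unlocking store; the EXAMPLE_* names and the helper function are illustrative only, and it is assumed (not shown here) that the __ARCH_* macros resolve to the same isync/lwsync/sync choices.

/* Sketch of the instruction selection done by the removed powerpc
   __lll_acq_instr/__lll_rel_instr macros.  EXAMPLE_* names are
   hypothetical, not glibc's.  */
#ifdef UP
# define EXAMPLE_ACQ_INSTR ""        /* uniprocessor: compiler barrier only */
# define EXAMPLE_REL_INSTR ""
#else
# define EXAMPLE_ACQ_INSTR "isync"   /* acquire barrier */
# ifdef _ARCH_PWR4
#  define EXAMPLE_REL_INSTR "lwsync" /* lightweight sync, POWER4 and newer */
# else
#  define EXAMPLE_REL_INSTR "sync"   /* full sync for older powerpc32 */
# endif
#endif

/* Release a simple spin lock: order earlier stores before the store
   that publishes the unlocked state.  */
static inline void
example_spin_unlock (volatile int *lock)
{
  __asm __volatile (EXAMPLE_REL_INSTR ::: "memory");
  *lock = 0;
}

The release barrier has to precede the store (or, in sem_post, the atomic increment) that makes the update visible to other threads, which is the ordering both the old and the new code in the hunks above preserve.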