This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
[PATCH 2/2] PowerPC: libc single-thread lock optimization
- From: Adhemerval Zanella <azanella at linux dot vnet dot ibm dot com>
- To: "GNU C. Library" <libc-alpha at sourceware dot org>
- Date: Wed, 30 Apr 2014 10:57:17 -0300
- Subject: [PATCH 2/2] PowerPC: libc single-thread lock optimization
- Authentication-results: sourceware.org; auth=none
This patch adds a single-thread optimization for locks used within
libc.so. For each lock operation it checks whether the process has
already spawned any threads and, if not, uses non-atomic operations.
Other libraries (libpthread.so for instance) are unaffected by this change.
This is a respin of my first patch to add such an optimization, but now the
code is focused only on lowlevellock.h; atomic.h is untouched.
Tested on powerpc32 and powerpc64.
--
* nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
(__lll_is_single_thread): New macro to check if process has spawned
any threads.
(__lll_robust_trylock): Add optimization to avoid atomic operations in
single thread case.
(lll_lock): Likewise.
(lll_robust_lock): Likewise.
(lll_cond_lock): Likewise.
(lll_robust_cond_lock): Likewise.
(lll_timedlock): Likewise.
(lll_robust_timedlock): Likewise.
(lll_unlock): Likewise.
(lll_robust_unlock): Likewise.
---
diff --git a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index ab92c3f..38529a4 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -76,6 +76,16 @@
# endif
#endif
+/* For internal libc.so lock calls in single-thread process we use normal
+ load/stores. */
+#if !defined NOT_IN_libc || defined UP
+# define __lll_is_single_thread \
+ __glibc_likely (THREAD_GETMEM (THREAD_SELF, header.multiple_threads) == 0)
+#else
+# define __lll_is_single_thread (0)
+#endif
+
+
#define lll_futex_wait(futexp, val, private) \
lll_futex_timed_wait (futexp, val, NULL, private)
@@ -205,7 +215,9 @@
/* Set *futex to ID if it is 0, atomically. Returns the old value */
#define __lll_robust_trylock(futex, id) \
({ int __val; \
- __asm __volatile ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
+ if (!__lll_is_single_thread) \
+ __asm __volatile ( \
+ "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
" cmpwi 0,%0,0\n" \
" bne 2f\n" \
" stwcx. %3,0,%2\n" \
@@ -214,6 +226,12 @@
: "=&r" (__val), "=m" (*futex) \
: "r" (futex), "r" (id), "m" (*futex) \
: "cr0", "memory"); \
+ else \
+ { \
+ __val = *futex; \
+ if (__val == 0) \
+ *futex = id; \
+ } \
__val; \
})
@@ -237,8 +255,16 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
#define lll_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
- if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
- 0) != 0) \
+ int __tmp; \
+ if (!__lll_is_single_thread) \
+ __tmp = atomic_compare_and_exchange_val_acq (__futex, 1, 0); \
+ else \
+ { \
+ __tmp = *__futex; \
+ if (__tmp == 0) \
+ *__futex = 1; \
+ } \
+ if (__builtin_expect (__tmp, 0) != 0) \
{ \
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
__lll_lock_wait_private (__futex); \
@@ -251,8 +277,16 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
({ \
int *__futex = &(lock); \
int __val = 0; \
- if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
- 0), 0)) \
+ int __tmp; \
+ if (!__lll_is_single_thread) \
+ __tmp = atomic_compare_and_exchange_bool_acq (__futex, id, 0); \
+ else \
+ { \
+ __tmp = (*__futex == 0); \
+ if (__tmp) \
+ *__futex = id; \
+ } \
+ if (__builtin_expect (__tmp, 0)) \
__val = __lll_robust_lock_wait (__futex, private); \
__val; \
})
@@ -260,8 +294,16 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
#define lll_cond_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
- if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
- 0) != 0) \
+ int __tmp; \
+ if (!__lll_is_single_thread) \
+ __tmp = atomic_compare_and_exchange_val_acq (__futex, 2, 0); \
+ else \
+ { \
+ __tmp = *__futex; \
+ if (__tmp == 0) \
+ *__futex = 2; \
+ } \
+ if (__builtin_expect (__tmp, 0) != 0) \
__lll_lock_wait (__futex, private); \
})
@@ -269,9 +311,17 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
({ \
int *__futex = &(lock); \
int __val = 0; \
+ int __tmp; \
int __id = id | FUTEX_WAITERS; \
- if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
- 0), 0)) \
+ if (!__lll_is_single_thread) \
+ __tmp = atomic_compare_and_exchange_bool_acq (__futex, __id, 0); \
+ else \
+ { \
+ __tmp = (*__futex == 0); \
+ if (__tmp) \
+ *__futex = id; \
+ } \
+ if (__builtin_expect (__tmp, 0)) \
__val = __lll_robust_lock_wait (__futex, private); \
__val; \
})
@@ -286,8 +336,16 @@ extern int __lll_robust_timedlock_wait
({ \
int *__futex = &(lock); \
int __val = 0; \
- if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
- 0) != 0) \
+ int __tmp; \
+ if (!__lll_is_single_thread) \
+ __tmp = atomic_compare_and_exchange_val_acq (__futex, 1, 0); \
+ else \
+ { \
+ __tmp = *__futex; \
+ if (__tmp == 0) \
+ *__futex = 1; \
+ } \
+ if (__builtin_expect (__tmp, 0) != 0) \
__val = __lll_timedlock_wait (__futex, abstime, private); \
__val; \
})
@@ -296,8 +354,16 @@ extern int __lll_robust_timedlock_wait
({ \
int *__futex = &(lock); \
int __val = 0; \
- if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
- 0), 0)) \
+ int __tmp; \
+ if (!__lll_is_single_thread) \
+ __tmp = atomic_compare_and_exchange_bool_acq (__futex, id, 0); \
+ else \
+ { \
+ __tmp = (*__futex == 0); \
+ if (__tmp) \
+ *__futex = id; \
+ } \
+ if (__builtin_expect (__tmp, 0)) \
__val = __lll_robust_timedlock_wait (__futex, abstime, private); \
__val; \
})
@@ -305,7 +371,14 @@ extern int __lll_robust_timedlock_wait
#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
- int __val = atomic_exchange_rel (__futex, 0); \
+ int __val; \
+ if (!__lll_is_single_thread) \
+ __val = atomic_exchange_rel (__futex, 0); \
+ else \
+ { \
+ __val = *__futex; \
+ *__futex = 0; \
+ } \
if (__glibc_unlikely (__val > 1)) \
lll_futex_wake (__futex, 1, private); \
}))
@@ -313,7 +386,14 @@ extern int __lll_robust_timedlock_wait
#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
- int __val = atomic_exchange_rel (__futex, 0); \
+ int __val; \
+ if (!__lll_is_single_thread) \
+ __val = atomic_exchange_rel (__futex, 0); \
+ else \
+ { \
+ __val = *__futex; \
+ *__futex = 0; \
+ } \
if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
lll_futex_wake (__futex, 1, private); \
}))
--
1.8.4.2