[PATCH 2/5] sysdeps/generic/atomic.h: Add a generic atomic header
- From: Will Newton <will.newton@linaro.org>
- To: libc-alpha@sourceware.org
- Date: Fri, 17 Oct 2014 16:31:19 +0100
- Subject: [PATCH 2/5] sysdeps/generic/atomic.h: Add a generic atomic header
- References: <1413559882-959-1-git-send-email-will.newton@linaro.org>
Add a new header that uses modern GCC intrinsics to implement the
atomic_* and catomic_* functions in a relatively efficient manner.
This code is based on the existing ARM and AArch64 implementations.
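
As an illustration (the lock variable and function below are made up for
this description and are not part of the patch), a caller that reaches
these definitions through the usual <atomic.h> macros might look like:

  static int lock;

  void
  take_lock (void)
  {
    /* Returns the previous value of lock; 0 means the lock was free
       and we now own it.  */
    while (atomic_compare_and_exchange_val_acq (&lock, 1, 0) != 0)
      /* Spin until the current owner releases the lock.  */;
  }
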
ChangeLog:
2014-10-15 Will Newton <will.newton@linaro.org>
* sysdeps/generic/atomic.h: New file.
---
sysdeps/generic/atomic.h | 240 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 240 insertions(+)
create mode 100644 sysdeps/generic/atomic.h
diff --git a/sysdeps/generic/atomic.h b/sysdeps/generic/atomic.h
new file mode 100644
index 0000000..dd1854e
--- /dev/null
+++ b/sysdeps/generic/atomic.h
@@ -0,0 +1,240 @@
+/* Atomic operations. Generic GCC intrinsic version.
+ Copyright (C) 2002-2014 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
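+/* __atomic_link_error is declared but deliberately never defined; it is
+   called only from the unsupported cases below (e.g. 64-bit operations
+   when __ARCH_ATOMIC_64_SUPPORTED is 0) so that any use of them fails
+   at link time.  */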
+void __atomic_link_error (void);
+
+/* Barrier macro. */
+#ifndef atomic_full_barrier
+# define atomic_full_barrier() __sync_synchronize()
+#endif
+
+/* Compare and exchange.
+ For all "bool" routines, we return FALSE if the exchange was successful. */
+
+#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ })
+
+#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ })
+
+#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ })
+
+#if __ARCH_ATOMIC_64_SUPPORTED
+# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ })
+#else
+# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+ ({ __atomic_link_error (); 0; })
+#endif
+
+#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
+#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
+#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
+#if __ARCH_ATOMIC_64_SUPPORTED
+# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+ ({ \
+ __typeof (*(mem)) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+#else
+# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+ ({ __atomic_link_error (); (__typeof (*(mem)))0; })
+#endif
+
+
+/* Compare and exchange with "acquire" semantics, ie barrier after. */
+
+#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+ __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
+ mem, new, old, __ATOMIC_ACQUIRE)
+
+#define atomic_compare_and_exchange_val_acq(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+ mem, new, old, __ATOMIC_ACQUIRE)
+
+/* Compare and exchange with "release" semantics, ie barrier before. */
+
+#define atomic_compare_and_exchange_bool_rel(mem, new, old) \
+ __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
+ mem, new, old, __ATOMIC_RELEASE)
+
+#define atomic_compare_and_exchange_val_rel(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+ mem, new, old, __ATOMIC_RELEASE)
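+
+/* Illustration only, not part of the interface defined here: the "bool"
+   forms follow the existing glibc convention of returning 0 (false) when
+   the exchange succeeded, e.g.
+
+     if (!atomic_compare_and_exchange_bool_acq (&word, newval, oldval))
+       ... exchange happened, word now holds newval ...
+
+   whereas the "val" forms return the value seen in memory, which equals
+   oldval exactly when the exchange succeeded.  */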
+
+
+/* Atomic exchange (without compare). */
+
+#define __arch_exchange_8_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_16_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_32_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
+#if __ARCH_ATOMIC_64_SUPPORTED
+# define __arch_exchange_64_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+#else
+# define __arch_exchange_64_int(mem, newval, model) \
+ ({ __atomic_link_error (); (__typeof (*(mem)))0; })
+#endif
+
+#define atomic_exchange_acq(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
+
+#define atomic_exchange_rel(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
+
+
+/* Atomically add value and return the previous (unincremented) value. */
+
+#define __arch_exchange_and_add_8_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_16_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_32_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
+#if __ARCH_ATOMIC_64_SUPPORTED
+# define __arch_exchange_and_add_64_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+#else
+# define __arch_exchange_and_add_64_int(mem, value, model) \
+ ({ __atomic_link_error (); (__typeof (*(mem)))0; })
+#endif
+
+#define atomic_exchange_and_add_acq(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+ __ATOMIC_ACQUIRE)
+
+#define atomic_exchange_and_add_rel(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+ __ATOMIC_RELEASE)
+
+#define atomic_exchange_and_add_relaxed(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+ __ATOMIC_RELAXED)
+
+#define catomic_exchange_and_add atomic_exchange_and_add_acq
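+
+/* Illustration only: atomic_exchange_and_add_acq (&counter, 1) adds 1 to
+   counter and evaluates to the value counter held before the addition.
+   The catomic_ variant above is simply an alias for the acquire form.  */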
+
+/* Atomically bitwise-AND the value into *mem and return the previous value. */
+
+#define __arch_exchange_and_and_8_int(mem, value, model) \
+ __atomic_fetch_and (mem, value, model)
+
+#define __arch_exchange_and_and_16_int(mem, value, model) \
+ __atomic_fetch_and (mem, value, model)
+
+#define __arch_exchange_and_and_32_int(mem, value, model) \
+ __atomic_fetch_and (mem, value, model)
+
+#if __ARCH_ATOMIC_64_SUPPORTED
+# define __arch_exchange_and_and_64_int(mem, value, model) \
+ __atomic_fetch_and (mem, value, model)
+#else
+# define __arch_exchange_and_and_64_int(mem, value, model) \
+ ({ __atomic_link_error (); (__typeof (*(mem)))0; })
+#endif
+
+#define atomic_and(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_and, int, mem, value, \
+ __ATOMIC_ACQUIRE)
+
+#define atomic_and_relaxed(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_and, int, mem, value, \
+ __ATOMIC_RELAXED)
+
+#define atomic_and_val atomic_and
+
+#define catomic_and atomic_and
+
+/* Atomically bitwise-OR the value into *mem and return the previous value. */
+
+#define __arch_exchange_and_or_8_int(mem, value, model) \
+ __atomic_fetch_or (mem, value, model)
+
+#define __arch_exchange_and_or_16_int(mem, value, model) \
+ __atomic_fetch_or (mem, value, model)
+
+#define __arch_exchange_and_or_32_int(mem, value, model) \
+ __atomic_fetch_or (mem, value, model)
+
+#if __ARCH_ATOMIC_64_SUPPORTED
+# define __arch_exchange_and_or_64_int(mem, value, model) \
+ __atomic_fetch_or (mem, value, model)
+#else
+# define __arch_exchange_and_or_64_int(mem, value, model) \
+ ({ __atomic_link_error (); (__typeof (*(mem)))0; })
+#endif
+
+#define atomic_or(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_or, int, mem, value, \
+ __ATOMIC_ACQUIRE)
+
+#define atomic_or_relaxed(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_or, int, mem, value, \
+ __ATOMIC_RELAXED)
+
+#define atomic_or_val atomic_or
+
+#define catomic_or atomic_or
--
1.9.3