+2003-03-20 Ulrich Drepper <drepper@redhat.com>
+
+ * include/atomic.h: Define atomic_exchange and
+ atomic_decrement_if_positive if not already defined. Add some
+ __builtin_expect.
+ * sysdeps/i386/i486/bits/atomic.h: Define atomic_exchange.
+ * sysdeps/x86_64/bits/atomic.h: Likewise.
+ * sysdeps/ia64/bits/atomic.h: Pretty printing. Define atomic_exchange.
+ * sysdeps/powerpc/bits/atomic.h: Pretty printing. Define
+ atomic_exchange, atomic_exchange_and_add, and
+ atomic_decrement_if_positive.
+
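(The include/atomic.h hunk itself is not part of this excerpt. Purely as a sketch of what such a fallback could look like, assuming the tree's existing atomic_compare_and_exchange_val_acq (mem, newval, oldval) interface; this is illustrative only, not the committed text.)

/* Sketch only; not the committed include/atomic.h code.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (mem) __memp = (mem); \
     __typeof (*(mem)) __oldv = *__memp; \
     /* Retry until *MEM is <= 0 or the CAS installs __oldv - 1.  */ \
     while (__builtin_expect (__oldv > 0, 1) \
            && __builtin_expect (atomic_compare_and_exchange_val_acq \
                                 (__memp, __oldv - 1, __oldv) != __oldv, 0)) \
       __oldv = *__memp; \
     __oldv; })
#endif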
2003-03-20 Alexandre Oliva <aoliva@redhat.com>
* sysdeps/unix/sysv/linux/mips/mips64/n64/ioctl.S: Sign-extend
#endif
+/* Note that we need no lock prefix: xchg with a memory operand is
+   implicitly locked. */
+#define atomic_exchange(mem, newvalue) \
+ ({ __typeof (*mem) result; \
+ if (sizeof (*mem) == 1) \
+ __asm __volatile ("xchgb %b0, %1" \
+ : "=r" (result), "=m" (*mem) \
+ : "0" (newvalue), "1" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile ("xchgw %w0, %1" \
+ : "=r" (result), "=m" (*mem) \
+ : "0" (newvalue), "1" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile ("xchgl %0, %1" \
+ : "=r" (result), "=m" (*mem) \
+ : "0" (newvalue), "1" (*mem)); \
+ else \
+ { \
+ result = 0; \
+ abort (); \
+ } \
+ result; })
+
+
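A usage sketch, not part of the patch: because atomic_exchange returns the previous contents, a one-shot hand-off needs no separate read. The names below (done, claim_once) are hypothetical.

/* Hypothetical example; not from the patch.  */
static int done;

static int
claim_once (void)
{
  /* Exactly one caller sees the old value 0 and thus "wins".  */
  return atomic_exchange (&done, 1) == 0;
}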
#define atomic_exchange_and_add(mem, value) \
({ __typeof (*mem) result; \
__typeof (value) addval = (value); \
(!__sync_bool_compare_and_swap_si ((int *) (mem), (int) (long) (oldval), \
(int) (long) (newval)))
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
(!__sync_bool_compare_and_swap_di ((long *) (mem), (long) (oldval), \
(long) (newval)))
__sync_val_compare_and_swap_si ((int *) (mem), (int) (long) (oldval), \
(int) (long) (newval))
-# define __arch_compare_and_exchange_64_val_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_64_val_acq(mem, newval, oldval) \
__sync_val_compare_and_swap_di ((long *) (mem), (long) (oldval), \
(long) (newval))
-# define atomic_exchange_and_add(mem, value) \
+/* Atomically store VALUE in *MEM and return the old value. */
+#define atomic_exchange(mem, value) \
+ __sync_lock_test_and_set_si (mem, value)
+
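+/* Atomically add VALUE to *MEM and return the old value. */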
+#define atomic_exchange_and_add(mem, value) \
({ \
__typeof (*mem) __oldval, __val; \
__typeof (mem) __memp = (mem); \
(abort (), 0)
#ifdef UP
-#define __ARCH_ACQ_INSTR ""
-#define __ARCH_REL_INSTR ""
+# define __ARCH_ACQ_INSTR ""
+# define __ARCH_REL_INSTR ""
#else
-#define __ARCH_ACQ_INSTR "isync"
-#define __ARCH_REL_INSTR "sync"
+# define __ARCH_ACQ_INSTR "isync"
+# define __ARCH_REL_INSTR "sync"
#endif
/*
})
#ifdef __powerpc64__
-#define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
({ \
unsigned long __tmp; \
__asm __volatile (__ARCH_REL_INSTR "\n" \
})
#else /* powerpc32 */
-#define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
(abort (), 0)
#endif
+
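+/* Atomically store VALUE in *MEM and return the old value.  The
+   lwarx/stwcx. pair retries until the reservation holds; the leading
+   __ARCH_REL_INSTR orders preceding accesses on MP. */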
+#define atomic_exchange(mem, value) \
+ ({ if (sizeof (*mem) != 4) \
+ abort (); \
+ int __val; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: lwarx %0,0,%2\n" \
+ " stwcx. %3,0,%2\n" \
+ " bne- 1b" \
+ : "=&r" (__val), "=m" (*mem) \
+ : "r" (mem), "r" (value), "1" (*mem) \
+ : "cr0", "memory"); \
+ __val; })
+
+
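+/* Atomically add VALUE to *MEM and return the old value of *MEM. */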
+#define atomic_exchange_and_add(mem, value) \
+ ({ if (sizeof (*mem) != 4) \
+ abort (); \
+ int __val, __tmp; \
+ __asm __volatile ("1: lwarx %0,0,%3\n" \
+ " addi %1,%0,%4\n" \
+ " stwcx. %1,0,%3\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "r" (mem), "I" (value), "2" (*mem) \
+ : "cr0"); \
+ __val; \
+ })
+
+
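Again only a usage sketch, not part of the patch: atomic_exchange_and_add is a fetch-and-add, so callers see the pre-addition value. The names below (next_ticket, take_ticket) are hypothetical.

/* Hypothetical example; not from the patch.  */
static int next_ticket;

static int
take_ticket (void)
{
  /* The macro returns the value *MEM held before VALUE was added.  */
  return atomic_exchange_and_add (&next_ticket, 1);
}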
+/* Decrement *MEM if it is > 0, and return the old value. */
+#define atomic_decrement_if_positive(mem) \
+ ({ if (sizeof (*mem) != 4) \
+ abort (); \
+ int __val, __tmp; \
+ __asm __volatile ("1: lwarx %0,0,%3\n" \
+ " cmpwi 0,%0,0\n" \
+ " addi %1,%0,-1\n" \
+ " ble 2f\n" \
+ " stwcx. %1,0,%3\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "r" (mem), "2" (*mem) \
+ : "cr0"); \
+ __val; \
+ })
+
+
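One more usage sketch, not part of the patch: atomic_decrement_if_positive consumes a token only when one is available, which is the shape of a non-blocking semaphore down. The names below (sem_count, sem_trydown) are hypothetical.

/* Hypothetical example; not from the patch.  */
static int sem_count;

static int
sem_trydown (void)
{
  /* An old value > 0 means we actually consumed a token.  */
  return atomic_decrement_if_positive (&sem_count) > 0;
}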
#define atomic_full_barrier() __asm ("sync" ::: "memory")
#ifdef __powerpc64__
-#define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
#else
-#define atomic_read_barrier() __asm ("sync" ::: "memory")
+# define atomic_read_barrier() __asm ("sync" ::: "memory")
#endif
#define atomic_write_barrier() __asm ("eieio" ::: "memory")
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
ret; })
+/* Note that we need no lock prefix: xchg with a memory operand is
+   implicitly locked. */
+#define atomic_exchange(mem, newvalue) \
+ ({ __typeof (*mem) result; \
+ if (sizeof (*mem) == 1) \
+ __asm __volatile ("xchgb %b0, %1" \
+ : "=r" (result), "=m" (*mem) \
+ : "0" (newvalue), "1" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile ("xchgw %w0, %1" \
+ : "=r" (result), "=m" (*mem) \
+ : "0" (newvalue), "1" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile ("xchgl %0, %1" \
+ : "=r" (result), "=m" (*mem) \
+ : "0" (newvalue), "1" (*mem)); \
+ else \
+ __asm __volatile ("xchgq %q0, %1" \
+ : "=r" (result), "=m" (*mem) \
+ : "0" (newvalue), "1" (*mem)); \
+ result; })
+
+
#define atomic_exchange_and_add(mem, value) \
({ __typeof (*mem) result; \
if (sizeof (*mem) == 1) \