+2003-04-14 Jakub Jelinek <jakub@redhat.com>
+
+ * sysdeps/i386/i486/bits/atomic.h: Rename LOCK to LOCK_PREFIX.
+ * sysdeps/x86_64/bits/atomic.h: Likewise.
+
+2003-04-14 Ulrich Drepper <drepper@redhat.com>
+
+ * sysdeps/unix/sysv/linux/i386/sysdep.h: Change PUSHARGS_1 and
+ POPARGS_1 to emit labels for the mov instructions.
+
+2003-04-14 Jakub Jelinek <jakub@redhat.com>
+
+ * sysdeps/unix/sysv/linux/sparc/sparc64/sysdep.h (ret_NOERRNO): Define.
+
2003-04-14 Roland McGrath <roland@redhat.com>

* sysdeps/generic/unwind-dw2.c (_Unwind_GetCFA): Add a cast to silence
a warning.
+2003-04-14 Ulrich Drepper <drepper@redhat.com>
+
+ * locales/mn_MN: Fix country_post and country_ab2.
+
2003-03-17 Ulrich Drepper <drepper@redhat.com>

* localedata/en_ZA: Changed %x for LC_TIME to use dd/mm/ccyy.
<U004E><U0025><U0063><U0025><U004E>"
country_name "<U004D><U006F><U006E><U0067><U006F><U006C><U0069><U0061>"
-country_post "<U004D><U006E>"
-country_ab2 "<U004D><U006E>"
+country_post "<U004D><U004E>"
+country_ab2 "<U004D><U004E>"
country_ab3 "<U004D><U004E><U0047>"
country_num 496
country_car "<U004D><U0047><U004C>"
2003-04-14 Jakub Jelinek <jakub@redhat.com>

+ * sysdeps/i386/tls.h: Rename LOCK to LOCK_PREFIX.
+ * sysdeps/i386/pthread_spin_lock.c: Likewise.
+ * sysdeps/x86_64/tls.h: Likewise. Define LOCK_PREFIX if not already
+ defined.
+
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Use
DW_CFA_advance_loc2 for .Laddl-.Lsubl.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Use
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
#include "pthreadP.h"
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK "lock;"
+#ifndef LOCK_PREFIX
+# ifdef UP
+# define LOCK_PREFIX /* nothing */
+# else
+# define LOCK_PREFIX "lock;"
+# endif
#endif
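
The #ifndef guard matters because bits/atomic.h and tls.h now define LOCK_PREFIX as well; whichever header is seen first wins, and this file still builds standalone. A minimal sketch of the pattern outside glibc (x86 only; everything beyond the UP and LOCK_PREFIX names is illustrative):

/* Guarded definition, as in the patch: empty on uniprocessor builds,
   "lock;" when the instruction must be atomic against other CPUs.  */
#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

#include <stdio.h>

int
main (void)
{
  int val = 1;
  /* String pasting turns this into "decl %0" or "lock;decl %0".  */
  __asm__ __volatile__ (LOCK_PREFIX "decl %0" : "+m" (val));
  printf ("%d\n", val);   /* prints 0 */
  return 0;
}
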
pthread_spinlock_t *lock;
{
asm ("\n"
- "1:\t" LOCK "decl %0\n\t"
+ "1:\t" LOCK_PREFIX "decl %0\n\t"
"jne 2f\n\t"
".subsection 1\n\t"
".align 16\n"
# define INIT_SYSINFO
#endif
-#ifndef LOCK
+#ifndef LOCK_PREFIX
# ifdef UP
-# define LOCK /* nothing */
+# define LOCK_PREFIX /* nothing */
# else
-# define LOCK "lock;"
+# define LOCK_PREFIX "lock;"
# endif
#endif
({ __typeof (descr->member) __ret; \
__typeof (oldval) __old = (oldval); \
if (sizeof (descr->member) == 4) \
- asm volatile (LOCK "cmpxchgl %2, %%gs:%P3" \
+ asm volatile (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3" \
: "=a" (__ret) \
: "0" (__old), "r" (newval), \
"i" (offsetof (struct pthread, member))); \
/* Atomic set bit. */
#define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
(void) ({ if (sizeof ((descr)->member) == 4) \
- asm volatile (LOCK "orl %1, %%gs:%P0" \
+ asm volatile (LOCK_PREFIX "orl %1, %%gs:%P0" \
:: "i" (offsetof (struct pthread, member)), \
"ir" (1 << (bit))); \
else \
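
Both %gs-relative macros follow the usual contracts: the cmpxchg returns the previous value of the member (equal to oldval exactly when the store happened), and the orl sets a bit atomically. A portable illustration of the cmpxchg contract with a GCC builtin (the variable name is hypothetical):

#include <assert.h>

int
main (void)
{
  int member = 0;   /* stands in for descr->member */

  /* Succeeds: memory holds 0, so 1 is stored and 0 comes back.  */
  int old = __sync_val_compare_and_swap (&member, 0, 1);
  assert (old == 0 && member == 1);

  /* Fails: memory holds 1, not 5; nothing is stored, 1 comes back.  */
  old = __sync_val_compare_and_swap (&member, 5, 7);
  assert (old == 1 && member == 1);
  return 0;
}
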
/* Get the thread descriptor definition. */
# include <nptl/descr.h>
+#ifndef LOCK_PREFIX
+# ifdef UP
+# define LOCK_PREFIX /* nothing */
+# else
+# define LOCK_PREFIX "lock;"
+# endif
+#endif
+
/* This is the size of the initial TCB. */
# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t)
({ __typeof (descr->member) __ret; \
__typeof (oldval) __old = (oldval); \
if (sizeof (descr->member) == 4) \
- asm volatile (LOCK "cmpxchgl %2, %%fs:%P3" \
+ asm volatile (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
: "=a" (__ret) \
: "0" (__old), "r" (newval), \
"i" (offsetof (struct pthread, member))); \
/* Atomic set bit. */
#define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
(void) ({ if (sizeof ((descr)->member) == 4) \
- asm volatile (LOCK "orl %1, %%fs:%P0" \
+ asm volatile (LOCK_PREFIX "orl %1, %%fs:%P0" \
:: "i" (offsetof (struct pthread, member)), \
"ir" (1 << (bit))); \
else \
typedef uintmax_t uatomic_max_t;
-#ifndef LOCK
+#ifndef LOCK_PREFIX
# ifdef UP
-# define LOCK /* nothing */
+# define LOCK_PREFIX /* nothing */
# else
-# define LOCK "lock;"
+# define LOCK_PREFIX "lock;"
# endif
#endif
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchgb %b2, %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1" \
: "=a" (ret), "=m" (*mem) \
: "q" (newval), "m" (*mem), "0" (oldval)); \
ret; })
#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchgw %w2, %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "m" (*mem), "0" (oldval)); \
ret; })
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchgl %2, %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "m" (*mem), "0" (oldval)); \
ret; })
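
In all three variants the "=a"/"0" constraint pair reflects how cmpxchg works: the comparand must sit in the accumulator, and the instruction leaves the old memory value there. A self-contained 32-bit rendition of the macro above, written as a function with an unconditional lock prefix for readability (x86 only):

static inline int
cas32_acq (volatile int *mem, int newval, int oldval)
{
  int ret;
  __asm __volatile ("lock; cmpxchgl %2, %1"
                    : "=a" (ret), "+m" (*mem)  /* old value back in EAX */
                    : "r" (newval), "0" (oldval));
  return ret;   /* == oldval iff the exchange was performed */
}
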
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
__asm __volatile ("xchgl %2, %%ebx\n\t" \
- LOCK "cmpxchg8b %1\n\t" \
+ LOCK_PREFIX "cmpxchg8b %1\n\t" \
"xchgl %2, %%ebx" \
: "=A" (ret), "=m" (*mem) \
: "DS" (((unsigned long long int) (newval)) \
# else
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchg8b %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchg8b %1" \
: "=A" (ret), "=m" (*mem) \
: "b" (((unsigned long long int) (newval)) \
& 0xffffffff), \
- "c" (((unsigned long long int) (newval)) >> 32), \
+ "c" (((unsigned long long int) (newval)) >> 32), \
"m" (*mem), "a" (((unsigned long long int) (oldval)) \
& 0xffffffff), \
"d" (((unsigned long long int) (oldval)) >> 32)); \
({ __typeof (*mem) result; \
__typeof (value) addval = (value); \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "xaddb %b0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddb %b0, %1" \
: "=r" (result), "=m" (*mem) \
: "0" (addval), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "xaddw %w0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddw %w0, %1" \
: "=r" (result), "=m" (*mem) \
: "0" (addval), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "xaddl %0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddl %0, %1" \
: "=r" (result), "=m" (*mem) \
: "0" (addval), "m" (*mem)); \
else \
else if (__builtin_constant_p (value) && (value) == 1) \
atomic_decrement (mem); \
else if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "addb %b1, %0" \
+ __asm __volatile (LOCK_PREFIX "addb %b1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "addw %w1, %0" \
+ __asm __volatile (LOCK_PREFIX "addw %w1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "addl %1, %0" \
+ __asm __volatile (LOCK_PREFIX "addl %1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else \
#define atomic_add_negative(mem, value) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "addb %b2, %0; sets %1" \
+ __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
: "iq" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "addw %w2, %0; sets %1" \
+ __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "addl %2, %0; sets %1" \
+ __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else \
#define atomic_add_zero(mem, value) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "addb %b2, %0; setz %1" \
+ __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "addw %w2, %0; setz %1" \
+ __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "addl %2, %0; setz %1" \
+ __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else \
#define atomic_increment(mem) \
(void) ({ if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "incb %b0" \
+ __asm __volatile (LOCK_PREFIX "incb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "incw %w0" \
+ __asm __volatile (LOCK_PREFIX "incw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "incl %0" \
+ __asm __volatile (LOCK_PREFIX "incl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
#define atomic_increment_and_test(mem) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "incb %0; sete %b1" \
+ __asm __volatile (LOCK_PREFIX "incb %0; sete %b1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "incw %0; sete %w1" \
+ __asm __volatile (LOCK_PREFIX "incw %0; sete %w1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "incl %0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else \
#define atomic_decrement(mem) \
(void) ({ if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "decb %b0" \
+ __asm __volatile (LOCK_PREFIX "decb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "decw %w0" \
+ __asm __volatile (LOCK_PREFIX "decw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "decl %0" \
+ __asm __volatile (LOCK_PREFIX "decl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
#define atomic_decrement_and_test(mem) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "decb %b0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "decw %w0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "decl %0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else \
#define atomic_bit_set(mem, bit) \
(void) ({ if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "orb %b2, %0" \
+ __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "orw %w2, %0" \
+ __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "orl %2, %0" \
+ __asm __volatile (LOCK_PREFIX "orl %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else \
#define atomic_bit_test_set(mem, bit) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "btsb %3, %1; setc %0" \
+ __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
: "=q" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "btsw %3, %1; setc %0" \
+ __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
: "=q" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "btsl %3, %1; setc %0" \
+ __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
: "=q" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else \
#define _DOARGS_0(n) /* No arguments to frob. */
#define _POPARGS_0 /* No arguments to pop. */
-#define PUSHARGS_1 movl %ebx, %edx; PUSHARGS_0
+#define PUSHARGS_1 movl %ebx, %edx; L(SAVEBX1): PUSHARGS_0
#define DOARGS_1 _DOARGS_1 (4)
-#define POPARGS_1 POPARGS_0; movl %edx, %ebx
+#define POPARGS_1 POPARGS_0; movl %edx, %ebx; L(RESTBX1):
#define _PUSHARGS_1 pushl %ebx; L(PUSHBX1): _PUSHARGS_0
#define _DOARGS_1(n) movl n(%esp), %ebx; _DOARGS_0(n-4)
#define _POPARGS_1 _POPARGS_0; popl %ebx; L(POPBX1):
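
The new L(SAVEBX1) and L(RESTBX1) labels bracket the instructions that save and restore %ebx around the system call, so assembly files can refer to those exact points, e.g. from unwind annotations like the DW_CFA_advance_loc2 changes above. A sketch of what a one-argument stub now expands to (illustrative only; L() produces a local label):

/*
        movl %ebx, %edx        # PUSHARGS_1: stash %ebx in %edx
   L(SAVEBX1):                 # label at the point %ebx is saved
        movl 4(%esp), %ebx     # DOARGS_1: load the syscall argument
        ...                    # int $0x80 and error check
        movl %edx, %ebx        # POPARGS_1: restore %ebx
   L(RESTBX1):                 # label at the point %ebx is restored
*/
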
.size name,.-name
/* Careful here! This "ret" define can interfere; use jmpl if unsure. */
-#define ret retl; nop
+#define ret retl; nop
+#define ret_NOERRNO retl; nop
#define r0 %o0
#define r1 %o1
#define MOVE(x,y) mov x, y
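
ret_NOERRNO gives stubs for system calls that cannot fail a return sequence without the errno check; on SPARC, retl returns through %o7 and the nop fills the branch delay slot. A hypothetical stub shape, sketched inside a comment (names and trap usage are illustrative, not taken from the patch):

/*
        ENTRY (stub_noerr)              # hypothetical wrapper that cannot fail
        mov     SYS_ify (getpid), %g1
        ta      0x6d                    # trap into the kernel
        ret_NOERRNO                     # return, no SYSCALL_ERROR branch
        END (stub_noerr)
*/
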
typedef uintmax_t uatomic_max_t;
-#ifndef LOCK
+#ifndef LOCK_PREFIX
# ifdef UP
-# define LOCK /* nothing */
+# define LOCK_PREFIX /* nothing */
# else
-# define LOCK "lock;"
+# define LOCK_PREFIX "lock;"
# endif
#endif
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchgb %b2, %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1" \
: "=a" (ret), "=m" (*mem) \
: "q" (newval), "m" (*mem), "0" (oldval)); \
ret; })
#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchgw %w2, %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "m" (*mem), "0" (oldval)); \
ret; })
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchgl %2, %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "m" (*mem), "0" (oldval)); \
ret; })
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
- __asm __volatile (LOCK "cmpxchgq %q2, %1" \
+ __asm __volatile (LOCK_PREFIX "cmpxchgq %q2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" ((long) (newval)), "m" (*mem), \
"0" ((long) (oldval))); \
#define atomic_exchange_and_add(mem, value) \
({ __typeof (*mem) result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "xaddb %b0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddb %b0, %1" \
: "=r" (result), "=m" (*mem) \
: "0" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "xaddw %w0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddw %w0, %1" \
: "=r" (result), "=m" (*mem) \
: "0" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "xaddl %0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddl %0, %1" \
: "=r" (result), "=m" (*mem) \
: "0" (value), "m" (*mem)); \
else \
- __asm __volatile (LOCK "xaddq %q0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddq %q0, %1" \
: "=r" (result), "=m" (*mem) \
: "0" ((long) (value)), "m" (*mem)); \
result; })
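
xadd adds the register into memory and hands the previous memory value back in the same register, i.e. fetch-and-add; the "0" constraint feeds value in where "=r" takes the result out. A hypothetical use, handing out unique ticket numbers (a GCC builtin stands in for atomic_exchange_and_add):

#include <assert.h>

static int next_ticket;   /* illustrative shared counter */

static int
take_ticket (void)
{
  /* atomic_exchange_and_add (&next_ticket, 1) with the macro above.  */
  return __sync_fetch_and_add (&next_ticket, 1);
}

int
main (void)
{
  assert (take_ticket () == 0);   /* old value comes back */
  assert (take_ticket () == 1);
  assert (next_ticket == 2);
  return 0;
}
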
else if (__builtin_constant_p (value) && (value) == 1) \
atomic_decrement (mem); \
else if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "addb %b1, %0" \
+ __asm __volatile (LOCK_PREFIX "addb %b1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "addw %w1, %0" \
+ __asm __volatile (LOCK_PREFIX "addw %w1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "addl %1, %0" \
+ __asm __volatile (LOCK_PREFIX "addl %1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else \
- __asm __volatile (LOCK "addq %q1, %0" \
+ __asm __volatile (LOCK_PREFIX "addq %q1, %0" \
: "=m" (*mem) \
: "ir" ((long) (value)), "m" (*mem)); \
})
#define atomic_add_negative(mem, value) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "addb %b2, %0; sets %1" \
+ __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "addw %w2, %0; sets %1" \
+ __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "addl %2, %0; sets %1" \
+ __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else \
- __asm __volatile (LOCK "addq %q2, %0; sets %1" \
+ __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" ((long) (value)), "m" (*mem)); \
__result; })
#define atomic_add_zero(mem, value) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "addb %b2, %0; setz %1" \
+ __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "addw %w2, %0; setz %1" \
+ __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "addl %2, %0; setz %1" \
+ __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" (value), "m" (*mem)); \
else \
- __asm __volatile (LOCK "addq %q2, %0; setz %1" \
+ __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
: "ir" ((long) (value)), "m" (*mem)); \
__result; })
#define atomic_increment(mem) \
(void) ({ if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "incb %b0" \
+ __asm __volatile (LOCK_PREFIX "incb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "incw %w0" \
+ __asm __volatile (LOCK_PREFIX "incw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "incl %0" \
+ __asm __volatile (LOCK_PREFIX "incl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
- __asm __volatile (LOCK "incq %q0" \
+ __asm __volatile (LOCK_PREFIX "incq %q0" \
: "=m" (*mem) \
: "m" (*mem)); \
})
#define atomic_increment_and_test(mem) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "incb %b0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "incw %w0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "incl %0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else \
- __asm __volatile (LOCK "incq %q0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
__result; })
#define atomic_decrement(mem) \
(void) ({ if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "decb %b0" \
+ __asm __volatile (LOCK_PREFIX "decb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "decw %w0" \
+ __asm __volatile (LOCK_PREFIX "decw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "decl %0" \
+ __asm __volatile (LOCK_PREFIX "decl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
- __asm __volatile (LOCK "decq %q0" \
+ __asm __volatile (LOCK_PREFIX "decq %q0" \
: "=m" (*mem) \
: "m" (*mem)); \
})
#define atomic_decrement_and_test(mem) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "decb %b0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "decw %w0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "decl %0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
else \
- __asm __volatile (LOCK "decq %q0; sete %1" \
+ __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
: "=m" (*mem), "=qm" (__result) \
: "m" (*mem)); \
__result; })
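
The sete after the locked dec captures the zero flag, so the macro reports whether the counter just reached zero; the classic caller is a reference-count release. A sketch with hypothetical types (a GCC builtin stands in for atomic_decrement_and_test):

#include <stdlib.h>

struct object { int refcnt; /* ... payload ... */ };

static void
object_unref (struct object *obj)
{
  /* if (atomic_decrement_and_test (&obj->refcnt)) with the macro above.  */
  if (__sync_sub_and_fetch (&obj->refcnt, 1) == 0)
    free (obj);   /* last reference dropped */
}
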
#define atomic_bit_set(mem, bit) \
(void) ({ if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "orb %b2, %0" \
+ __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "orw %w2, %0" \
+ __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "orl %2, %0" \
+ __asm __volatile (LOCK_PREFIX "orl %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (__builtin_constant_p (bit) && (bit) < 32) \
- __asm __volatile (LOCK "orq %2, %0" \
+ __asm __volatile (LOCK_PREFIX "orq %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "i" (1L << (bit))); \
else \
- __asm __volatile (LOCK "orq %q2, %0" \
+ __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
: "=m" (*mem) \
: "m" (*mem), "r" (1UL << (bit))); \
})
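
The x86-64 version needs two orq cases because an orq immediate is a sign-extended 32-bit value: 1L << bit only fits as an "i" operand when bit is a compile-time constant below 32; otherwise the mask must come in through a register. The net effect, sketched with a builtin and a hypothetical flag word:

static unsigned long flags;

static void
set_flag (int bit)
{
  /* atomic_bit_set (&flags, bit) with the macro above.  */
  __sync_fetch_and_or (&flags, 1UL << bit);
}
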
#define atomic_bit_test_set(mem, bit) \
({ unsigned char __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK "btsb %3, %1; setc %0" \
+ __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
: "=q" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK "btsw %3, %1; setc %0" \
+ __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
: "=q" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK "btsl %3, %1; setc %0" \
+ __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
: "=q" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else \
- __asm __volatile (LOCK "btsq %3, %1; setc %0" \
+ __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
: "=q" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
__result; })
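
bts sets the bit and leaves its old value in the carry flag, which setc copies out: an atomic test-and-set. A common use is a run-once guard, sketched here with a builtin and hypothetical names standing in for atomic_bit_test_set:

static unsigned int once_word;

static void
do_once (void (*fn) (void))
{
  /* Old bit 0 clear means this caller won the race and runs fn.  */
  if ((__sync_fetch_and_or (&once_word, 1u) & 1u) == 0)
    fn ();
}
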