[glibc.git] / sysdeps/x86_64/bits/atomic.h
/* Copyright (C) 2002-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>
#include <tls.h>   /* For tcbhead_t.  */


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

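/* On uniprocessor (UP) builds the bus lock prefix is unnecessary, so it
   expands to nothing; otherwise every read-modify-write instruction below
   is prefixed with "lock".  */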
#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX	/* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

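/* The plain compare-and-swap operations map directly to the GCC __sync
   builtins.  The _val_ form returns the value previously in *MEM; the
   _bool_ form returns nonzero when the exchange did NOT take place.  As an
   illustration only (not part of this header), a caller could build an
   atomic increment on top of it like this:

       __typeof (*mem) old;
       do
         old = *mem;
       while (atomic_compare_and_exchange_bool_acq (mem, old + 1, old));
*/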
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))


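/* The __arch_c_* variants back the catomic_* operations: they test the
   multiple_threads flag in the TCB (reached through %fs) and, while the
   process is still single-threaded, branch over the standalone "lock" so
   the cmpxchg runs without the expensive bus lock.  */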
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgq %q2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" ((long int) (newval)), "m" (*mem), \
                         "0" ((long int) (oldval)), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })


/* Note that we need no lock prefix here: xchg with a memory operand is
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else \
       __asm __volatile ("xchgq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((long) (newvalue)), "m" (*mem)); \
     result; })


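/* Fetch-and-add.  The LOCK argument is pasted in front of the xadd
   instruction as a literal string; for catomic_exchange_and_add it is the
   runtime single-thread check defined below, while the generic
   atomic_exchange_and_add simply uses the __sync_fetch_and_add builtin.  */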
#define __arch_exchange_and_add_body(lock, mem, value) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       __asm __volatile (lock "xaddq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((long) (value)), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)


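/* Atomic add.  Adding a compile-time constant 1 or -1 is routed to the
   (c)atomic increment/decrement macros, which emit the shorter inc/dec
   instructions; everything else uses add with an immediate or register
   operand.  */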
#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      pfx##_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      pfx##_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "addq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" ((long) (value)), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, atomic, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, catomic, mem, value)


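/* Add VALUE to *MEM and return nonzero iff the result is negative
   (the sign flag after the addition, captured with sets).  */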
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((long) (value)), "m" (*mem)); \
     __result; })


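/* Add VALUE to *MEM and return nonzero iff the result is zero
   (the zero flag after the addition, captured with setz).  */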
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((long) (value)), "m" (*mem)); \
     __result; })


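/* Increment *MEM by one; the LOCK argument selects either LOCK_PREFIX or
   the runtime single-thread check used by catomic_increment.  */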
#define __arch_increment_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "incq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, mem)


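/* Increment *MEM and return nonzero iff the new value is zero.  */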
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


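/* Decrement *MEM by one, with the same LOCK parameterization as the
   increment macros above.  */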
#define __arch_decrement_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "decq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, mem)


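/* Decrement *MEM and return nonzero iff the new value is zero.  */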
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


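/* Set bit BIT in *MEM using an atomic OR.  For 64-bit objects a constant
   bit number below 32 still fits in an immediate; otherwise the mask is
   materialized in a register.  */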
#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "iq" (1L << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (__builtin_constant_p (bit) && (bit) < 32) \
      __asm __volatile (LOCK_PREFIX "orq %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "i" (1L << (bit))); \
    else \
      __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "r" (1UL << (bit))); \
  } while (0)


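/* Atomically set bit BIT in *MEM and return its previous value
   (bts followed by setc on the carry flag).  */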
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "iq" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else \
       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     __result; })


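/* "rep; nop" is the encoding of the pause instruction, a spin-wait hint
   for the processor.  */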
#define atomic_delay() asm ("rep; nop")


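/* Atomic AND/OR of MASK into *MEM; __arch_cprefix is the shared
   single-thread check used by the catomic_and/catomic_or variants.  */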
#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "andq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)


#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "orq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)