/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
#include <kernel-features.h>

#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_WAIT_REQUEUE_PI 11
#define FUTEX_CMP_REQUEUE_PI 12
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256

#define FUTEX_BITSET_MATCH_ANY 0xffffffff

/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  */
#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG


#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
#  define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
#  define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private) \
   ? ((private) == 0 \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
      : (fl)) \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif


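/* Illustrative sketch (not part of the original header): assuming
   __ASSUME_PRIVATE_FUTEX and code built outside libc.so/ld.so, the macro
   above resolves as

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
       -> (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ 0                  (flag kept)
     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
       -> (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ FUTEX_PRIVATE_FLAG (flag cleared)

   so LLL_PRIVATE keeps the kernel's FUTEX_PRIVATE_FLAG set and LLL_SHARED
   clears it, which is why the LLL_* values above look backwards.  */
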
#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait(futexp, val, NULL, private)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAIT, private), \
                              (val), (timespec)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_timed_wait_bitset(futexp, val, timespec, clockbit, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    int __op = FUTEX_WAIT_BITSET | clockbit; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp), \
                              __lll_private_flag (__op, private), \
                              (val), (timespec), NULL /* Unused.  */, \
                              FUTEX_BITSET_MATCH_ANY); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAKE, private), \
                              (nr), 0); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

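/* Illustrative sketch (not part of the original header): the usual pairing
   of the wait and wake macros above.  The waiter only sleeps while the
   futex word still holds the value it passed in; the waker changes the
   word before waking.  'flag' is an invented name for the example.

     int flag = 0;                              (the futex word)

     waiter:
       while (flag == 0)
         lll_futex_wait (&flag, 0, LLL_PRIVATE);

     waker:
       flag = 1;
       lll_futex_wake (&flag, 1, LLL_PRIVATE);

   If 'flag' is already non-zero when the kernel checks it, the wait returns
   immediately instead of sleeping.  */
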
#define lll_robust_dead(futexv, private) \
  do \
    { \
      int *__futexp = &(futexv); \
      atomic_or (__futexp, FUTEX_OWNER_DIED); \
      lll_futex_wake (__futexp, 1, private); \
    } \
  while (0)

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

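/* Illustrative sketch (not part of the original header): FUTEX_CMP_REQUEUE
   is what a broadcast-style operation uses to avoid a thundering herd.
   Assuming 'cond_futex' still holds the value 'seq' that the waiters slept
   on, something like

     lll_futex_requeue (&cond_futex, 1, INT_MAX, &mutex_futex, seq,
                        LLL_PRIVATE);

   wakes one waiter and moves the remaining ones onto the mutex futex, so
   they are woken one at a time as the mutex is released.  The names here
   are invented for the example.  */
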
/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2), \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Priority Inheritance support.  */
#define lll_futex_wait_requeue_pi(futexp, val, mutex, private) \
  lll_futex_timed_wait_requeue_pi (futexp, val, NULL, 0, mutex, private)

#define lll_futex_timed_wait_requeue_pi(futexp, val, timespec, clockbit, \
                                        mutex, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    int __op = FUTEX_WAIT_REQUEUE_PI | clockbit; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 5, (futexp), \
                              __lll_private_flag (__op, private), \
                              (val), (timespec), mutex); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_cmp_requeue_pi(futexp, nr_wake, nr_move, mutex, val, priv) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE_PI, priv), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

static inline int __attribute__ ((always_inline))
__lll_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
#define lll_trylock(lock) __lll_trylock (&(lock))


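/* Illustrative sketch (not part of the original header): like the CAS it
   wraps, lll_trylock evaluates to 0 when the lock was free and has just
   been taken, and to non-zero when it was already held.  'some_lock' is an
   invented int variable initialized to LLL_LOCK_INITIALIZER (defined
   further down).

     if (lll_trylock (some_lock) == 0)
       {
         ... got the lock, do the work ...
         lll_unlock (some_lock, LLL_PRIVATE);
       }
  */
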
static inline int __attribute__ ((always_inline))
__lll_cond_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))


static inline int __attribute__ ((always_inline))
__lll_robust_trylock (int *futex, int id)
{
  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
#define lll_robust_trylock(lock, id) \
  __lll_robust_trylock (&(lock), id)

extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;

#define __lll_lock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
                                                                1, 0), 0)) \
      { \
        if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
          __lll_lock_wait_private (__futex); \
        else \
          __lll_lock_wait (__futex, private); \
      } \
  }))
#define lll_lock(futex, private) __lll_lock (&(futex), private)


#define __lll_robust_lock(futex, id, private) \
  ({ \
    int *__futex = (futex); \
    int __val = 0; \
    \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private); \
    __val; \
  })
#define lll_robust_lock(futex, id, private) \
  __lll_robust_lock (&(futex), id, private)


static inline void __attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
    __lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)


#define lll_robust_cond_lock(futex, id, private) \
  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)


extern int __lll_timedlock_wait (int *futex, const struct timespec *,
                                 int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
                                        int private) attribute_hidden;

static inline int __attribute__ ((always_inline))
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    result = __lll_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_timedlock(futex, abstime, private) \
  __lll_timedlock (&(futex), abstime, private)


static inline int __attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
                        int id, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_robust_timedlock(futex, abstime, id, private) \
  __lll_robust_timedlock (&(futex), abstime, id, private)


#define __lll_unlock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    int __val = atomic_exchange_rel (__futex, 0); \
    \
    if (__builtin_expect (__val > 1, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))
#define lll_unlock(futex, private) __lll_unlock (&(futex), private)


#define __lll_robust_unlock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    int __val = atomic_exchange_rel (__futex, 0); \
    \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))
#define lll_robust_unlock(futex, private) \
  __lll_robust_unlock (&(futex), private)


#define lll_islocked(futex) \
  (futex != 0)


/* Our internal lock implementation is identical to the binary-compatible
   mutex implementation.  */

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER        (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)

/* The states of a lock are:
    0  -  untaken
    1  -  taken by one user
   >1  -  taken by more users */

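/* Illustrative sketch (not part of the original header): with the states
   above, the basic usage pattern of the lock macros in this file is

     static int my_lock = LLL_LOCK_INITIALIZER;   (invented name)

     lll_lock (my_lock, LLL_PRIVATE);    CAS 0 -> 1, futex wait if held
     ... critical section ...
     lll_unlock (my_lock, LLL_PRIVATE);  exchange to 0, wake if value was > 1

   A contended waiter in __lll_lock_wait sets the value to 2, which is what
   lets the unlock path issue the futex wake only when someone may actually
   be sleeping.  */
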
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
   wakeup when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do { \
    __typeof (tid) __tid; \
    while ((__tid = (tid)) != 0) \
      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
  } while (0)

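/* Illustrative sketch (not part of the original header): this is the
   mechanism behind joining a thread.  Assuming 'pd->tid' is the word that
   was registered with CLONE_CHILD_CLEARTID (the name is schematic), a join
   boils down to

     lll_wait_tid (pd->tid);   (sleeps until the kernel clears the tid)

   The loop above re-reads the value because the futex wait can return
   before the tid has actually been cleared, e.g. if the value changed
   between the read and the wait.  */
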
extern int __lll_timedwait_tid (int *, const struct timespec *)
     attribute_hidden;

#define lll_timedwait_tid(tid, abstime) \
  ({ \
    int __res = 0; \
    if ((tid) != 0) \
      __res = __lll_timedwait_tid (&(tid), (abstime)); \
    __res; \
  })

/* Implement __libc_lock_lock using exchange_and_add, which expands into
   a single instruction on XLP processors.  We enable this for all MIPS
   processors, as atomic_exchange_and_add_acq and
   atomic_compare_and_exchange_acq take the same time to execute.
   This is a simplified expansion of ({ lll_lock (NAME, LLL_PRIVATE); 0; }).

   Note: __lll_lock_wait_private () resets the lock value to '2', which
   prevents unbounded growth of the lock value and [with billions of
   threads] overflow.  */
#define __libc_lock_lock(NAME) \
  ({ \
    int *__futex = &(NAME); \
    if (__builtin_expect (atomic_exchange_and_add_acq (__futex, 1), 0)) \
      __lll_lock_wait_private (__futex); \
    0; \
  })

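/* Illustrative sketch (not part of the original header): how the add-based
   fast path maps onto the lock states listed earlier.

     old value 0 : becomes 1, lock acquired with no syscall
     old value 1 : becomes 2, caller enters __lll_lock_wait_private
     old value 2 : briefly becomes 3, but __lll_lock_wait_private exchanges
                   it back to 2 while waiting, so the value cannot grow
                   without bound

   A single atomic add thus replaces the compare-and-exchange of the generic
   __lll_lock while preserving the 0 / 1 / >1 protocol.  */
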
#ifdef _MIPS_ARCH_XLP
/* The generic version using a single atomic_compare_and_exchange_acq takes
   less time for non-XLP processors, so we use the version below for XLP
   only.  */
# define __libc_lock_trylock(NAME) \
  ({ \
    int *__futex = &(NAME); \
    int __result = atomic_exchange_and_add_acq (__futex, 1); \
    /* If __result == 0, we succeeded in acquiring the lock. \
       If __result == 1, we switched the lock to 'contended' state, which \
       will cause a [possibly unnecessary] call to lll_futex_wait.  This is \
       unlikely, so we accept the possible inefficiency. \
       If __result >= 2, we need to set the lock to 'contended' state to \
       avoid unbounded increase from subsequent trylocks.  */ \
    if (__result >= 2) \
      __result = atomic_exchange_acq (__futex, 2); \
    __result; \
  })
#endif

#endif  /* lowlevellock.h */