/* Copyright (C) 2003-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
#include <kernel-features.h>

#define FUTEX_WAIT		0
#define FUTEX_WAKE		1
#define FUTEX_REQUEUE		3
#define FUTEX_CMP_REQUEUE	4
#define FUTEX_WAKE_OP		5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
#define FUTEX_LOCK_PI		6
#define FUTEX_UNLOCK_PI		7
#define FUTEX_TRYLOCK_PI	8
#define FUTEX_WAIT_BITSET	9
#define FUTEX_WAKE_BITSET	10
#define FUTEX_PRIVATE_FLAG	128
#define FUTEX_CLOCK_REALTIME	256

#define FUTEX_BITSET_MATCH_ANY	0xffffffff

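/* Illustrative note on FUTEX_OP_CLEAR_WAKE_IF_GT_ONE above (an added
   remark; it assumes the kernel's FUTEX_OP encoding of
   (op << 28) | (cmp << 24) | (oparg << 12) | cmparg): (4 << 24) | 1
   selects op FUTEX_OP_SET with oparg 0 and comparison FUTEX_OP_CMP_GT
   with cmparg 1, i.e. FUTEX_WAKE_OP stores 0 into the second futex and
   wakes its waiters if the old value was greater than one, matching the
   macro's name.  */
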
/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  */
#define LLL_PRIVATE	0
#define LLL_SHARED	FUTEX_PRIVATE_FLAG


#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
#  define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
#  define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private) \
   ? ((private) == 0 \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
      : (fl)) \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif
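
/* Worked example (an added illustration, not upstream text): outside
   libc.so and ld.so with __ASSUME_PRIVATE_FUTEX, the definition above
   XORs the 'private' argument into the operation:

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ 0
       == FUTEX_WAIT | FUTEX_PRIVATE_FLAG     (process-private operation)

     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ FUTEX_PRIVATE_FLAG
       == FUTEX_WAIT                          (shared operation)

   which is why LLL_PRIVATE is 0 and LLL_SHARED is FUTEX_PRIVATE_FLAG
   even though the definitions look backwards.  */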


#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait(futexp, val, NULL, private)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAIT, private), \
                              (val), (timespec)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAKE, private), \
                              (nr), 0); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_robust_dead(futexv, private) \
  do \
    { \
      int *__futexp = &(futexv); \
      atomic_or (__futexp, FUTEX_OWNER_DIED); \
      lll_futex_wake (__futexp, 1, private); \
    } \
  while (0)

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2), \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

static inline int __attribute__((always_inline))
__lll_trylock(int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
#define lll_trylock(lock)	__lll_trylock (&(lock))


static inline int __attribute__((always_inline))
__lll_cond_trylock(int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
#define lll_cond_trylock(lock)	__lll_cond_trylock (&(lock))


static inline int __attribute__((always_inline))
__lll_robust_trylock(int *futex, int id)
{
  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
#define lll_robust_trylock(lock, id) \
  __lll_robust_trylock (&(lock), id)

extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;

#define __lll_lock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
                                                                1, 0), 0)) \
      { \
        if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
          __lll_lock_wait_private (__futex); \
        else \
          __lll_lock_wait (__futex, private); \
      } \
  }))
#define lll_lock(futex, private) __lll_lock (&(futex), private)


#define __lll_robust_lock(futex, id, private) \
  ({ \
    int *__futex = (futex); \
    int __val = 0; \
    \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private); \
    __val; \
  })
#define lll_robust_lock(futex, id, private) \
  __lll_robust_lock (&(futex), id, private)


static inline void __attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
    __lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)


#define lll_robust_cond_lock(futex, id, private) \
  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)


extern int __lll_timedlock_wait (int *futex, const struct timespec *,
                                 int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
                                        int private) attribute_hidden;

static inline int __attribute__ ((always_inline))
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    result = __lll_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_timedlock(futex, abstime, private) \
  __lll_timedlock (&(futex), abstime, private)


static inline int __attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
                        int id, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_robust_timedlock(futex, abstime, id, private) \
  __lll_robust_timedlock (&(futex), abstime, id, private)


#define __lll_unlock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    int __val = atomic_exchange_rel (__futex, 0); \
    \
    if (__builtin_expect (__val > 1, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)


#define __lll_robust_unlock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    int __val = atomic_exchange_rel (__futex, 0); \
    \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))
#define lll_robust_unlock(futex, private) \
  __lll_robust_unlock(&(futex), private)


#define lll_islocked(futex) \
  (futex != 0)


/* Our internal lock implementation is identical to the binary-compatible
   mutex implementation.  */

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER		(0)
#define LLL_LOCK_INITIALIZER_LOCKED	(1)

/* The states of a lock are:
    0  -  untaken
    1  -  taken by one user
   >1  -  taken by more users */

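/* Illustrative note (added for clarity, not upstream comment text): with
   the macros above, an uncontended lll_lock takes the lock with a single
   acquire CAS from 0 to 1; the contended paths (and __lll_cond_lock)
   store 2, so lll_unlock, which atomically exchanges the value back to 0,
   only needs to issue lll_futex_wake when the old value was greater
   than 1.  */
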
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
   wakeup when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do { \
    __typeof (tid) __tid; \
    while ((__tid = (tid)) != 0) \
      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
  } while (0)
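
/* Usage sketch (an added illustration, not glibc's actual pthread_join
   code): for a thread whose TID field is cleared by the kernel as
   described above, a joiner can simply do

     lll_wait_tid (pd->tid);

   where pd->tid is a hypothetical int field holding the thread ID; the
   loop above rereads the field and futex-waits on the value it saw, so a
   wakeup that races with the read just makes the wait return and
   re-check.  */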

extern int __lll_timedwait_tid (int *, const struct timespec *)
     attribute_hidden;

#define lll_timedwait_tid(tid, abstime) \
  ({ \
    int __res = 0; \
    if ((tid) != 0) \
      __res = __lll_timedwait_tid (&(tid), (abstime)); \
    __res; \
  })

/* Implement __libc_lock_lock using exchange_and_add, which expands into
   a single instruction on XLP processors.  We enable this for all MIPS
   processors as atomic_exchange_and_add_acq and
   atomic_compare_and_exchange_acq take the same time to execute.
   This is a simplified expansion of ({ lll_lock (NAME, LLL_PRIVATE); 0; }).

   Note: __lll_lock_wait_private() resets lock value to '2', which prevents
   unbounded increase of the lock value and [with billions of threads]
   overflow.  */
#define __libc_lock_lock(NAME) \
  ({ \
    int *__futex = &(NAME); \
    if (__builtin_expect (atomic_exchange_and_add_acq (__futex, 1), 0)) \
      __lll_lock_wait_private (__futex); \
    0; \
  })
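
/* Illustrative walkthrough (added, not upstream text) of the fast path
   above: atomic_exchange_and_add_acq returns the previous lock value, so
   a previous value of 0 means the caller incremented 0 -> 1 and owns the
   lock without a syscall; any non-zero previous value routes into
   __lll_lock_wait_private, which (as noted above) resets the value to 2,
   keeping repeated contended acquisitions from growing the counter
   without bound.  */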

#ifdef _MIPS_ARCH_XLP
/* The generic version using a single atomic_compare_and_exchange_acq takes
   less time for non-XLP processors, so we use the version below for XLP
   only.  */
# define __libc_lock_trylock(NAME) \
  ({ \
    int *__futex = &(NAME); \
    int __result = atomic_exchange_and_add_acq (__futex, 1); \
    /* If __result == 0, we succeeded in acquiring the lock. \
       If __result == 1, we switched the lock to 'contended' state, which \
       will cause a [possibly unnecessary] call to lll_futex_wait.  This is \
       unlikely, so we accept the possible inefficiency. \
       If __result >= 2, we need to set the lock to 'contended' state to \
       avoid unbounded increase from subsequent trylocks.  */ \
    if (__result >= 2) \
      __result = atomic_exchange_acq (__futex, 2); \
    __result; \
  })
#endif

#endif	/* lowlevellock.h */