/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the */
/* GNU Library General Public License for more details. */

/* Internal locks */

#include <errno.h>
#include <sched.h>
#include <time.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"

/* The status field of a spinlock has the following meaning:
     0: spinlock is free
     1: spinlock is taken, no thread is waiting on it
     ADDR: spinlock is taken; ADDR is the address of the thread descriptor of
       the first waiting thread, and other waiting threads are linked via
       their p_nextlock field.
   The waiting list is not sorted by priority order.
   Actually, we always insert at the top of the list (the sole insertion mode
   that can be performed without locking).
   For __pthread_unlock, we perform a linear search in the list
   to find the highest-priority, oldest waiting thread.
   This is safe because there are no concurrent __pthread_unlock
   operations -- only the thread that locked the mutex can unlock it. */

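/* Illustrative sketch (not part of the original implementation): how the
   three status encodings described above could be read back from a lock.
   The helper names below are hypothetical and the block is kept out of the
   build with #if 0. */
#if 0
static int spinlock_is_free(pthread_spinlock_t * lock)
{
  return lock->__status == 0;
}

static pthread_descr spinlock_first_waiter(pthread_spinlock_t * lock)
{
  long status = lock->__status;
  /* 0 = free, 1 = taken with no waiters; any other value is the address
     of the descriptor of the first waiting thread. */
  return (status == 0 || status == 1) ? NULL : (pthread_descr) status;
}
#endif
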
void internal_function __pthread_lock(pthread_spinlock_t * lock,
                                      pthread_descr self)
{
  long oldstatus, newstatus;
  int spurious_wakeup_count = 0;

  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      newstatus = (long) self;
    }
    if (self != NULL) {
      ASSERT(self->p_nextlock == NULL);
      THREAD_SETMEM(self, p_nextlock, (pthread_descr) oldstatus);
      /* Make sure the store in p_nextlock completes before performing
         the compare-and-swap */
      WRITE_MEMORY_BARRIER();
    }
  } while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
                             &lock->__spinlock));

  /* Suspend with guard against spurious wakeup.
     This can happen in pthread_cond_timedwait_relative, when the thread
     wakes up due to timeout and is still on the condvar queue, and then
     locks the queue to remove itself.  At that point it may still be on the
     queue, and may be resumed by a condition signal. */

  if (oldstatus != 0) {
    for (;;) {
      suspend(self);
      if (self->p_nextlock != NULL) {
        /* Count resumes that don't belong to us. */
        spurious_wakeup_count++;
        continue;
      }
      break;
    }
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);
}

int __pthread_spin_lock(pthread_spinlock_t * lock)
{
  __pthread_lock (lock, NULL);
  return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
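
/* A hedged usage sketch of the public entry points defined in this file
   (pthread_spin_init, pthread_spin_lock, pthread_spin_unlock); the variables
   and functions below are hypothetical and the block is kept out of the
   build with #if 0. */
#if 0
static pthread_spinlock_t sl;
static int shared_counter;

static void setup(void)
{
  /* Only process-private locks are supported here; a nonzero pshared
     argument makes __pthread_spin_init below return ENOSYS. */
  pthread_spin_init(&sl, 0);
}

static void *worker(void *arg)
{
  pthread_spin_lock(&sl);
  shared_counter++;             /* critical section */
  pthread_spin_unlock(&sl);
  return NULL;
}
#endif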

int __pthread_spin_unlock(pthread_spinlock_t * lock)
{
  long oldstatus;
  pthread_descr thr, * ptr, * maxptr;
  int maxprio;

again:
  oldstatus = lock->__status;
  if (oldstatus == 0 || oldstatus == 1) {
    /* No threads are waiting for this lock.  Please note that we also
       enter this case if the lock is not taken at all.  If this wouldn't
       be done here we would crash further down. */
    if (! compare_and_swap(&lock->__status, oldstatus, 0, &lock->__spinlock))
      goto again;
    return 0;
  }
  /* Find thread in waiting queue with maximal priority */
  ptr = (pthread_descr *) &lock->__status;
  thr = (pthread_descr) oldstatus;
  maxprio = 0;
  maxptr = ptr;
  while (thr != (pthread_descr) 1) {
    if (thr->p_priority >= maxprio) {
      maxptr = ptr;
      maxprio = thr->p_priority;
    }
    ptr = &(thr->p_nextlock);
    /* Prevent reordering of the load of lock->__status above and the
       load of *ptr below, as well as reordering of *ptr between
       several iterations of the while loop.  Some processors (e.g.
       multiprocessor Alphas) could perform such reordering even though
       the loads are dependent. */
    MEMORY_BARRIER();
    thr = *ptr;
  }
  /* Prevent reordering of the load of lock->__status above and
     thr->p_nextlock below */
  MEMORY_BARRIER();
  /* Remove max prio thread from waiting list. */
  if (maxptr == (pthread_descr *) &lock->__status) {
    /* If max prio thread is at head, remove it with compare-and-swap
       to guard against concurrent lock operation */
    thr = (pthread_descr) oldstatus;
    if (! compare_and_swap(&lock->__status,
                           oldstatus, (long)(thr->p_nextlock),
                           &lock->__spinlock))
      goto again;
  } else {
    /* No risk of concurrent access, remove max prio thread normally */
    thr = *maxptr;
    *maxptr = thr->p_nextlock;
  }
  /* Prevent reordering of the store to *maxptr above and the store to
     thr->p_nextlock below */
  MEMORY_BARRIER();
  /* Wake up the selected waiting thread */
  thr->p_nextlock = NULL;
  restart(thr);

  return 0;
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)

int __pthread_spin_trylock (pthread_spinlock_t *lock)
{
  return __pthread_trylock (lock);
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)

int __pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
  if (pshared != 0)
    return ENOSYS;

  __pthread_init_lock (lock);
  return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)

int __pthread_spin_destroy(pthread_spinlock_t *lock)
{
  /* Nothing to do. */
  return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

/* Compare-and-swap emulation with a spinlock */

#ifdef TEST_FOR_COMPARE_AND_SWAP
int __pthread_has_cas = 0;
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP

static void __pthread_acquire(int * spinlock);

int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                               int * spinlock)
{
  int res;
  if (testandset(spinlock)) __pthread_acquire(spinlock);
  if (*ptr == oldval) {
    *ptr = newval; res = 1;
  } else {
    res = 0;
  }
  /* Prevent reordering of store to *ptr above and store to *spinlock below */
  WRITE_MEMORY_BARRIER();
  *spinlock = 0;
  return res;
}
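
/* Minimal usage sketch (illustration only): the emulated compare-and-swap
   above is typically used in a retry loop, as in __pthread_lock.  The
   "counter" and "counter_spinlock" variables are hypothetical and the block
   is kept out of the build with #if 0. */
#if 0
static long counter;
static int counter_spinlock;

static void increment_counter(void)
{
  long old;
  do {
    old = counter;
  } while (! __pthread_compare_and_swap(&counter, old, old + 1,
                                        &counter_spinlock));
}
#endif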

/* This function is called if the inlined test-and-set
   in __pthread_compare_and_swap() failed */

/* The retry strategy is as follows:
   - We test and set the spinlock MAX_SPIN_COUNT times, calling
     sched_yield() each time.  This gives ample opportunity for other
     threads with priority >= our priority to make progress and
     release the spinlock.
   - If a thread with priority < our priority owns the spinlock,
     calling sched_yield() repeatedly is useless, since we're preventing
     the owning thread from making progress and releasing the spinlock.
     So, after MAX_SPIN_COUNT attempts, we suspend the calling thread
     using nanosleep().  This again should give time to the owning thread
     to release the spinlock.
     Notice that the nanosleep() interval must not be too small,
     since the kernel does busy-waiting for short intervals in a realtime
     process (!).  The smallest duration that guarantees thread
     suspension is currently 2 ms.
   - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
     sched_yield(), then sleeping again if needed. */

static void __pthread_acquire(int * spinlock)
{
  int cnt = 0;
  struct timespec tm;

  while (testandset(spinlock)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield();
      cnt++;
    } else {
      tm.tv_sec = 0;
      tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}

#endif