/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-pi-defines.h>
#include <pthread-errnos.h>
#include <stap-probe.h>

#include <kernel-features.h>


        .text


/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
                               const struct timespec *abstime)  */
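
/* For orientation, a simplified C-level sketch of the protocol implemented
   below.  This is illustrative pseudocode only, not the exact code path:
   the field names (cond_lock, total_seq, wakeup_seq, woken_seq,
   broadcast_seq, cond_futex, cond_nwaiters, dep_mutex) are the
   pthread_cond_t members defined in lowlevelcond.h, futex_wait_abs is a
   hypothetical helper standing in for the FUTEX_WAIT_BITSET call, and
   error handling, cancellation and the requeue-PI path are omitted:

       lll_lock (cond->cond_lock);
       pthread_mutex_unlock (mutex);
       cond->total_seq++;                        // announce one more waiter
       cond->cond_futex++;
       cond->cond_nwaiters += 1 << nwaiters_shift;
       seq = cond->wakeup_seq;                   // saved at 24(%rsp)
       bc  = cond->broadcast_seq;                // saved at 4(%rsp)
       do {
           val = cond->cond_futex;
           lll_unlock (cond->cond_lock);
           err = futex_wait_abs (&cond->cond_futex, val, abstime);
           lll_lock (cond->cond_lock);
           if (cond->broadcast_seq != bc)
             break;                              // woken by a broadcast
       } while (cond->wakeup_seq == seq && err != -ETIMEDOUT);
       cond->woken_seq++;                        // consume one wakeup
       cond->cond_nwaiters -= 1 << nwaiters_shift;
       lll_unlock (cond->cond_lock);
       pthread_mutex_lock (mutex);
       return err == -ETIMEDOUT ? ETIMEDOUT : 0;  */
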
        .globl  __pthread_cond_timedwait
        .type   __pthread_cond_timedwait, @function
        .align  16
__pthread_cond_timedwait:
.LSTARTCODE:
        cfi_startproc
#ifdef SHARED
        cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
                        DW.ref.__gcc_personality_v0)
        cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
#else
        cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
        cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
#endif

        pushq   %r12
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r12, 0)
        pushq   %r13
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r13, 0)
        pushq   %r14
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r14, 0)
        pushq   %r15
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r15, 0)
#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
# define FRAME_SIZE (32+8)
#else
# define FRAME_SIZE (48+8)
#endif
        subq    $FRAME_SIZE, %rsp
        cfi_adjust_cfa_offset(FRAME_SIZE)
        cfi_remember_state

        LIBC_PROBE (cond_timedwait, 3, %rdi, %rsi, %rdx)

        cmpq    $1000000000, 8(%rdx)
        movl    $EINVAL, %eax
        jae     48f

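        /* The timespec validation above, in C (illustrative sketch):

               if ((unsigned long) abstime->tv_nsec >= 1000000000)
                 return EINVAL;

           A single unsigned comparison (jae) suffices: a negative tv_nsec
           wraps around to a huge unsigned value and fails the same test.  */
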
        /* Stack frame:

           rsp + 48
                    +--------------------------+
           rsp + 32 | timeout value            |
                    +--------------------------+
           rsp + 24 | old wakeup_seq value     |
                    +--------------------------+
           rsp + 16 | mutex pointer            |
                    +--------------------------+
           rsp +  8 | condvar pointer          |
                    +--------------------------+
           rsp +  4 | old broadcast_seq value  |
                    +--------------------------+
           rsp +  0 | old cancellation mode    |
                    +--------------------------+
        */

        LP_OP(cmp) $-1, dep_mutex(%rdi)

        /* Prepare structure passed to cancellation handler.  */
        movq    %rdi, 8(%rsp)
        movq    %rsi, 16(%rsp)
        movq    %rdx, %r13

        je      22f
        mov     %RSI_LP, dep_mutex(%rdi)

22:     xorb    %r15b, %r15b

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
# ifdef PIC
        cmpl    $0, __have_futex_clock_realtime(%rip)
# else
        cmpl    $0, __have_futex_clock_realtime
# endif
        je      .Lreltmo
#endif

        /* Get internal lock.  */
        movl    $1, %esi
        xorl    %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %esi, (%rdi)
#else
        cmpxchgl %esi, cond_lock(%rdi)
#endif
        jnz     31f

        /* Unlock the mutex.  */
32:     movq    16(%rsp), %rdi
        xorl    %esi, %esi
        callq   __pthread_mutex_unlock_usercnt

        testl   %eax, %eax
        jne     46f

        movq    8(%rsp), %rdi
        incq    total_seq(%rdi)
        incl    cond_futex(%rdi)
        addl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)

        /* Get and store current wakeup_seq value.  */
        movq    8(%rsp), %rdi
        movq    wakeup_seq(%rdi), %r9
        movl    broadcast_seq(%rdi), %edx
        movq    %r9, 24(%rsp)
        movl    %edx, 4(%rsp)

        cmpq    $0, (%r13)
        movq    $-ETIMEDOUT, %r14
        js      36f

38:     movl    cond_futex(%rdi), %r12d

        /* Unlock.  */
        LOCK
#if cond_lock == 0
        decl    (%rdi)
#else
        decl    cond_lock(%rdi)
#endif
        jne     33f

.LcleanupSTART1:
34:     callq   __pthread_enable_asynccancel
        movl    %eax, (%rsp)

        movq    %r13, %r10
        movl    $FUTEX_WAIT_BITSET, %esi
        LP_OP(cmp) $-1, dep_mutex(%rdi)
        je      60f

        mov     dep_mutex(%rdi), %R8_LP
        /* Requeue to a non-robust PI mutex if the PI bit is set and
           the robust bit is not set.  */
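        /* In C, the test below is roughly:

               if ((mutex->kind & (ROBUST_BIT | PI_BIT)) == PI_BIT)
                 ... use FUTEX_WAIT_REQUEUE_PI ...
               else
                 ... use FUTEX_WAIT_BITSET (label 61 below) ...

           (illustrative sketch; `kind` stands for the mutex field read
           through the MUTEX_KIND offset).  */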
        movl    MUTEX_KIND(%r8), %eax
        andl    $(ROBUST_BIT|PI_BIT), %eax
        cmpl    $PI_BIT, %eax
        jne     61f

        movl    $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
        xorl    %eax, %eax
        /* The following only works like this because we only support
           two clocks, represented using a single bit.  */
        testl   $1, cond_nwaiters(%rdi)
        movl    $FUTEX_CLOCK_REALTIME, %edx
        cmove   %edx, %eax
        orl     %eax, %esi
        movq    %r12, %rdx
        addq    $cond_futex, %rdi
        movl    $SYS_futex, %eax
        syscall

        cmpl    $0, %eax
        sete    %r15b

#ifdef __ASSUME_REQUEUE_PI
        jmp     62f
#else
        je      62f

        /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
           successfully, it has already locked the mutex for us and the
           pi_flag (%r15b) is set to denote that fact.  However, if another
           thread changed the futex value before we entered the wait, the
           syscall may return EAGAIN and the mutex is then not locked.  We
           proceed as if the call succeeded anyway, since later we look at
           the pi_flag to decide whether we got the mutex or not.  The
           sequence numbers then make sure that only one of the threads
           actually wakes up.  We retry using normal FUTEX_WAIT only if the
           kernel returned ENOSYS, since normal and PI futexes don't mix.

           Note that we don't check for EAGAIN specifically; we assume that
           the only other error the futex call could return is EAGAIN
           (barring ETIMEDOUT, of course, for the timeout case) since
           anything else would mean an error in our function.  Doing that
           check on every call would be too expensive (futex calls are
           frequent when many threads are involved), so it is skipped.  */
        cmpl    $-ENOSYS, %eax
        jne     62f

        subq    $cond_futex, %rdi
#endif

61:     movl    $(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
60:     xorb    %r15b, %r15b
        xorl    %eax, %eax
        /* The following only works like this because we only support
           two clocks, represented using a single bit.  */
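        /* Concretely: bit 0 of cond_nwaiters holds the clock ID selected
           with pthread_condattr_setclock (CLOCK_REALTIME = 0,
           CLOCK_MONOTONIC = 1).  FUTEX_WAIT_BITSET takes an absolute
           timeout measured against CLOCK_MONOTONIC unless
           FUTEX_CLOCK_REALTIME is set.  Illustrative C sketch of the flag
           computation and syscall performed below:

               if ((cond->cond_nwaiters & 1) == 0)      // CLOCK_REALTIME
                 op |= FUTEX_CLOCK_REALTIME;
               syscall (SYS_futex, &cond->cond_futex, op, val, abstime,
                        NULL, 0xffffffff);              // match-any bitset
        */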
        testl   $1, cond_nwaiters(%rdi)
        movl    $FUTEX_CLOCK_REALTIME, %edx
        movl    $0xffffffff, %r9d
        cmove   %edx, %eax
        orl     %eax, %esi
        movq    %r12, %rdx
        addq    $cond_futex, %rdi
        movl    $SYS_futex, %eax
        syscall
62:     movq    %rax, %r14

        movl    (%rsp), %edi
        callq   __pthread_disable_asynccancel
.LcleanupEND1:

        /* Lock.  */
        movq    8(%rsp), %rdi
        movl    $1, %esi
        xorl    %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %esi, (%rdi)
#else
        cmpxchgl %esi, cond_lock(%rdi)
#endif
        jne     35f

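        /* Wakeup bookkeeping, starting at label 36.  Illustrative C sketch
           of the decision tree (seq and bc are the values saved at
           24(%rsp) and 4(%rsp)):

               if (cond->broadcast_seq != bc)  goto 53;  // broadcast: return 0
               if (cond->wakeup_seq > seq
                   && cond->wakeup_seq > cond->woken_seq)
                                               goto 39;  // signal for us: return 0
               if (err == -ETIMEDOUT)          goto 99;  // account for timeout
               goto 38;                                  // spurious: wait again
        */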
36:     movl    broadcast_seq(%rdi), %edx

        movq    woken_seq(%rdi), %rax

        movq    wakeup_seq(%rdi), %r9

        cmpl    4(%rsp), %edx
        jne     53f

        cmpq    24(%rsp), %r9
        jbe     45f

        cmpq    %rax, %r9
        ja      39f

45:     cmpq    $-ETIMEDOUT, %r14
        je      99f

        /* We need to go back to futex_wait.  If we're using requeue_pi,
           then release the mutex we had acquired and go back.  */
        test    %r15b, %r15b
        jz      38b

        /* Adjust the mutex values first and then unlock it.  The unlock
           should always succeed or else the kernel did not lock the
           mutex correctly.  */
        movq    %r8, %rdi
        callq   __pthread_mutex_cond_lock_adjust
        xorl    %esi, %esi
        callq   __pthread_mutex_unlock_usercnt
        /* Reload the condvar pointer.  */
        movq    8(%rsp), %rdi
        jmp     38b

99:     incq    wakeup_seq(%rdi)
        incl    cond_futex(%rdi)
        movl    $ETIMEDOUT, %r14d
        jmp     44f

53:     xorq    %r14, %r14
        jmp     54f

39:     xorq    %r14, %r14
44:     incq    woken_seq(%rdi)

54:     subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)

        /* Wake up a thread which wants to destroy the condvar object.  */
        cmpq    $0xffffffffffffffff, total_seq(%rdi)
        jne     55f
        movl    cond_nwaiters(%rdi), %eax
        andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     55f

        addq    $cond_nwaiters, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
        movl    $1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAKE, %eax
        movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        cmove   %eax, %esi
#else
        movl    $0, %eax
        movl    %fs:PRIVATE_FUTEX, %esi
        cmove   %eax, %esi
        orl     $FUTEX_WAKE, %esi
#endif
        movl    $SYS_futex, %eax
        syscall
        subq    $cond_nwaiters, %rdi

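        /* The block above supports pthread_cond_destroy: a destroyer sets
           total_seq to -1 and waits on cond_nwaiters until the last waiter
           leaves.  The waiter count lives in the upper bits of
           cond_nwaiters (the low nwaiters_shift bits hold the clock ID).
           Illustrative C sketch:

               if (cond->total_seq == ~0ULL
                   && (cond->cond_nwaiters >> nwaiters_shift) == 0)
                 futex_wake (&cond->cond_nwaiters, 1);  // let destroy finish
        */
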
55:     LOCK
#if cond_lock == 0
        decl    (%rdi)
#else
        decl    cond_lock(%rdi)
#endif
        jne     40f

        /* If requeue_pi is used the kernel performs the locking of the
           mutex.  */
41:     movq    16(%rsp), %rdi
        testb   %r15b, %r15b
        jnz     64f

        callq   __pthread_mutex_cond_lock

63:     testq   %rax, %rax
        cmoveq  %r14, %rax

48:     addq    $FRAME_SIZE, %rsp
        cfi_adjust_cfa_offset(-FRAME_SIZE)
        popq    %r15
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r15)
        popq    %r14
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r14)
        popq    %r13
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r13)
        popq    %r12
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r12)

        retq

        cfi_restore_state

64:     callq   __pthread_mutex_cond_lock_adjust
        movq    %r14, %rax
        jmp     48b

        /* Initial locking failed.  */
31:
#if cond_lock != 0
        addq    $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_lock_wait
        jmp     32b

        /* Unlock in loop requires wakeup.  */
33:
#if cond_lock != 0
        addq    $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_unlock_wake
        jmp     34b

        /* Locking in loop failed.  */
35:
#if cond_lock != 0
        addq    $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_lock_wait
#if cond_lock != 0
        subq    $cond_lock, %rdi
#endif
        jmp     36b

        /* Unlock after loop requires wakeup.  */
40:
#if cond_lock != 0
        addq    $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_unlock_wake
        jmp     41b

        /* The initial unlocking of the mutex failed.  */
46:     movq    8(%rsp), %rdi
        movq    %rax, (%rsp)
        LOCK
#if cond_lock == 0
        decl    (%rdi)
#else
        decl    cond_lock(%rdi)
#endif
        jne     47f

#if cond_lock != 0
        addq    $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_unlock_wake

47:     movq    (%rsp), %rax
        jmp     48b


#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
.Lreltmo:
        /* Get internal lock.  */
        movl    $1, %esi
        xorl    %eax, %eax
        LOCK
# if cond_lock == 0
        cmpxchgl %esi, (%rdi)
# else
        cmpxchgl %esi, cond_lock(%rdi)
# endif
        jnz     1f

        /* Unlock the mutex.  */
2:      movq    16(%rsp), %rdi
        xorl    %esi, %esi
        callq   __pthread_mutex_unlock_usercnt

        testl   %eax, %eax
        jne     46b

        movq    8(%rsp), %rdi
        incq    total_seq(%rdi)
        incl    cond_futex(%rdi)
        addl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)

        /* Get and store current wakeup_seq value.  */
        movq    8(%rsp), %rdi
        movq    wakeup_seq(%rdi), %r9
        movl    broadcast_seq(%rdi), %edx
        movq    %r9, 24(%rsp)
        movl    %edx, 4(%rsp)

        /* Get the current time.  */
8:
# ifdef __NR_clock_gettime
        /* Get the clock number.  The clock ID is stored in the low
           bits of the cond_nwaiters field.  */
        movq    8(%rsp), %rdi
        movl    cond_nwaiters(%rdi), %edi
        andl    $((1 << nwaiters_shift) - 1), %edi
        /* Only clocks 0 and 1 are allowed so far.  Both are handled in the
           kernel.  */
        leaq    32(%rsp), %rsi
#  ifdef SHARED
        mov     __vdso_clock_gettime@GOTPCREL(%rip), %RAX_LP
        mov     (%rax), %RAX_LP
        PTR_DEMANGLE (%RAX_LP)
        call    *%rax
#  else
        movl    $__NR_clock_gettime, %eax
        syscall
#  endif

        /* Compute relative timeout.  */
        movq    (%r13), %rcx
        movq    8(%r13), %rdx
        subq    32(%rsp), %rcx
        subq    40(%rsp), %rdx
# else
        leaq    24(%rsp), %rdi
        xorl    %esi, %esi
        /* This call works because we directly jump to a system call entry
           which preserves all the registers.  */
        call    JUMPTARGET(__gettimeofday)

        /* Compute relative timeout.  */
        movq    40(%rsp), %rax
        movl    $1000, %edx
        mul     %rdx            /* Microseconds to nanoseconds.  */
        movq    (%r13), %rcx
        movq    8(%r13), %rdx
        subq    32(%rsp), %rcx
        subq    %rax, %rdx
# endif
        jns     12f
        addq    $1000000000, %rdx
        decq    %rcx
12:     testq   %rcx, %rcx
        movq    8(%rsp), %rdi
        movq    $-ETIMEDOUT, %r14
        js      6f
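
        /* The relative-timeout computation just performed, in C
           (illustrative sketch; `now` is the time just obtained):

               rel.tv_sec  = abstime->tv_sec  - now.tv_sec;
               rel.tv_nsec = abstime->tv_nsec - now.tv_nsec;
               if (rel.tv_nsec < 0) {
                   rel.tv_nsec += 1000000000;   // borrow one second
                   rel.tv_sec--;
               }
               if (rel.tv_sec < 0)              // deadline already passed
                 return ETIMEDOUT;
        */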

        /* Store relative timeout.  */
21:     movq    %rcx, 32(%rsp)
        movq    %rdx, 40(%rsp)

        movl    cond_futex(%rdi), %r12d

        /* Unlock.  */
        LOCK
# if cond_lock == 0
        decl    (%rdi)
# else
        decl    cond_lock(%rdi)
# endif
        jne     3f

.LcleanupSTART2:
4:      callq   __pthread_enable_asynccancel
        movl    %eax, (%rsp)

        leaq    32(%rsp), %r10
        LP_OP(cmp) $-1, dep_mutex(%rdi)
        movq    %r12, %rdx
# ifdef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAIT, %eax
        movl    $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
        cmove   %eax, %esi
# else
        movl    $0, %eax
        movl    %fs:PRIVATE_FUTEX, %esi
        cmove   %eax, %esi
#  if FUTEX_WAIT != 0
        orl     $FUTEX_WAIT, %esi
#  endif
# endif
        addq    $cond_futex, %rdi
        movl    $SYS_futex, %eax
        syscall
        movq    %rax, %r14

        movl    (%rsp), %edi
        callq   __pthread_disable_asynccancel
.LcleanupEND2:

        /* Lock.  */
        movq    8(%rsp), %rdi
        movl    $1, %esi
        xorl    %eax, %eax
        LOCK
# if cond_lock == 0
        cmpxchgl %esi, (%rdi)
# else
        cmpxchgl %esi, cond_lock(%rdi)
# endif
        jne     5f

6:      movl    broadcast_seq(%rdi), %edx

        movq    woken_seq(%rdi), %rax

        movq    wakeup_seq(%rdi), %r9

        cmpl    4(%rsp), %edx
        jne     53b

        cmpq    24(%rsp), %r9
        jbe     15f

        cmpq    %rax, %r9
        ja      39b

15:     cmpq    $-ETIMEDOUT, %r14
        jne     8b

        jmp     99b

        /* Initial locking failed.  */
1:
# if cond_lock != 0
        addq    $cond_lock, %rdi
# endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_lock_wait
        jmp     2b

        /* Unlock in loop requires wakeup.  */
3:
# if cond_lock != 0
        addq    $cond_lock, %rdi
# endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_unlock_wake
        jmp     4b

        /* Locking in loop failed.  */
5:
# if cond_lock != 0
        addq    $cond_lock, %rdi
# endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_lock_wait
# if cond_lock != 0
        subq    $cond_lock, %rdi
# endif
        jmp     6b
#endif
        .size   __pthread_cond_timedwait, .-__pthread_cond_timedwait
versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
                  GLIBC_2_3_2)


        .align  16
        .type   __condvar_cleanup2, @function
__condvar_cleanup2:
        /* Stack frame:

           rsp + 72
                    +--------------------------+
           rsp + 64 | %r12                     |
                    +--------------------------+
           rsp + 56 | %r13                     |
                    +--------------------------+
           rsp + 48 | %r14                     |
                    +--------------------------+
           rsp + 24 | unused                   |
                    +--------------------------+
           rsp + 16 | mutex pointer            |
                    +--------------------------+
           rsp +  8 | condvar pointer          |
                    +--------------------------+
           rsp +  4 | old broadcast_seq value  |
                    +--------------------------+
           rsp +  0 | old cancellation mode    |
                    +--------------------------+
        */
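
        /* What this cancellation handler does, roughly (illustrative
           pseudocode mirroring the code below): undo the waiter accounting
           for this thread, make sure no signal is lost, reacquire the user
           mutex, then resume unwinding:

               lll_lock (cond->cond_lock);
               if (cond->broadcast_seq == saved_bc) {
                   if (cond->wakeup_seq < cond->total_seq) {
                       cond->wakeup_seq++;          // consume our own slot
                       cond->cond_futex++;
                   }
                   cond->woken_seq++;
               }
               cond->cond_nwaiters -= 1 << nwaiters_shift;
               // wake a pending destroyer if we were the last waiter
               lll_unlock (cond->cond_lock);
               futex_wake (&cond->cond_futex, INT_MAX);  // no lost signals
               pthread_mutex_cond_lock (mutex);  // unless PI already owns it
               _Unwind_Resume ();
        */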

        movq    %rax, 24(%rsp)

        /* Get internal lock.  */
        movq    8(%rsp), %rdi
        movl    $1, %esi
        xorl    %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %esi, (%rdi)
#else
        cmpxchgl %esi, cond_lock(%rdi)
#endif
        jz      1f

#if cond_lock != 0
        addq    $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_lock_wait
#if cond_lock != 0
        subq    $cond_lock, %rdi
#endif

1:      movl    broadcast_seq(%rdi), %edx
        cmpl    4(%rsp), %edx
        jne     3f

        /* We increment the wakeup_seq counter only if it is lower than
           total_seq.  If this is not the case the thread was woken and
           then canceled.  In this case we ignore the signal.  */
        movq    total_seq(%rdi), %rax
        cmpq    wakeup_seq(%rdi), %rax
        jbe     6f
        incq    wakeup_seq(%rdi)
        incl    cond_futex(%rdi)
6:      incq    woken_seq(%rdi)

3:      subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)

        /* Wake up a thread which wants to destroy the condvar object.  */
        xorq    %r12, %r12
        cmpq    $0xffffffffffffffff, total_seq(%rdi)
        jne     4f
        movl    cond_nwaiters(%rdi), %eax
        andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     4f

        LP_OP(cmp) $-1, dep_mutex(%rdi)
        leaq    cond_nwaiters(%rdi), %rdi
        movl    $1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAKE, %eax
        movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        cmove   %eax, %esi
#else
        movl    $0, %eax
        movl    %fs:PRIVATE_FUTEX, %esi
        cmove   %eax, %esi
        orl     $FUTEX_WAKE, %esi
#endif
        movl    $SYS_futex, %eax
        syscall
        subq    $cond_nwaiters, %rdi
        movl    $1, %r12d

4:      LOCK
#if cond_lock == 0
        decl    (%rdi)
#else
        decl    cond_lock(%rdi)
#endif
        je      2f
#if cond_lock != 0
        addq    $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        cmovne  %eax, %esi
        callq   __lll_unlock_wake

        /* Wake up all waiters to make sure no signal gets lost.  */
2:      testq   %r12, %r12
        jnz     5f
        addq    $cond_futex, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
        movl    $0x7fffffff, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAKE, %eax
        movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        cmove   %eax, %esi
#else
        movl    $0, %eax
        movl    %fs:PRIVATE_FUTEX, %esi
        cmove   %eax, %esi
        orl     $FUTEX_WAKE, %esi
#endif
        movl    $SYS_futex, %eax
        syscall

        /* Lock the mutex only if we don't own it already.  This only
           happens in case of PI mutexes, if we got cancelled after a
           successful return of the futex syscall and before disabling
           async cancellation.  */
5:      movq    16(%rsp), %rdi
        movl    MUTEX_KIND(%rdi), %eax
        andl    $(ROBUST_BIT|PI_BIT), %eax
        cmpl    $PI_BIT, %eax
        jne     7f

        movl    (%rdi), %eax
        andl    $TID_MASK, %eax
        cmpl    %eax, %fs:TID
        jne     7f
        /* We managed to get the lock.  Fix it up before returning.  */
        callq   __pthread_mutex_cond_lock_adjust
        jmp     8f

7:      callq   __pthread_mutex_cond_lock

8:      movq    24(%rsp), %rdi
        movq    FRAME_SIZE(%rsp), %r15
        movq    FRAME_SIZE+8(%rsp), %r14
        movq    FRAME_SIZE+16(%rsp), %r13
        movq    FRAME_SIZE+24(%rsp), %r12
.LcallUR:
        call    _Unwind_Resume@PLT
        hlt
.LENDCODE:
        cfi_endproc
        .size   __condvar_cleanup2, .-__condvar_cleanup2


        .section .gcc_except_table,"a",@progbits
.LexceptSTART:
        .byte   DW_EH_PE_omit                   # @LPStart format
        .byte   DW_EH_PE_omit                   # @TType format
        .byte   DW_EH_PE_uleb128                # call-site format
        .uleb128 .Lcstend-.Lcstbegin
.Lcstbegin:
        .uleb128 .LcleanupSTART1-.LSTARTCODE
        .uleb128 .LcleanupEND1-.LcleanupSTART1
        .uleb128 __condvar_cleanup2-.LSTARTCODE
        .uleb128 0
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
        .uleb128 .LcleanupSTART2-.LSTARTCODE
        .uleb128 .LcleanupEND2-.LcleanupSTART2
        .uleb128 __condvar_cleanup2-.LSTARTCODE
        .uleb128 0
#endif
        .uleb128 .LcallUR-.LSTARTCODE
        .uleb128 .LENDCODE-.LcallUR
        .uleb128 0
        .uleb128 0
.Lcstend:


#ifdef SHARED
        .hidden DW.ref.__gcc_personality_v0
        .weak   DW.ref.__gcc_personality_v0
        .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
        .align  LP_SIZE
        .type   DW.ref.__gcc_personality_v0, @object
        .size   DW.ref.__gcc_personality_v0, LP_SIZE
DW.ref.__gcc_personality_v0:
        ASM_ADDR __gcc_personality_v0
#endif