View | Details | Raw Unified | Return to bug 11588 | Differences between
and this patch

Collapse All | Expand All

(-)a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S (-179 lines)
Lines 1-179 Link Here
1
/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
2
   This file is part of the GNU C Library.
3
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5
   The GNU C Library is free software; you can redistribute it and/or
6
   modify it under the terms of the GNU Lesser General Public
7
   License as published by the Free Software Foundation; either
8
   version 2.1 of the License, or (at your option) any later version.
9
10
   The GNU C Library is distributed in the hope that it will be useful,
11
   but WITHOUT ANY WARRANTY; without even the implied warranty of
12
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13
   Lesser General Public License for more details.
14
15
   You should have received a copy of the GNU Lesser General Public
16
   License along with the GNU C Library; if not, see
17
   <http://www.gnu.org/licenses/>.  */
18
19
#include <sysdep.h>
20
#include <shlib-compat.h>
21
#include <lowlevellock.h>
22
#include <lowlevelcond.h>
23
#include <kernel-features.h>
24
#include <pthread-pi-defines.h>
25
#include <pthread-errnos.h>
26
#include <stap-probe.h>
27
28
	.text

	/* int pthread_cond_broadcast (pthread_cond_t *cond) */
	/* Wake every thread blocked on *cond (GLIBC_2_3_2 ABI, SysV AMD64,
	   %rdi = cond).  Visible strategy:
	     1. acquire the condvar's internal lock with a LOCK cmpxchg on
		cond_lock (contended path at label 1);
	     2. if there are un-woken waiters (total_seq > wakeup_seq),
		advance wakeup_seq and woken_seq to total_seq so every
		current waiter sees itself as woken, and bump
		broadcast_seq;
	     3. release the internal lock, then either requeue the waiters
		onto the dependent mutex (FUTEX_CMP_REQUEUE, or
		FUTEX_CMP_REQUEUE_PI for a PI mutex) or, when requeue is
		unusable (pshared condvar, old kernel, syscall error), wake
		them all with plain FUTEX_WAKE at label 9.
	   Returns 0 in %eax on every path.  */
	.globl	__pthread_cond_broadcast
	.type	__pthread_cond_broadcast, @function
	.align	16
__pthread_cond_broadcast:

	LIBC_PROBE (cond_broadcast, 1, %rdi)

	/* Get internal lock.  */
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jnz	1f

	/* From here on %rdi points at the cond_futex field; the other
	   condvar fields are addressed with negative offsets relative
	   to it.  */
2:	addq	$cond_futex, %rdi
	movq	total_seq-cond_futex(%rdi), %r9
	cmpq	wakeup_seq-cond_futex(%rdi), %r9
	jna	4f

	/* Cause all currently waiting threads to recognize they are
	   woken up.  */
	movq	%r9, wakeup_seq-cond_futex(%rdi)
	movq	%r9, woken_seq-cond_futex(%rdi)
	/* New futex word value is the low 32 bits of 2 * total_seq.  */
	addq	%r9, %r9
	movl	%r9d, (%rdi)
	incl	broadcast_seq-cond_futex(%rdi)

	/* Get the address of the mutex used.  */
	mov	dep_mutex-cond_futex(%rdi), %R8_LP

	/* Unlock.  */
	LOCK
	decl	cond_lock-cond_futex(%rdi)
	jne	7f

	/* dep_mutex == -1 is the value the lock paths below pair with
	   LLL_SHARED; for it requeueing onto the mutex is not done —
	   fall through to the plain-wake path at 9.  */
8:	cmp	$-1, %R8_LP
	je	9f

	/* Do not use requeue for pshared condvars.  */
	testl	$PS_BIT, MUTEX_KIND(%r8)
	jne	9f

	/* Requeue to a PI mutex if the PI bit is set.  */
	movl	MUTEX_KIND(%r8), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	je	81f

	/* Wake up all threads.  */
	/* futex(%rdi = &cond_futex, FUTEX_CMP_REQUEUE, %edx = 1 wake,
	   %r10d = 0x7fffffff requeue-limit, %r8 = &mutex, %r9 = expected
	   futex value set above).  */
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %esi
#else
	movl	%fs:PRIVATE_FUTEX, %esi
	orl	$FUTEX_CMP_REQUEUE, %esi
#endif
	movl	$SYS_futex, %eax
	movl	$1, %edx
	movl	$0x7fffffff, %r10d
	syscall

	/* For any kind of error, which mainly is EAGAIN, we try again
	   with WAKE.  The general test also covers running on old
	   kernels.  */
	cmpq	$-4095, %rax
	jae	9f

10:	xorl	%eax, %eax
	retq

	/* Wake up all threads.  */
81:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
	movl	$SYS_futex, %eax
	movl	$1, %edx
	movl	$0x7fffffff, %r10d
	syscall

	/* For any kind of error, which mainly is EAGAIN, we try again
	   with WAKE.  The general test also covers running on old
	   kernels.  */
	cmpq	$-4095, %rax
	jb	10b
	jmp	9f

	.align	16
	/* Unlock.  */
4:	LOCK
	decl	cond_lock-cond_futex(%rdi)
	jne	5f

6:	xorl	%eax, %eax
	retq

	/* Initial locking failed.  */
1:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif
	jmp	2b

	/* Unlock in loop requires wakeup.  */
5:	addq	$cond_lock-cond_futex, %rdi
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	6b

	/* Unlock in loop requires wakeup.  */
7:	addq	$cond_lock-cond_futex, %rdi
	cmp	$-1, %R8_LP
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	subq	$cond_lock-cond_futex, %rdi
	jmp	8b

9:	/* The futex requeue functionality is not available.  */
	/* Fallback: FUTEX_WAKE up to 0x7fffffff waiters on cond_futex.
	   The syscall result is deliberately ignored; return 0 via 10.  */
	cmp	$-1, %R8_LP
	movl	$0x7fffffff, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall
	jmp	10b
	.size	__pthread_cond_broadcast, .-__pthread_cond_broadcast
versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
		  GLIBC_2_3_2)
(-)a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S (-164 lines)
Lines 1-164 Link Here
1
/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
2
   This file is part of the GNU C Library.
3
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5
   The GNU C Library is free software; you can redistribute it and/or
6
   modify it under the terms of the GNU Lesser General Public
7
   License as published by the Free Software Foundation; either
8
   version 2.1 of the License, or (at your option) any later version.
9
10
   The GNU C Library is distributed in the hope that it will be useful,
11
   but WITHOUT ANY WARRANTY; without even the implied warranty of
12
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13
   Lesser General Public License for more details.
14
15
   You should have received a copy of the GNU Lesser General Public
16
   License along with the GNU C Library; if not, see
17
   <http://www.gnu.org/licenses/>.  */
18
19
#include <sysdep.h>
20
#include <shlib-compat.h>
21
#include <lowlevellock.h>
22
#include <lowlevelcond.h>
23
#include <pthread-pi-defines.h>
24
#include <kernel-features.h>
25
#include <pthread-errnos.h>
26
#include <stap-probe.h>
27
28
29
	.text

	/* int pthread_cond_signal (pthread_cond_t *cond) */
	/* Wake a single waiter (GLIBC_2_3_2 ABI, %rdi = cond).  %r8 keeps
	   the condvar base address while %rdi is advanced to &cond_futex.
	   The fast path uses FUTEX_WAKE_OP with FUTEX_OP_CLEAR_WAKE_IF_GT_ONE
	   so the kernel clears cond_lock (passed in %r8) in the same call
	   that wakes the waiter; on syscall error the code falls back to a
	   plain FUTEX_WAKE (label 7) and then unlocks at label 4.  A PI
	   dep_mutex instead takes the FUTEX_CMP_REQUEUE_PI path at 9.
	   Returns 0 in %eax on every path.  */
	.globl	__pthread_cond_signal
	.type	__pthread_cond_signal, @function
	.align	16
__pthread_cond_signal:

	LIBC_PROBE (cond_signal, 1, %rdi)

	/* Get internal lock.  */
	movq	%rdi, %r8
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jnz	1f

2:	addq	$cond_futex, %rdi
	movq	total_seq(%r8), %rcx
	cmpq	wakeup_seq(%r8), %rcx
	jbe	4f

	/* Bump the wakeup number.  */
	addq	$1, wakeup_seq(%r8)
	addl	$1, (%rdi)

	/* Wake up one thread.  */
	LP_OP(cmp) $-1, dep_mutex(%r8)
	movl	$FUTEX_WAKE_OP, %esi
	movl	$1, %edx
	movl	$SYS_futex, %eax
	je	8f

	/* Get the address of the mutex used.  */
	mov     dep_mutex(%r8), %RCX_LP
	movl	MUTEX_KIND(%rcx), %r11d
	andl	$(ROBUST_BIT|PI_BIT), %r11d
	cmpl	$PI_BIT, %r11d
	je	9f

#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), %esi
#else
	orl	%fs:PRIVATE_FUTEX, %esi
#endif

	/* futex(&cond_futex, FUTEX_WAKE_OP, 1, %r10d = 1, %r8 = &cond_lock,
	   %r9d = FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): wake one waiter and let
	   the kernel drop the internal lock.  */
8:	movl	$1, %r10d
#if cond_lock != 0
	addq	$cond_lock, %r8
#endif
	movl	$FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %r9d
	syscall
#if cond_lock != 0
	subq	$cond_lock, %r8
#endif
	/* For any kind of error, we try again with WAKE.
	   The general test also covers running on old kernels.  */
	cmpq	$-4095, %rax
	jae	7f

	xorl	%eax, %eax
	retq

	/* Wake up one thread and requeue none in the PI Mutex case.  */
9:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
	movq	%rcx, %r8
	xorq	%r10, %r10
	movl	(%rdi), %r9d	// XXX Can this be right?
	syscall

	/* Restore %r8 = condvar base for the unlock at 4.  */
	leaq	-cond_futex(%rdi), %r8

	/* For any kind of error, we try again with WAKE.
	   The general test also covers running on old kernels.  */
	cmpq	$-4095, %rax
	jb	4f

7:
#ifdef __ASSUME_PRIVATE_FUTEX
	andl	$FUTEX_PRIVATE_FLAG, %esi
#else
	andl	%fs:PRIVATE_FUTEX, %esi
#endif
	orl	$FUTEX_WAKE, %esi
	movl	$SYS_futex, %eax
	/* %rdx should be 1 already from $FUTEX_WAKE_OP syscall.
	movl	$1, %edx  */
	syscall

	/* Unlock.  */
4:	LOCK
#if cond_lock == 0
	decl	(%r8)
#else
	decl	cond_lock(%r8)
#endif
	jne	5f

6:	xorl	%eax, %eax
	retq

	/* Initial locking failed.  */
1:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif
	jmp	2b

	/* Unlock in loop requires wakeup.  */
5:
	movq	%r8, %rdi
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	6b
	.size	__pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
		  GLIBC_2_3_2)
(-)a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S (-840 lines)
Lines 1-840 Link Here
1
/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
2
   This file is part of the GNU C Library.
3
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5
   The GNU C Library is free software; you can redistribute it and/or
6
   modify it under the terms of the GNU Lesser General Public
7
   License as published by the Free Software Foundation; either
8
   version 2.1 of the License, or (at your option) any later version.
9
10
   The GNU C Library is distributed in the hope that it will be useful,
11
   but WITHOUT ANY WARRANTY; without even the implied warranty of
12
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13
   Lesser General Public License for more details.
14
15
   You should have received a copy of the GNU Lesser General Public
16
   License along with the GNU C Library; if not, see
17
   <http://www.gnu.org/licenses/>.  */
18
19
#include <sysdep.h>
20
#include <shlib-compat.h>
21
#include <lowlevellock.h>
22
#include <lowlevelcond.h>
23
#include <pthread-pi-defines.h>
24
#include <pthread-errnos.h>
25
#include <stap-probe.h>
26
27
#include <kernel-features.h>
28
29
30
	.text


/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
			       const struct timespec *abstime)  */
/* Register roles across the wait loop (all visible below):
     %r13  = abstime pointer (3rd argument)
     %r12d = snapshot of cond_futex taken under the internal lock
     %r14  = futex-syscall result, later the value returned in %rax
	     (0, ETIMEDOUT, or the error from __pthread_mutex_cond_lock)
     %r15b = PI flag: set when FUTEX_WAIT_REQUEUE_PI returned 0 and the
	     kernel therefore already locked the mutex for us.
   The .Lreltmo path at the bottom is the pre-FUTEX_CLOCK_REALTIME
   fallback using a relative timeout; it shares labels 53/39/99/46 with
   the main path.  */
	.globl	__pthread_cond_timedwait
	.type	__pthread_cond_timedwait, @function
	.align	16
__pthread_cond_timedwait:
.LSTARTCODE:
	cfi_startproc
#ifdef SHARED
	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
			DW.ref.__gcc_personality_v0)
	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
#else
	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
#endif

	pushq	%r12
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r12, 0)
	pushq	%r13
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r13, 0)
	pushq	%r14
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r14, 0)
	pushq	%r15
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r15, 0)
#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
# define FRAME_SIZE (32+8)
#else
# define FRAME_SIZE (48+8)
#endif
	subq	$FRAME_SIZE, %rsp
	cfi_adjust_cfa_offset(FRAME_SIZE)
	cfi_remember_state

	LIBC_PROBE (cond_timedwait, 3, %rdi, %rsi, %rdx)

	/* abstime->tv_nsec must be < 1000000000; the unsigned compare
	   (jae) also rejects negative values.  */
	cmpq	$1000000000, 8(%rdx)
	movl	$EINVAL, %eax
	jae	48f

	/* Stack frame:

	   rsp + 48
		    +--------------------------+
	   rsp + 32 | timeout value            |
		    +--------------------------+
	   rsp + 24 | old wake_seq value       |
		    +--------------------------+
	   rsp + 16 | mutex pointer            |
		    +--------------------------+
	   rsp +  8 | condvar pointer          |
		    +--------------------------+
	   rsp +  4 | old broadcast_seq value  |
		    +--------------------------+
	   rsp +  0 | old cancellation mode    |
		    +--------------------------+
	*/

	LP_OP(cmp) $-1, dep_mutex(%rdi)

	/* Prepare structure passed to cancellation handler.  */
	movq	%rdi, 8(%rsp)
	movq	%rsi, 16(%rsp)
	movq	%rdx, %r13

	je	22f
	mov	%RSI_LP, dep_mutex(%rdi)

22:
	xorb	%r15b, %r15b

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
#  ifdef PIC
	cmpl	$0, __have_futex_clock_realtime(%rip)
#  else
	cmpl	$0, __have_futex_clock_realtime
#  endif
	je	.Lreltmo
#endif

	/* Get internal lock.  */
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jnz	31f

	/* Unlock the mutex.  */
32:	movq	16(%rsp), %rdi
	xorl	%esi, %esi
	callq	__pthread_mutex_unlock_usercnt

	testl	%eax, %eax
	jne	46f

	/* Register this thread as a waiter, under the internal lock.  */
	movq	8(%rsp), %rdi
	incq	total_seq(%rdi)
	incl	cond_futex(%rdi)
	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Get and store current wakeup_seq value.  */
	movq	8(%rsp), %rdi
	movq	wakeup_seq(%rdi), %r9
	movl	broadcast_seq(%rdi), %edx
	movq	%r9, 24(%rsp)
	movl	%edx, 4(%rsp)

	/* A negative abstime->tv_sec times out immediately.  */
	cmpq	$0, (%r13)
	movq	$-ETIMEDOUT, %r14
	js	36f

38:	movl	cond_futex(%rdi), %r12d

	/* Unlock.  */
	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	33f

.LcleanupSTART1:
34:	callq	__pthread_enable_asynccancel
	movl	%eax, (%rsp)

	movq	%r13, %r10
	movl	$FUTEX_WAIT_BITSET, %esi
	LP_OP(cmp) $-1, dep_mutex(%rdi)
	je	60f

	mov	dep_mutex(%rdi), %R8_LP
	/* Requeue to a non-robust PI mutex if the PI bit is set and
	the robust bit is not set.  */
	movl	MUTEX_KIND(%r8), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	jne	61f

	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
	xorl	%eax, %eax
	/* The following only works like this because we only support
	   two clocks, represented using a single bit.  */
	testl	$1, cond_nwaiters(%rdi)
	movl	$FUTEX_CLOCK_REALTIME, %edx
	cmove	%edx, %eax
	orl	%eax, %esi
	movq	%r12, %rdx
	addq	$cond_futex, %rdi
	movl	$SYS_futex, %eax
	syscall

	cmpl	$0, %eax
	sete	%r15b

#ifdef __ASSUME_REQUEUE_PI
	jmp	62f
#else
	je	62f

	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
	   successfully, it has already locked the mutex for us and the
	   pi_flag (%r15b) is set to denote that fact.  However, if another
	   thread changed the futex value before we entered the wait, the
	   syscall may return an EAGAIN and the mutex is not locked.  We go
	   ahead with a success anyway since later we look at the pi_flag to
	   decide if we got the mutex or not.  The sequence numbers then make
	   sure that only one of the threads actually wake up.  We retry using
	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
	   and PI futexes don't mix.

	   Note that we don't check for EAGAIN specifically; we assume that the
	   only other error the futex function could return is EAGAIN (barring
	   the ETIMEOUT of course, for the timeout case in futex) since
	   anything else would mean an error in our function.  It is too
	   expensive to do that check for every call (which is  quite common in
	   case of a large number of threads), so it has been skipped.  */
	cmpl    $-ENOSYS, %eax
	jne     62f

	subq	$cond_futex, %rdi
#endif

61:	movl	$(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
60:	xorb	%r15b, %r15b
	xorl	%eax, %eax
	/* The following only works like this because we only support
	   two clocks, represented using a single bit.  */
	testl	$1, cond_nwaiters(%rdi)
	movl	$FUTEX_CLOCK_REALTIME, %edx
	movl	$0xffffffff, %r9d
	cmove	%edx, %eax
	orl	%eax, %esi
	movq	%r12, %rdx
	addq	$cond_futex, %rdi
	movl	$SYS_futex, %eax
	syscall
62:	movq	%rax, %r14

	movl	(%rsp), %edi
	callq	__pthread_disable_asynccancel
.LcleanupEND1:

	/* Lock.  */
	movq	8(%rsp), %rdi
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jne	35f

	/* Decide, under the internal lock, whether this thread was really
	   woken: broadcast_seq changed -> 53; wakeup_seq advanced past our
	   snapshot and woken_seq lags it -> 39; otherwise check for
	   timeout at 45.  */
36:	movl	broadcast_seq(%rdi), %edx

	movq	woken_seq(%rdi), %rax

	movq	wakeup_seq(%rdi), %r9

	cmpl	4(%rsp), %edx
	jne	53f

	cmpq	24(%rsp), %r9
	jbe	45f

	cmpq	%rax, %r9
	ja	39f

45:	cmpq	$-ETIMEDOUT, %r14
	je	99f

	/* We need to go back to futex_wait.  If we're using requeue_pi, then
	   release the mutex we had acquired and go back.  */
	test	%r15b, %r15b
	jz	38b

	/* Adjust the mutex values first and then unlock it.  The unlock
	   should always succeed or else the kernel did not lock the
	   mutex correctly.  */
	movq	%r8, %rdi
	callq	__pthread_mutex_cond_lock_adjust
	xorl	%esi, %esi
	callq	__pthread_mutex_unlock_usercnt
	/* Reload cond_var.  */
	movq	8(%rsp), %rdi
	jmp	38b

	/* Timed out: consume our own wakeup slot.  */
99:	incq	wakeup_seq(%rdi)
	incl	cond_futex(%rdi)
	movl	$ETIMEDOUT, %r14d
	jmp	44f

53:	xorq	%r14, %r14
	jmp	54f

39:	xorq	%r14, %r14
44:	incq	woken_seq(%rdi)

54:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Wake up a thread which wants to destroy the condvar object.  */
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
	jne	55f
	movl	cond_nwaiters(%rdi), %eax
	andl	$~((1 << nwaiters_shift) - 1), %eax
	jne	55f

	addq	$cond_nwaiters, %rdi
	LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
	movl	$1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall
	subq	$cond_nwaiters, %rdi

55:	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	40f

	/* If requeue_pi is used the kernel performs the locking of the
	   mutex. */
41:	movq	16(%rsp), %rdi
	testb	%r15b, %r15b
	jnz	64f

	callq	__pthread_mutex_cond_lock

	/* On success (rax == 0), return the wait status from %r14.  */
63:	testq	%rax, %rax
	cmoveq	%r14, %rax

	/* Common exit: unwind the frame and restore callee-saved regs.  */
48:	addq	$FRAME_SIZE, %rsp
	cfi_adjust_cfa_offset(-FRAME_SIZE)
	popq	%r15
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r15)
	popq	%r14
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r14)
	popq	%r13
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r13)
	popq	%r12
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r12)

	retq

	cfi_restore_state

64:	callq	__pthread_mutex_cond_lock_adjust
	movq	%r14, %rax
	jmp	48b

	/* Initial locking failed.  */
31:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
	jmp	32b

	/* Unlock in loop requires wakeup.  */
33:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	34b

	/* Locking in loop failed.  */
35:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif
	jmp	36b

	/* Unlock after loop requires wakeup.  */
40:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	41b

	/* The initial unlocking of the mutex failed.  */
46:	movq	8(%rsp), %rdi
	movq	%rax, (%rsp)
	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	47f

#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake

47:	movq	(%rsp), %rax
	jmp	48b


#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
	/* Fallback for kernels without FUTEX_CLOCK_REALTIME: convert the
	   absolute deadline to a relative timeout on every iteration and
	   wait with plain FUTEX_WAIT.  */
.Lreltmo:
	/* Get internal lock.  */
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
# if cond_lock == 0
	cmpxchgl %esi, (%rdi)
# else
	cmpxchgl %esi, cond_lock(%rdi)
# endif
	jnz	1f

	/* Unlock the mutex.  */
2:	movq	16(%rsp), %rdi
	xorl	%esi, %esi
	callq	__pthread_mutex_unlock_usercnt

	testl	%eax, %eax
	jne	46b

	movq	8(%rsp), %rdi
	incq	total_seq(%rdi)
	incl	cond_futex(%rdi)
	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Get and store current wakeup_seq value.  */
	movq	8(%rsp), %rdi
	movq	wakeup_seq(%rdi), %r9
	movl	broadcast_seq(%rdi), %edx
	movq	%r9, 24(%rsp)
	movl	%edx, 4(%rsp)

	/* Get the current time.  */
8:
# ifdef __NR_clock_gettime
	/* Get the clock number.  Note that the field in the condvar
	   structure stores the number minus 1.  */
	movq	8(%rsp), %rdi
	movl	cond_nwaiters(%rdi), %edi
	andl	$((1 << nwaiters_shift) - 1), %edi
	/* Only clocks 0 and 1 are allowed so far.  Both are handled in the
	   kernel.  */
	leaq	32(%rsp), %rsi
#  ifdef SHARED
	mov	__vdso_clock_gettime@GOTPCREL(%rip), %RAX_LP
	mov	(%rax), %RAX_LP
	PTR_DEMANGLE (%RAX_LP)
	call	*%rax
#  else
	movl	$__NR_clock_gettime, %eax
	syscall
#  endif

	/* Compute relative timeout.  */
	movq	(%r13), %rcx
	movq	8(%r13), %rdx
	subq	32(%rsp), %rcx
	subq	40(%rsp), %rdx
# else
	leaq	24(%rsp), %rdi
	xorl	%esi, %esi
	/* This call works because we directly jump to a system call entry
	   which preserves all the registers.  */
	call	JUMPTARGET(__gettimeofday)

	/* Compute relative timeout.  */
	movq	40(%rsp), %rax
	movl	$1000, %edx
	mul	%rdx		/* Milli seconds to nano seconds.  */
	movq	(%r13), %rcx
	movq	8(%r13), %rdx
	subq	32(%rsp), %rcx
	subq	%rax, %rdx
# endif
	/* Normalize negative nanoseconds by borrowing one second.  */
	jns	12f
	addq	$1000000000, %rdx
	decq	%rcx
12:	testq	%rcx, %rcx
	movq	8(%rsp), %rdi
	movq	$-ETIMEDOUT, %r14
	js	6f

	/* Store relative timeout.  */
21:	movq	%rcx, 32(%rsp)
	movq	%rdx, 40(%rsp)

	movl	cond_futex(%rdi), %r12d

	/* Unlock.  */
	LOCK
# if cond_lock == 0
	decl	(%rdi)
# else
	decl	cond_lock(%rdi)
# endif
	jne	3f

.LcleanupSTART2:
4:	callq	__pthread_enable_asynccancel
	movl	%eax, (%rsp)

	leaq	32(%rsp), %r10
	LP_OP(cmp) $-1, dep_mutex(%rdi)
	movq	%r12, %rdx
# ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAIT, %eax
	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
# else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
#  if FUTEX_WAIT != 0
	orl	$FUTEX_WAIT, %esi
#  endif
# endif
	addq	$cond_futex, %rdi
	movl	$SYS_futex, %eax
	syscall
	movq	%rax, %r14

	movl	(%rsp), %edi
	callq	__pthread_disable_asynccancel
.LcleanupEND2:

	/* Lock.  */
	movq	8(%rsp), %rdi
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
# if cond_lock == 0
	cmpxchgl %esi, (%rdi)
# else
	cmpxchgl %esi, cond_lock(%rdi)
# endif
	jne	5f

6:	movl	broadcast_seq(%rdi), %edx

	movq	woken_seq(%rdi), %rax

	movq	wakeup_seq(%rdi), %r9

	cmpl	4(%rsp), %edx
	jne	53b

	cmpq	24(%rsp), %r9
	jbe	15f

	cmpq	%rax, %r9
	ja	39b

15:	cmpq	$-ETIMEDOUT, %r14
	jne	8b

	jmp	99b

	/* Initial locking failed.  */
1:
# if cond_lock != 0
	addq	$cond_lock, %rdi
# endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
	jmp	2b

	/* Unlock in loop requires wakeup.  */
3:
# if cond_lock != 0
	addq	$cond_lock, %rdi
# endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	4b

	/* Locking in loop failed.  */
5:
# if cond_lock != 0
	addq	$cond_lock, %rdi
# endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
# if cond_lock != 0
	subq	$cond_lock, %rdi
# endif
	jmp	6b
#endif
	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
		  GLIBC_2_3_2)
644
645
646
	.align	16
	/* Cancellation/unwind cleanup for the cancellable futex waits in
	   __pthread_cond_timedwait (landing pad in the LSDA below): it
	   re-acquires the condvar's internal lock, undoes this waiter's
	   bookkeeping (wakeup_seq/woken_seq/cond_nwaiters), wakes all
	   remaining waiters so no signal gets lost, re-locks the user
	   mutex, restores the saved callee-saved registers from the
	   interrupted frame and resumes unwinding via _Unwind_Resume.
	   On entry %rax holds the unwinder's exception object; it is
	   parked at 24(%rsp) and reloaded into %rdi for _Unwind_Resume.
	   %r12 != 0 records that the destroy-wakeup already woke
	   everyone, making the "wake all" step at 2 unnecessary.  */
	.type	__condvar_cleanup2, @function
__condvar_cleanup2:
	/* Stack frame:

	   rsp + 72
		    +--------------------------+
	   rsp + 64 | %r12                     |
		    +--------------------------+
	   rsp + 56 | %r13                     |
		    +--------------------------+
	   rsp + 48 | %r14                     |
		    +--------------------------+
	   rsp + 24 | unused                   |
		    +--------------------------+
	   rsp + 16 | mutex pointer            |
		    +--------------------------+
	   rsp +  8 | condvar pointer          |
		    +--------------------------+
	   rsp +  4 | old broadcast_seq value  |
		    +--------------------------+
	   rsp +  0 | old cancellation mode    |
		    +--------------------------+
	*/

	movq	%rax, 24(%rsp)

	/* Get internal lock.  */
	movq	8(%rsp), %rdi
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jz	1f

#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif

1:	movl	broadcast_seq(%rdi), %edx
	cmpl	4(%rsp), %edx
	jne	3f

	/* We increment the wakeup_seq counter only if it is lower than
	   total_seq.  If this is not the case the thread was woken and
	   then canceled.  In this case we ignore the signal.  */
	movq	total_seq(%rdi), %rax
	cmpq	wakeup_seq(%rdi), %rax
	jbe	6f
	incq	wakeup_seq(%rdi)
	incl	cond_futex(%rdi)
6:	incq	woken_seq(%rdi)

3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Wake up a thread which wants to destroy the condvar object.  */
	xorq	%r12, %r12
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
	jne	4f
	movl	cond_nwaiters(%rdi), %eax
	andl	$~((1 << nwaiters_shift) - 1), %eax
	jne	4f

	LP_OP(cmp) $-1, dep_mutex(%rdi)
	leaq	cond_nwaiters(%rdi), %rdi
	movl	$1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall
	subq	$cond_nwaiters, %rdi
	movl	$1, %r12d

4:	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	je	2f
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake

	/* Wake up all waiters to make sure no signal gets lost.  */
2:	testq	%r12, %r12
	jnz	5f
	addq	$cond_futex, %rdi
	LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
	movl	$0x7fffffff, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall

	/* Lock the mutex only if we don't own it already.  This only happens
	   in case of PI mutexes, if we got cancelled after a successful
	   return of the futex syscall and before disabling async
	   cancellation.  */
5:	movq	16(%rsp), %rdi
	movl	MUTEX_KIND(%rdi), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	jne	7f

	movl	(%rdi), %eax
	andl	$TID_MASK, %eax
	cmpl	%eax, %fs:TID
	jne	7f
	/* We managed to get the lock.  Fix it up before returning.  */
	callq	__pthread_mutex_cond_lock_adjust
	jmp	8f

7:	callq	__pthread_mutex_cond_lock

	/* Restore the registers __pthread_cond_timedwait pushed (they sit
	   just above its FRAME_SIZE area) and resume unwinding.  */
8:	movq	24(%rsp), %rdi
	movq	FRAME_SIZE(%rsp), %r15
	movq	FRAME_SIZE+8(%rsp), %r14
	movq	FRAME_SIZE+16(%rsp), %r13
	movq	FRAME_SIZE+24(%rsp), %r12
.LcallUR:
	call	_Unwind_Resume@PLT
	hlt
.LENDCODE:
	cfi_endproc
	.size	__condvar_cleanup2, .-__condvar_cleanup2
805
806
807
	/* LSDA referenced by the cfi_lsda annotation in
	   __pthread_cond_timedwait: a DWARF EH call-site table mapping the
	   cancellable wait regions (.LcleanupSTART1/2 .. .LcleanupEND1/2)
	   to __condvar_cleanup2 as their landing pad.  The .LcallUR entry
	   covers the _Unwind_Resume call itself with landing pad 0 (no
	   further cleanup while resuming).  */
	.section .gcc_except_table,"a",@progbits
.LexceptSTART:
	.byte	DW_EH_PE_omit			# @LPStart format
	.byte	DW_EH_PE_omit			# @TType format
	.byte	DW_EH_PE_uleb128		# call-site format
	.uleb128 .Lcstend-.Lcstbegin
.Lcstbegin:
	.uleb128 .LcleanupSTART1-.LSTARTCODE
	.uleb128 .LcleanupEND1-.LcleanupSTART1
	.uleb128 __condvar_cleanup2-.LSTARTCODE
	.uleb128  0
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
	.uleb128 .LcleanupSTART2-.LSTARTCODE
	.uleb128 .LcleanupEND2-.LcleanupSTART2
	.uleb128 __condvar_cleanup2-.LSTARTCODE
	.uleb128  0
#endif
	.uleb128 .LcallUR-.LSTARTCODE
	.uleb128 .LENDCODE-.LcallUR
	.uleb128 0
	.uleb128  0
.Lcstend:
829
830
831
#ifdef SHARED
	/* Indirect, weak, hidden reference to the C++ personality routine;
	   this is the object named by the cfi_personality annotation in
	   __pthread_cond_timedwait when built SHARED.  Placed in a
	   linkonce section so multiple objects emitting it collapse to
	   one copy.  */
	.hidden	DW.ref.__gcc_personality_v0
	.weak	DW.ref.__gcc_personality_v0
	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
	.align	LP_SIZE
	.type	DW.ref.__gcc_personality_v0, @object
	.size	DW.ref.__gcc_personality_v0, LP_SIZE
DW.ref.__gcc_personality_v0:
	ASM_ADDR __gcc_personality_v0
#endif
(-)a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S (-556 lines)
Lines 1-555 Link Here
1
/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
2
   This file is part of the GNU C Library.
3
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5
   The GNU C Library is free software; you can redistribute it and/or
6
   modify it under the terms of the GNU Lesser General Public
7
   License as published by the Free Software Foundation; either
8
   version 2.1 of the License, or (at your option) any later version.
9
10
   The GNU C Library is distributed in the hope that it will be useful,
11
   but WITHOUT ANY WARRANTY; without even the implied warranty of
12
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13
   Lesser General Public License for more details.
14
15
   You should have received a copy of the GNU Lesser General Public
16
   License along with the GNU C Library; if not, see
17
   <http://www.gnu.org/licenses/>.  */
18
19
#include <sysdep.h>
20
#include <shlib-compat.h>
21
#include <lowlevellock.h>
22
#include <lowlevelcond.h>
23
#include <tcb-offsets.h>
24
#include <pthread-pi-defines.h>
25
#include <pthread-errnos.h>
26
#include <stap-probe.h>
27
28
#include <kernel-features.h>
29
30
31
	.text
32
33
/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
34
	.globl	__pthread_cond_wait
35
	.type	__pthread_cond_wait, @function
36
	.align	16
37
__pthread_cond_wait:
38
.LSTARTCODE:
39
	cfi_startproc
40
#ifdef SHARED
41
	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
42
			DW.ref.__gcc_personality_v0)
43
	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
44
#else
45
	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
46
	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
47
#endif
48
49
#define FRAME_SIZE (32+8)
50
	leaq	-FRAME_SIZE(%rsp), %rsp
51
	cfi_adjust_cfa_offset(FRAME_SIZE)
52
53
	/* Stack frame:
54
55
	   rsp + 32
56
		    +--------------------------+
57
	   rsp + 24 | old wake_seq value       |
58
		    +--------------------------+
59
	   rsp + 16 | mutex pointer            |
60
		    +--------------------------+
61
	   rsp +  8 | condvar pointer          |
62
		    +--------------------------+
63
	   rsp +  4 | old broadcast_seq value  |
64
		    +--------------------------+
65
	   rsp +  0 | old cancellation mode    |
66
		    +--------------------------+
67
	*/
68
69
	LIBC_PROBE (cond_wait, 2, %rdi, %rsi)
70
71
	LP_OP(cmp) $-1, dep_mutex(%rdi)
72
73
	/* Prepare structure passed to cancellation handler.  */
74
	movq	%rdi, 8(%rsp)
75
	movq	%rsi, 16(%rsp)
76
77
	je	15f
78
	mov	%RSI_LP, dep_mutex(%rdi)
79
80
	/* Get internal lock.  */
81
15:	movl	$1, %esi
82
	xorl	%eax, %eax
83
	LOCK
84
#if cond_lock == 0
85
	cmpxchgl %esi, (%rdi)
86
#else
87
	cmpxchgl %esi, cond_lock(%rdi)
88
#endif
89
	jne	1f
90
91
	/* Unlock the mutex.  */
92
2:	movq	16(%rsp), %rdi
93
	xorl	%esi, %esi
94
	callq	__pthread_mutex_unlock_usercnt
95
96
	testl	%eax, %eax
97
	jne	12f
98
99
	movq	8(%rsp), %rdi
100
	incq	total_seq(%rdi)
101
	incl	cond_futex(%rdi)
102
	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
103
104
	/* Get and store current wakeup_seq value.  */
105
	movq	8(%rsp), %rdi
106
	movq	wakeup_seq(%rdi), %r9
107
	movl	broadcast_seq(%rdi), %edx
108
	movq	%r9, 24(%rsp)
109
	movl	%edx, 4(%rsp)
110
111
	/* Unlock.  */
112
8:	movl	cond_futex(%rdi), %edx
113
	LOCK
114
#if cond_lock == 0
115
	decl	(%rdi)
116
#else
117
	decl	cond_lock(%rdi)
118
#endif
119
	jne	3f
120
121
.LcleanupSTART:
122
4:	callq	__pthread_enable_asynccancel
123
	movl	%eax, (%rsp)
124
125
	xorq	%r10, %r10
126
	LP_OP(cmp) $-1, dep_mutex(%rdi)
127
	leaq	cond_futex(%rdi), %rdi
128
	movl	$FUTEX_WAIT, %esi
129
	je	60f
130
131
	mov	dep_mutex-cond_futex(%rdi), %R8_LP
132
	/* Requeue to a non-robust PI mutex if the PI bit is set and
133
	the robust bit is not set.  */
134
	movl	MUTEX_KIND(%r8), %eax
135
	andl	$(ROBUST_BIT|PI_BIT), %eax
136
	cmpl	$PI_BIT, %eax
137
	jne	61f
138
139
	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
140
	movl	$SYS_futex, %eax
141
	syscall
142
143
	cmpl	$0, %eax
144
	sete	%r8b
145
146
#ifdef __ASSUME_REQUEUE_PI
147
	jmp	62f
148
#else
149
	je	62f
150
151
	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
152
	   successfully, it has already locked the mutex for us and the
153
	   pi_flag (%r8b) is set to denote that fact.  However, if another
154
	   thread changed the futex value before we entered the wait, the
155
	   syscall may return an EAGAIN and the mutex is not locked.  We go
156
	   ahead with a success anyway since later we look at the pi_flag to
157
	   decide if we got the mutex or not.  The sequence numbers then make
158
	   sure that only one of the threads actually wake up.  We retry using
159
	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
160
	   and PI futexes don't mix.
161
162
	   Note that we don't check for EAGAIN specifically; we assume that the
163
	   only other error the futex function could return is EAGAIN since
164
	   anything else would mean an error in our function.  It is too
165
	   expensive to do that check for every call (which is 	quite common in
166
	   case of a large number of threads), so it has been skipped.  */
167
	cmpl	$-ENOSYS, %eax
168
	jne	62f
169
170
# ifndef __ASSUME_PRIVATE_FUTEX
171
	movl	$FUTEX_WAIT, %esi
172
# endif
173
#endif
174
175
61:
176
#ifdef __ASSUME_PRIVATE_FUTEX
177
	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
178
#else
179
	orl	%fs:PRIVATE_FUTEX, %esi
180
#endif
181
60:	xorb	%r8b, %r8b
182
	movl	$SYS_futex, %eax
183
	syscall
184
185
62:	movl	(%rsp), %edi
186
	callq	__pthread_disable_asynccancel
187
.LcleanupEND:
188
189
	/* Lock.  */
190
	movq	8(%rsp), %rdi
191
	movl	$1, %esi
192
	xorl	%eax, %eax
193
	LOCK
194
#if cond_lock == 0
195
	cmpxchgl %esi, (%rdi)
196
#else
197
	cmpxchgl %esi, cond_lock(%rdi)
198
#endif
199
	jnz	5f
200
201
6:	movl	broadcast_seq(%rdi), %edx
202
203
	movq	woken_seq(%rdi), %rax
204
205
	movq	wakeup_seq(%rdi), %r9
206
207
	cmpl	4(%rsp), %edx
208
	jne	16f
209
210
	cmpq	24(%rsp), %r9
211
	jbe	19f
212
213
	cmpq	%rax, %r9
214
	jna	19f
215
216
	incq	woken_seq(%rdi)
217
218
	/* Unlock */
219
16:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
220
221
	/* Wake up a thread which wants to destroy the condvar object.  */
222
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
223
	jne	17f
224
	movl	cond_nwaiters(%rdi), %eax
225
	andl	$~((1 << nwaiters_shift) - 1), %eax
226
	jne	17f
227
228
	addq	$cond_nwaiters, %rdi
229
	LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
230
	movl	$1, %edx
231
#ifdef __ASSUME_PRIVATE_FUTEX
232
	movl	$FUTEX_WAKE, %eax
233
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
234
	cmove	%eax, %esi
235
#else
236
	movl	$0, %eax
237
	movl	%fs:PRIVATE_FUTEX, %esi
238
	cmove	%eax, %esi
239
	orl	$FUTEX_WAKE, %esi
240
#endif
241
	movl	$SYS_futex, %eax
242
	syscall
243
	subq	$cond_nwaiters, %rdi
244
245
17:	LOCK
246
#if cond_lock == 0
247
	decl	(%rdi)
248
#else
249
	decl	cond_lock(%rdi)
250
#endif
251
	jne	10f
252
253
	/* If requeue_pi is used the kernel performs the locking of the
254
	   mutex. */
255
11:	movq	16(%rsp), %rdi
256
	testb	%r8b, %r8b
257
	jnz	18f
258
259
	callq	__pthread_mutex_cond_lock
260
261
14:	leaq	FRAME_SIZE(%rsp), %rsp
262
	cfi_adjust_cfa_offset(-FRAME_SIZE)
263
264
	/* We return the result of the mutex_lock operation.  */
265
	retq
266
267
	cfi_adjust_cfa_offset(FRAME_SIZE)
268
269
18:	callq	__pthread_mutex_cond_lock_adjust
270
	xorl	%eax, %eax
271
	jmp	14b
272
273
	/* We need to go back to futex_wait.  If we're using requeue_pi, then
274
	   release the mutex we had acquired and go back.  */
275
19:	testb	%r8b, %r8b
276
	jz	8b
277
278
	/* Adjust the mutex values first and then unlock it.  The unlock
279
	   should always succeed or else the kernel did not lock the mutex
280
	   correctly.  */
281
	movq	16(%rsp), %rdi
282
	callq	__pthread_mutex_cond_lock_adjust
283
	movq	%rdi, %r8
284
	xorl	%esi, %esi
285
	callq	__pthread_mutex_unlock_usercnt
286
	/* Reload cond_var.  */
287
	movq	8(%rsp), %rdi
288
	jmp	8b
289
290
	/* Initial locking failed.  */
291
1:
292
#if cond_lock != 0
293
	addq	$cond_lock, %rdi
294
#endif
295
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
296
	movl	$LLL_PRIVATE, %eax
297
	movl	$LLL_SHARED, %esi
298
	cmovne	%eax, %esi
299
	callq	__lll_lock_wait
300
	jmp	2b
301
302
	/* Unlock in loop requires wakeup.  */
303
3:
304
#if cond_lock != 0
305
	addq	$cond_lock, %rdi
306
#endif
307
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
308
	movl	$LLL_PRIVATE, %eax
309
	movl	$LLL_SHARED, %esi
310
	cmovne	%eax, %esi
311
	/* The call preserves %rdx.  */
312
	callq	__lll_unlock_wake
313
#if cond_lock != 0
314
	subq	$cond_lock, %rdi
315
#endif
316
	jmp	4b
317
318
	/* Locking in loop failed.  */
319
5:
320
#if cond_lock != 0
321
	addq	$cond_lock, %rdi
322
#endif
323
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
324
	movl	$LLL_PRIVATE, %eax
325
	movl	$LLL_SHARED, %esi
326
	cmovne	%eax, %esi
327
	callq	__lll_lock_wait
328
#if cond_lock != 0
329
	subq	$cond_lock, %rdi
330
#endif
331
	jmp	6b
332
333
	/* Unlock after loop requires wakeup.  */
334
10:
335
#if cond_lock != 0
336
	addq	$cond_lock, %rdi
337
#endif
338
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
339
	movl	$LLL_PRIVATE, %eax
340
	movl	$LLL_SHARED, %esi
341
	cmovne	%eax, %esi
342
	callq	__lll_unlock_wake
343
	jmp	11b
344
345
	/* The initial unlocking of the mutex failed.  */
346
12:	movq	%rax, %r10
347
	movq	8(%rsp), %rdi
348
	LOCK
349
#if cond_lock == 0
350
	decl	(%rdi)
351
#else
352
	decl	cond_lock(%rdi)
353
#endif
354
	je	13f
355
356
#if cond_lock != 0
357
	addq	$cond_lock, %rdi
358
#endif
359
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
360
	movl	$LLL_PRIVATE, %eax
361
	movl	$LLL_SHARED, %esi
362
	cmovne	%eax, %esi
363
	callq	__lll_unlock_wake
364
365
13:	movq	%r10, %rax
366
	jmp	14b
367
368
	.size	__pthread_cond_wait, .-__pthread_cond_wait
369
versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
370
		  GLIBC_2_3_2)
371
372
373
	.align	16
374
	.type	__condvar_cleanup1, @function
375
	.globl	__condvar_cleanup1
376
	.hidden	__condvar_cleanup1
377
__condvar_cleanup1:
378
	/* Stack frame:
379
380
	   rsp + 32
381
		    +--------------------------+
382
	   rsp + 24 | unused                   |
383
		    +--------------------------+
384
	   rsp + 16 | mutex pointer            |
385
		    +--------------------------+
386
	   rsp +  8 | condvar pointer          |
387
		    +--------------------------+
388
	   rsp +  4 | old broadcast_seq value  |
389
		    +--------------------------+
390
	   rsp +  0 | old cancellation mode    |
391
		    +--------------------------+
392
	*/
393
394
	movq	%rax, 24(%rsp)
395
396
	/* Get internal lock.  */
397
	movq	8(%rsp), %rdi
398
	movl	$1, %esi
399
	xorl	%eax, %eax
400
	LOCK
401
#if cond_lock == 0
402
	cmpxchgl %esi, (%rdi)
403
#else
404
	cmpxchgl %esi, cond_lock(%rdi)
405
#endif
406
	jz	1f
407
408
#if cond_lock != 0
409
	addq	$cond_lock, %rdi
410
#endif
411
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
412
	movl	$LLL_PRIVATE, %eax
413
	movl	$LLL_SHARED, %esi
414
	cmovne	%eax, %esi
415
	callq	__lll_lock_wait
416
#if cond_lock != 0
417
	subq	$cond_lock, %rdi
418
#endif
419
420
1:	movl	broadcast_seq(%rdi), %edx
421
	cmpl	4(%rsp), %edx
422
	jne	3f
423
424
	/* We increment the wakeup_seq counter only if it is lower than
425
	   total_seq.  If this is not the case the thread was woken and
426
	   then canceled.  In this case we ignore the signal.  */
427
	movq	total_seq(%rdi), %rax
428
	cmpq	wakeup_seq(%rdi), %rax
429
	jbe	6f
430
	incq	wakeup_seq(%rdi)
431
	incl	cond_futex(%rdi)
432
6:	incq	woken_seq(%rdi)
433
434
3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
435
436
	/* Wake up a thread which wants to destroy the condvar object.  */
437
	xorl	%ecx, %ecx
438
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
439
	jne	4f
440
	movl	cond_nwaiters(%rdi), %eax
441
	andl	$~((1 << nwaiters_shift) - 1), %eax
442
	jne	4f
443
444
	LP_OP(cmp) $-1, dep_mutex(%rdi)
445
	leaq	cond_nwaiters(%rdi), %rdi
446
	movl	$1, %edx
447
#ifdef __ASSUME_PRIVATE_FUTEX
448
	movl	$FUTEX_WAKE, %eax
449
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
450
	cmove	%eax, %esi
451
#else
452
	movl	$0, %eax
453
	movl	%fs:PRIVATE_FUTEX, %esi
454
	cmove	%eax, %esi
455
	orl	$FUTEX_WAKE, %esi
456
#endif
457
	movl	$SYS_futex, %eax
458
	syscall
459
	subq	$cond_nwaiters, %rdi
460
	movl	$1, %ecx
461
462
4:	LOCK
463
#if cond_lock == 0
464
	decl	(%rdi)
465
#else
466
	decl	cond_lock(%rdi)
467
#endif
468
	je	2f
469
#if cond_lock != 0
470
	addq	$cond_lock, %rdi
471
#endif
472
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
473
	movl	$LLL_PRIVATE, %eax
474
	movl	$LLL_SHARED, %esi
475
	cmovne	%eax, %esi
476
	/* The call preserves %rcx.  */
477
	callq	__lll_unlock_wake
478
479
	/* Wake up all waiters to make sure no signal gets lost.  */
480
2:	testl	%ecx, %ecx
481
	jnz	5f
482
	addq	$cond_futex, %rdi
483
	LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
484
	movl	$0x7fffffff, %edx
485
#ifdef __ASSUME_PRIVATE_FUTEX
486
	movl	$FUTEX_WAKE, %eax
487
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
488
	cmove	%eax, %esi
489
#else
490
	movl	$0, %eax
491
	movl	%fs:PRIVATE_FUTEX, %esi
492
	cmove	%eax, %esi
493
	orl	$FUTEX_WAKE, %esi
494
#endif
495
	movl	$SYS_futex, %eax
496
	syscall
497
498
	/* Lock the mutex only if we don't own it already.  This only happens
499
	   in case of PI mutexes, if we got cancelled after a successful
500
	   return of the futex syscall and before disabling async
501
	   cancellation.  */
502
5:	movq	16(%rsp), %rdi
503
	movl	MUTEX_KIND(%rdi), %eax
504
	andl	$(ROBUST_BIT|PI_BIT), %eax
505
	cmpl	$PI_BIT, %eax
506
	jne	7f
507
508
	movl	(%rdi), %eax
509
	andl	$TID_MASK, %eax
510
	cmpl	%eax, %fs:TID
511
	jne	7f
512
	/* We managed to get the lock.  Fix it up before returning.  */
513
	callq	__pthread_mutex_cond_lock_adjust
514
	jmp	8f
515
516
517
7:	callq	__pthread_mutex_cond_lock
518
519
8:	movq	24(%rsp), %rdi
520
.LcallUR:
521
	call	_Unwind_Resume@PLT
522
	hlt
523
.LENDCODE:
524
	cfi_endproc
525
	.size	__condvar_cleanup1, .-__condvar_cleanup1
526
527
528
	.section .gcc_except_table,"a",@progbits
529
.LexceptSTART:
530
	.byte	DW_EH_PE_omit			# @LPStart format
531
	.byte	DW_EH_PE_omit			# @TType format
532
	.byte	DW_EH_PE_uleb128		# call-site format
533
	.uleb128 .Lcstend-.Lcstbegin
534
.Lcstbegin:
535
	.uleb128 .LcleanupSTART-.LSTARTCODE
536
	.uleb128 .LcleanupEND-.LcleanupSTART
537
	.uleb128 __condvar_cleanup1-.LSTARTCODE
538
	.uleb128 0
539
	.uleb128 .LcallUR-.LSTARTCODE
540
	.uleb128 .LENDCODE-.LcallUR
541
	.uleb128 0
542
	.uleb128 0
543
.Lcstend:
544
545
546
#ifdef SHARED
547
	.hidden	DW.ref.__gcc_personality_v0
548
	.weak	DW.ref.__gcc_personality_v0
549
	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
550
	.align	LP_SIZE
551
	.type	DW.ref.__gcc_personality_v0, @object
552
	.size	DW.ref.__gcc_personality_v0, LP_SIZE
553
DW.ref.__gcc_personality_v0:
554
	ASM_ADDR __gcc_personality_v0
555
#endif
556
- 

Return to bug 11588