/* glibc linuxthreads/pthread.c.
   NOTE: this copy was recovered from a sourceware.org git-blame page;
   blame commit annotations have been stripped where edited.  */
2/* Linuxthreads - a simple clone()-based implementation of Posix */
3/* threads for Linux. */
4/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5/* */
6/* This program is free software; you can redistribute it and/or */
7/* modify it under the terms of the GNU Library General Public License */
8/* as published by the Free Software Foundation; either version 2 */
9/* of the License, or (at your option) any later version. */
10/* */
11/* This program is distributed in the hope that it will be useful, */
12/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14/* GNU Library General Public License for more details. */
15
16/* Thread creation, initialization, and basic low-level routines */
17
4959e310 18#include <errno.h>
5afdca00
UD
19#include <stddef.h>
20#include <stdio.h>
21#include <stdlib.h>
22#include <string.h>
23#include <unistd.h>
24#include <fcntl.h>
25#include <sys/wait.h>
ddbf7fef 26#include <sys/resource.h>
64ca3f32 27#include <sys/time.h>
0bf98029 28#include <shlib-compat.h>
5afdca00
UD
29#include "pthread.h"
30#include "internals.h"
31#include "spinlock.h"
32#include "restart.h"
64ca3f32 33#include "smp.h"
5688da55 34#include <ldsodefs.h>
557fab43 35#include <tls.h>
069125e5 36#include <version.h>
5afdca00 37
/* Sanity check.  The library reserves three consecutive real-time
   signals (restart, cancel, debug -- see __pthread_sig_* below), so the
   platform must provide at least that many between __SIGRTMIN and
   __SIGRTMAX.  */
#if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
# error "This must not happen"
#endif

#if !(USE_TLS && HAVE___THREAD)
/* These variables are used by the setup code.  They hold the initial
   thread's errno/h_errno so the descriptor initializers below can take
   their addresses.  */
extern int _errno;
extern int _h_errno;

/* We need the global/static resolver state here.  */
# include <resolv.h>
# undef _res

extern struct __res_state _res;
#endif
#ifdef USE_TLS

/* We need only a few variables.  With TLS the descriptors live in the
   thread-local storage block, so only a pointer to the manager's
   descriptor is kept here (set in __pthread_initialize_manager).  */
static pthread_descr manager_thread;

#else

/* Descriptor of the initial thread.  Statically initialized; the
   live-thread ring initially contains only this thread (next/prev point
   to itself).  */

struct _pthread_descr_struct __pthread_initial_thread = {
  .p_header.data.self = &__pthread_initial_thread,
  .p_nextlive = &__pthread_initial_thread,
  .p_prevlive = &__pthread_initial_thread,
  .p_tid = PTHREAD_THREADS_MAX,
  .p_lock = &__pthread_handles[0].h_lock,
  .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
#if !(USE_TLS && HAVE___THREAD)
  .p_errnop = &_errno,
  .p_h_errnop = &_h_errno,
  .p_resp = &_res,
#endif
  .p_userstack = 1,             /* the initial thread runs on the process stack */
  .p_resume_count = __ATOMIC_INITIALIZER,
  .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
};

/* Descriptor of the manager thread; none of this is used but the error
   variables, the p_pid and p_priority fields,
   and the address for identification.  */

#define manager_thread (&__pthread_manager_thread)
struct _pthread_descr_struct __pthread_manager_thread = {
  .p_header.data.self = &__pthread_manager_thread,
  .p_header.data.multiple_threads = 1,
  .p_lock = &__pthread_handles[1].h_lock,
  .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
#if !(USE_TLS && HAVE___THREAD)
  .p_errnop = &__pthread_manager_thread.p_errno,
#endif
  .p_nr = 1,
  .p_resume_count = __ATOMIC_INITIALIZER,
  .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
};
#endif

/* Pointer to the main thread (the father of the thread manager thread) */
/* Originally, this is the initial thread, but this changes after fork() */

#ifdef USE_TLS
pthread_descr __pthread_main_thread;
#else
pthread_descr __pthread_main_thread = &__pthread_initial_thread;
#endif

/* Limit between the stack of the initial thread (above) and the
   stacks of other threads (below). Aligned on a STACK_SIZE boundary. */

char *__pthread_initial_thread_bos;

/* File descriptor for sending requests to the thread manager. */
/* Initially -1, meaning that the thread manager is not running. */

int __pthread_manager_request = -1;

/* Set to 1 once more than one thread exists (see
   __pthread_initialize_manager).  */
int __pthread_multiple_threads attribute_hidden;

/* Other end of the pipe for sending requests to the thread manager. */

int __pthread_manager_reader;

/* Limits of the thread manager stack */

char *__pthread_manager_thread_bos;
char *__pthread_manager_thread_tos;

/* For process-wide exit() */

int __pthread_exit_requested;
int __pthread_exit_code;

/* Maximum stack size.  */
size_t __pthread_max_stacksize;

/* Nonzero if the machine has more than one processor.  */
int __pthread_smp_kernel;

#if !__ASSUME_REALTIME_SIGNALS
/* Pointers that select new or old suspend/resume functions
   based on availability of rt signals.  They start out pointing at the
   old (non-RT) implementations and are switched to the _new variants by
   init_rtsigs() once RT signals are confirmed available.  */

void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
#endif	/* __ASSUME_REALTIME_SIGNALS */

/* Communicate relevant LinuxThreads constants to gdb */

const int __pthread_threads_max = PTHREAD_THREADS_MAX;
const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
                                              h_descr);
const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
                                            p_pid);
const int __linuxthreads_pthread_sizeof_descr
  = sizeof(struct _pthread_descr_struct);

/* Debugger hook: nonzero requests TD_CREATE event reporting for the
   initial thread (read in __pthread_initialize_manager).  */
const int __linuxthreads_initial_report_events;

const char __linuxthreads_version[] = VERSION;

/* Forward declarations */

static void pthread_onexit_process(int retcode, void *arg);
#ifndef HAVE_Z_NODELETE
static void pthread_atexit_process(void *arg, int retcode);
static void pthread_atexit_retcode(void *arg, int retcode);
#endif
static void pthread_handle_sigcancel(int sig);
static void pthread_handle_sigrestart(int sig);
static void pthread_handle_sigdebug(int sig);

/* Signal numbers used for the communication.
   In these variables we keep track of the used variables.  If the
   platform does not support any real-time signals we will define the
   values to some unreasonable value which will signal failing of all
   the functions below.  */
int __pthread_sig_restart = __SIGRTMIN;
int __pthread_sig_cancel = __SIGRTMIN + 1;
int __pthread_sig_debug = __SIGRTMIN + 2;

extern int __libc_current_sigrtmin_private (void);

#if !__ASSUME_REALTIME_SIGNALS
/* Guard so the runtime signal probe below runs at most once.  */
static int rtsigs_initialized;

/* Decide at run time whether real-time signals are available.  If they
   are not (__libc_current_sigrtmin_private returns -1), fall back to
   SIGUSR1/SIGUSR2 and disable the debug signal; otherwise switch the
   suspend/resume function pointers to the RT-signal implementations.  */
static void
init_rtsigs (void)
{
  if (rtsigs_initialized)
    return;

  if (__libc_current_sigrtmin_private () == -1)
    {
      /* No RT signals: reuse the classic user signals; 0 disables the
	 debug-signal protocol entirely.  */
      __pthread_sig_restart = SIGUSR1;
      __pthread_sig_cancel = SIGUSR2;
      __pthread_sig_debug = 0;
    }
  else
    {
      /* RT signals available: the __pthread_sig_* defaults based on
	 __SIGRTMIN stay in effect; select the new implementations.  */
      __pthread_restart = __pthread_restart_new;
      __pthread_suspend = __pthread_wait_for_restart_signal;
      __pthread_timedsuspend = __pthread_timedsuspend_new;
    }

  rtsigs_initialized = 1;
}
#endif


/* Initialize the pthread library.
   Initialization is split in two functions:
   - a constructor function that blocks the __pthread_sig_restart signal
     (must do this very early, since the program could capture the signal
      mask with e.g. sigsetjmp before creating the first thread);
   - a regular function called from pthread_create when needed. */

static void pthread_initialize(void) __attribute__((constructor));

#ifndef HAVE_Z_NODELETE
/* Weak so its address can be tested: non-NULL means __cxa_atexit-style
   registration against this DSO is possible.  */
extern void *__dso_handle __attribute__ ((weak));
#endif


#if defined USE_TLS && !defined SHARED
/* Static-link case: libc must set up the TLS data structures itself.  */
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif

/* Table of pointers to this library's core entry points.  Handed to
   libc via __libc_pthread_init (see __pthread_initialize_minimal) so
   libc can forward pthread calls into libpthread when it is loaded.  */
struct pthread_functions __pthread_functions =
  {
#if !(USE_TLS && HAVE___THREAD)
    .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
    .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
    .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
#endif
    .ptr_pthread_fork = __pthread_fork,
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
#endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr_pthread_do_exit = __pthread_do_exit,
    .ptr_pthread_thread_self = __pthread_thread_self,
    .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr_pthread_sigaction = __pthread_sigaction,
    .ptr_pthread_sigwait = __pthread_sigwait,
    .ptr_pthread_raise = __pthread_raise
  };
#ifdef SHARED
# define ptr_pthread_functions &__pthread_functions
#else
# define ptr_pthread_functions NULL
#endif

/* Location of libc's "multiple threads" flag, obtained from
   __libc_pthread_init; set to 1 when the manager starts.  */
static int *__libc_multiple_threads_ptr;

/* Do some minimal initialization which has to be done during the
   startup of the C library.  Initializes the handle table slots for the
   initial and manager threads, sets up TLS where configured, fills in
   the initial thread's descriptor, and registers this library's
   function table with libc.  */
void
__pthread_initialize_minimal(void)
{
#ifdef USE_TLS
  pthread_descr self;

  /* First of all init __pthread_handles[0] and [1] if needed.  */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif
# ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
# elif !USE___THREAD
  if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
    {
      tcbhead_t *tcbp;

      /* There is no actual TLS being used, so the thread register
	 was not initialized in the dynamic linker.  */

      /* We need to install special hooks so that the malloc and memalign
	 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
	 malloc initialization that will try to set up its thread state.  */

      extern void __libc_malloc_pthread_startup (bool first_time);
      __libc_malloc_pthread_startup (true);

      if (__builtin_expect (_dl_tls_setup (), 0)
	  || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
	{
	  static const char msg[] = "\
cannot allocate TLS data structures for initial thread\n";
	  TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
					    msg, sizeof msg - 1));
	  abort ();
	}
      const char *lossage = TLS_INIT_TP (tcbp, 0);
      if (__builtin_expect (lossage != NULL, 0))
	{
	  static const char msg[] = "cannot set up thread-local storage: ";
	  const char nl = '\n';
	  TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
					    msg, sizeof msg - 1));
	  TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
					    lossage, strlen (lossage)));
	  TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO, &nl, 1));
	}

      /* Though it was allocated with libc's malloc, that was done without
	 the user's __malloc_hook installed.  A later realloc that uses
	 the hooks might not work with that block from the plain malloc.
	 So we record this block as unfreeable just as the dynamic linker
	 does when it allocates the DTV before the libc malloc exists.  */
      GL(dl_initial_dtv) = GET_DTV (tcbp);

      __libc_malloc_pthread_startup (false);
    }
# endif

  self = THREAD_SELF;

  /* The memory for the thread descriptor was allocated elsewhere as
     part of the TLS allocation.  We have to initialize the data
     structure by hand.  This initialization must mirror the struct
     definition above.  */
  self->p_nextlive = self->p_prevlive = self;
  self->p_tid = PTHREAD_THREADS_MAX;
  self->p_lock = &__pthread_handles[0].h_lock;
# ifndef HAVE___THREAD
  self->p_errnop = &_errno;
  self->p_h_errnop = &_h_errno;
# endif
  /* self->p_start_args need not be initialized, it's all zero.  */
  self->p_userstack = 1;
# if __LT_SPINLOCK_INIT != 0
  self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
# endif
  self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;

  /* Another variable which points to the thread descriptor.  */
  __pthread_main_thread = self;

  /* And fill in the pointer in the thread __pthread_handles array.  */
  __pthread_handles[0].h_descr = self;

#else  /* USE_TLS */

  /* First of all init __pthread_handles[0] and [1].  */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif
  __pthread_handles[0].h_descr = &__pthread_initial_thread;
  __pthread_handles[1].h_descr = &__pthread_manager_thread;

  /* If we have special thread_self processing, initialize that for the
     main thread now.  */
# ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
# endif
#endif

#if HP_TIMING_AVAIL
# ifdef USE_TLS
  self->p_cpuclock_offset = GL(dl_cpuclock_offset);
# else
  __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
# endif
#endif

  /* Register our function table with libc; the return value is libc's
     multiple-threads flag, flipped when the manager starts.  */
  __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
}


/* Compute the maximum per-thread stack size from RLIMIT_STACK (or the
   compile-time STACK_SIZE layout) and record it in
   __pthread_max_stacksize; also shrink the current thread's alloca
   cutoff if the limit is small.  */
void
__pthread_init_max_stacksize(void)
{
  struct rlimit limit;
  size_t max_stack;

  /* NOTE(review): getrlimit's return value is not checked -- presumably
     RLIMIT_STACK cannot fail here; confirm limit is always written.  */
  getrlimit(RLIMIT_STACK, &limit);
#ifdef FLOATING_STACKS
  if (limit.rlim_cur == RLIM_INFINITY)
    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
# ifdef NEED_SEPARATE_REGISTER_STACK
  max_stack = limit.rlim_cur / 2;
# else
  max_stack = limit.rlim_cur;
# endif
#else
  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus one page (to act as a guard page). */
# ifdef NEED_SEPARATE_REGISTER_STACK
  /* STACK_SIZE bytes hold both the main stack and register backing
     store. The rlimit value applies to each individually. */
  max_stack = STACK_SIZE/2 - __getpagesize ();
# else
  max_stack = STACK_SIZE - __getpagesize();
# endif
  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    setrlimit(RLIMIT_STACK, &limit);
  }
#endif
  __pthread_max_stacksize = max_stack;
  /* Keep alloca usage to at most a quarter of the smallest stack.  */
  if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
    {
#ifdef USE_TLS
      pthread_descr self = THREAD_SELF;
      self->p_alloca_cutoff = max_stack / 4;
#else
      __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
#endif
    }
}

#ifdef SHARED
# if USE___THREAD
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
# else
/* Per-thread slot for the dynamic linker's error state; installed as
   GL(dl_error_catch_tsd) in pthread_initialize below.  */
static void ** __attribute__ ((const))
__libc_dl_error_tsd (void)
{
  return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
}
# endif
#endif

/* Constructor-run one-time initialization: determine stack limits,
   record the initial thread's pid and resolver state, install the
   restart/cancel/debug signal handlers, adjust the signal mask, and
   register the process-exit handler that kills all other threads.  */
static void pthread_initialize(void)
{
  struct sigaction sa;
  sigset_t mask;

  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#endif
#ifdef FLOATING_STACKS
  /* We don't need to know the bottom of the stack.  Give the pointer some
     value to signal that initialization happened.  */
  __pthread_initial_thread_bos = (void *) -1l;
#else
  /* Determine stack size limits.  */
  __pthread_init_max_stacksize ();
# ifdef _STACK_GROWS_UP
  /* The initial thread already has all the stack it needs */
  __pthread_initial_thread_bos = (char *)
    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
# else
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
     below the current stack address, and align that on a
     STACK_SIZE boundary. */
  __pthread_initial_thread_bos =
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
# endif
#endif
#ifdef USE_TLS
  /* Update the descriptor for the initial thread. */
  THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
# ifndef HAVE___THREAD
  /* Likewise for the resolver state _res. */
  THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
# endif
#else
  /* Update the descriptor for the initial thread. */
  __pthread_initial_thread.p_pid = __getpid();
  /* Likewise for the resolver state _res. */
  __pthread_initial_thread.p_resp = &_res;
#endif
#if !__ASSUME_REALTIME_SIGNALS
  /* Initialize real-time signals. */
  init_rtsigs ();
#endif
  /* Setup signal handlers for the initial thread.
     Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
  sa.sa_handler = pthread_handle_sigrestart;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  sa.sa_handler = pthread_handle_sigcancel;
  // sa.sa_flags = 0;
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0) {
    sa.sa_handler = pthread_handle_sigdebug;
    sigemptyset(&sa.sa_mask);
    // sa.sa_flags = 0;
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
  }
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
  sigemptyset(&mask);
  sigaddset(&mask, __pthread_sig_restart);
  sigprocmask(SIG_BLOCK, &mask, NULL);
  /* And unblock __pthread_sig_cancel if it has been blocked. */
  sigdelset(&mask, __pthread_sig_restart);
  sigaddset(&mask, __pthread_sig_cancel);
  sigprocmask(SIG_UNBLOCK, &mask, NULL);
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_*exit_process. */
#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
		  __dso_handle);
  else
#endif
    __on_exit (pthread_onexit_process, NULL);
  /* How many processors.  */
  __pthread_smp_kernel = is_smp_system ();

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
#endif
}

/* Exported entry point; simply forwards to the internal (static,
   constructor-run) pthread_initialize, which is idempotent.  */
void __pthread_initialize(void)
{
  pthread_initialize();
}

a66f0958
UD
561int __pthread_initialize_manager(void)
562{
563 int manager_pipe[2];
564 int pid;
565 struct pthread_request request;
557fab43 566 int report_events;
aff4519d
UD
567 pthread_descr mgr;
568#ifdef USE_TLS
569 tcbhead_t *tcbp;
570#endif
a66f0958 571
82f81a90 572 __pthread_multiple_threads = 1;
bb0ddc2f 573#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
299601a1 574 p_multiple_threads (__pthread_main_thread) = 1;
bb0ddc2f
RM
575#endif
576 *__libc_multiple_threads_ptr = 1;
82f81a90 577
a66f0958 578#ifndef HAVE_Z_NODELETE
313e5fb3 579 if (__builtin_expect (&__dso_handle != NULL, 1))
a66f0958
UD
580 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
581 __dso_handle);
582#endif
583
584 if (__pthread_max_stacksize == 0)
585 __pthread_init_max_stacksize ();
5afdca00
UD
586 /* If basic initialization not done yet (e.g. we're called from a
587 constructor run before our constructor), do it now */
588 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
589 /* Setup stack for thread manager */
590 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
591 if (__pthread_manager_thread_bos == NULL) return -1;
592 __pthread_manager_thread_tos =
593 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
594 /* Setup pipe to communicate with thread manager */
595 if (pipe(manager_pipe) == -1) {
596 free(__pthread_manager_thread_bos);
597 return -1;
598 }
557fab43
UD
599
600#ifdef USE_TLS
601 /* Allocate memory for the thread descriptor and the dtv. */
aff4519d
UD
602 tcbp = _dl_allocate_tls (NULL);
603 if (tcbp == NULL) {
557fab43
UD
604 free(__pthread_manager_thread_bos);
605 __libc_close(manager_pipe[0]);
606 __libc_close(manager_pipe[1]);
607 return -1;
608 }
609
aff4519d
UD
610# if TLS_TCB_AT_TP
611 mgr = (pthread_descr) tcbp;
612# elif TLS_DTV_AT_TP
613 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
614 returns. */
299601a1 615 mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
aff4519d
UD
616# endif
617 __pthread_handles[1].h_descr = manager_thread = mgr;
618
557fab43 619 /* Initialize the descriptor. */
bb0ddc2f 620#if !defined USE_TLS || !TLS_DTV_AT_TP
aff4519d
UD
621 mgr->p_header.data.tcb = tcbp;
622 mgr->p_header.data.self = mgr;
299601a1 623 p_multiple_threads (mgr) = 1;
bb0ddc2f 624#elif TLS_MULTIPLE_THREADS_IN_TCB
299601a1 625 p_multiple_threads (mgr) = 1;
bb0ddc2f 626#endif
aff4519d 627 mgr->p_lock = &__pthread_handles[1].h_lock;
c2afe833 628# ifndef HAVE___THREAD
aff4519d 629 mgr->p_errnop = &mgr->p_errno;
c2afe833 630# endif
aff4519d
UD
631 mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
632 mgr->p_nr = 1;
557fab43
UD
633# if __LT_SPINLOCK_INIT != 0
634 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
635# endif
aff4519d 636 mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
557fab43 637#else
aff4519d 638 mgr = &__pthread_manager_thread;
557fab43
UD
639#endif
640
641 __pthread_manager_request = manager_pipe[1]; /* writing end */
642 __pthread_manager_reader = manager_pipe[0]; /* reading end */
643
5afdca00 644 /* Start the thread manager */
20bdb31b 645 pid = 0;
557fab43 646#ifdef USE_TLS
fc0154dd
UD
647 if (__linuxthreads_initial_report_events != 0)
648 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
649 __linuxthreads_initial_report_events);
557fab43
UD
650 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
651#else
fc0154dd
UD
652 if (__linuxthreads_initial_report_events != 0)
653 __pthread_initial_thread.p_report_events
654 = __linuxthreads_initial_report_events;
557fab43
UD
655 report_events = __pthread_initial_thread.p_report_events;
656#endif
657 if (__builtin_expect (report_events, 0))
20bdb31b
UD
658 {
659 /* It's a bit more complicated. We have to report the creation of
660 the manager thread. */
661 int idx = __td_eventword (TD_CREATE);
662 uint32_t mask = __td_eventmask (TD_CREATE);
557fab43 663 uint32_t event_bits;
20bdb31b 664
557fab43
UD
665#ifdef USE_TLS
666 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
667 p_eventbuf.eventmask.event_bits[idx]);
668#else
669 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
670#endif
671
672 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
20bdb31b
UD
673 != 0)
674 {
aff4519d 675 __pthread_lock(mgr->p_lock, NULL);
c6df09ad 676
9aae19cd
UD
677#ifdef NEED_SEPARATE_REGISTER_STACK
678 pid = __clone2(__pthread_manager_event,
679 (void **) __pthread_manager_thread_bos,
680 THREAD_MANAGER_STACK_SIZE,
681 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
aff4519d 682 mgr);
cc765c2a
UD
683#elif _STACK_GROWS_UP
684 pid = __clone(__pthread_manager_event,
685 (void **) __pthread_manager_thread_bos,
686 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
aff4519d 687 mgr);
9aae19cd 688#else
20bdb31b
UD
689 pid = __clone(__pthread_manager_event,
690 (void **) __pthread_manager_thread_tos,
691 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
aff4519d 692 mgr);
9aae19cd 693#endif
20bdb31b
UD
694
695 if (pid != -1)
696 {
697 /* Now fill in the information about the new thread in
698 the newly created thread's data structure. We cannot let
699 the new thread do this since we don't know whether it was
700 already scheduled when we send the event. */
aff4519d
UD
701 mgr->p_eventbuf.eventdata = mgr;
702 mgr->p_eventbuf.eventnum = TD_CREATE;
703 __pthread_last_event = mgr;
704 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
705 mgr->p_pid = pid;
20bdb31b
UD
706
707 /* Now call the function which signals the event. */
708 __linuxthreads_create_event ();
20bdb31b 709 }
c6df09ad
UD
710
711 /* Now restart the thread. */
aff4519d 712 __pthread_unlock(mgr->p_lock);
20bdb31b
UD
713 }
714 }
715
139a4d95 716 if (__builtin_expect (pid, 0) == 0)
9aae19cd
UD
717 {
718#ifdef NEED_SEPARATE_REGISTER_STACK
719 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
720 THREAD_MANAGER_STACK_SIZE,
aff4519d 721 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
cc765c2a
UD
722#elif _STACK_GROWS_UP
723 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
aff4519d 724 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
9aae19cd
UD
725#else
726 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
aff4519d 727 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
9aae19cd
UD
728#endif
729 }
139a4d95 730 if (__builtin_expect (pid, 0) == -1) {
5afdca00
UD
731 free(__pthread_manager_thread_bos);
732 __libc_close(manager_pipe[0]);
733 __libc_close(manager_pipe[1]);
5afdca00
UD
734 return -1;
735 }
aff4519d
UD
736 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
737 mgr->p_pid = pid;
3387a425 738 /* Make gdb aware of new thread manager */
139a4d95 739 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
f5492334
UD
740 {
741 raise(__pthread_sig_debug);
742 /* We suspend ourself and gdb will wake us up when it is
743 ready to handle us. */
1d2fc9b3 744 __pthread_wait_for_restart_signal(thread_self());
f5492334 745 }
3387a425
UD
746 /* Synchronize debugging of the thread manager */
747 request.req_kind = REQ_DEBUG;
57642a78
UD
748 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
749 (char *) &request, sizeof(request)));
5afdca00
UD
750 return 0;
751}
752
/* Thread creation */

/* Create a new thread by sending a REQ_CREATE request to the manager
   (starting the manager first if necessary), then suspending until the
   manager reports the result.  Returns 0 and stores the new id in
   *THREAD on success, or an error code (e.g. EAGAIN).  */
int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
			 void * (*start_routine)(void *), void *arg)
{
  pthread_descr self = thread_self();
  struct pthread_request request;
  int retval;
  if (__builtin_expect (__pthread_manager_request, 0) < 0) {
    if (__pthread_initialize_manager() < 0) return EAGAIN;
  }
  request.req_thread = self;
  request.req_kind = REQ_CREATE;
  request.req_args.create.attr = attr;
  request.req_args.create.fn = start_routine;
  request.req_args.create.arg = arg;
  /* Pass our current signal mask so the manager can give it to the
     new thread.  */
  sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
	      &request.req_args.create.mask);
  TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
				  (char *) &request, sizeof(request)));
  /* Block until the manager restarts us with the result in p_retcode /
     p_retval.  */
  suspend(self);
  retval = THREAD_GETMEM(self, p_retcode);
  if (__builtin_expect (retval, 0) == 0)
    *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
  return retval;
}

versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);

#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)

/* glibc 2.0 binary-compatibility entry point for pthread_create: the
   caller's attribute object uses the old, smaller layout, so widen it
   before delegating to the 2.1 implementation.  */
int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
			 void * (*start_routine)(void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  pthread_attr_t new_attr;

  if (attr != NULL)
    {
      size_t ps = __getpagesize ();

      /* Copy only the old-layout prefix (everything before __guardsize),
	 then fill the new members with defaults.  */
      memcpy (&new_attr, attr,
	      (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
      new_attr.__guardsize = ps;
      new_attr.__stackaddr_set = 0;
      new_attr.__stackaddr = NULL;
      new_attr.__stacksize = STACK_SIZE - ps;
      attr = &new_attr;
    }
  return __pthread_create_2_1 (thread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
#endif

/* Simple operations on thread identifiers */

/* Return the calling thread's descriptor.  */
pthread_descr __pthread_thread_self(void)
{
  return thread_self();
}

/* Return the calling thread's identifier (the p_tid field of its
   descriptor).  */
pthread_t __pthread_self(void)
{
  pthread_descr self = thread_self();
  return THREAD_GETMEM(self, p_tid);
}
strong_alias (__pthread_self, pthread_self);

/* Report whether two thread identifiers name the same thread.
   Identifiers are plain scalars here, so equality is direct comparison;
   returns nonzero when equal, zero otherwise.  */
int __pthread_equal(pthread_t t1, pthread_t t2)
{
  return (t1 == t2) ? 1 : 0;
}
strong_alias (__pthread_equal, pthread_equal);

3387a425
UD
/* Helper function for thread_self in the case of user-provided stacks */

#ifndef THREAD_SELF

/* Identify the calling thread by scanning the global handle table for
   the entry whose stack region contains the current stack pointer.
   Only used when the platform has no register-based THREAD_SELF.  */
pthread_descr __pthread_find_self(void)
{
  char * sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
     the manager threads handled specially in thread_self(), so start at 2 */
  h = __pthread_handles + 2;
  /* The descriptor sits at the top of the stack region, h_bottom at the
     bottom, so SP falls in [h_bottom, h_descr] for the owning thread.
     NOTE(review): loop assumes a matching handle always exists; it does
     not terminate otherwise.  */
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
  return h->h_descr;
}

#else

/* Stack-based self identification used as a fallback when the thread
   register may not be initialized yet (see pthread_handle_sigcancel).  */
static pthread_descr thread_self_stack(void)
{
  char *sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  /* The manager's stack is allocated separately; check it first.  */
  if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
    return manager_thread;
  /* Skip the initial thread and the manager (handled above).  */
  h = __pthread_handles + 2;
# ifdef USE_TLS
  /* With TLS the descriptor is not at the stack top; compare against
     the recorded stack address instead, and skip empty slots.  */
  while (h->h_descr == NULL
	 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
    h++;
# else
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
    h++;
# endif
  return h->h_descr;
}

#endif
867
5afdca00
UD
868/* Thread scheduling */
869
82f81a90
UD
/* Set the scheduling policy and parameters of THREAD.
   Returns 0 on success, ESRCH if THREAD is no longer valid, or the
   errno value from sched_setscheduler on kernel failure.  */
int __pthread_setschedparam(pthread_t thread, int policy,
                            const struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  pthread_descr th;

  /* Hold the handle lock across the whole update so the thread cannot
     exit and have its slot reused underneath us.  */
  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  /* Each thread is a separate kernel process here, so the change is
     applied to the thread's own pid.  */
  if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
			0)) {
    __pthread_unlock(&handle->h_lock);
    return errno;
  }
  /* Record the effective priority; SCHED_OTHER threads count as 0.  */
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);
  /* If the manager thread is running, let it raise its own priority so
     it can still preempt the (possibly boosted) thread.  */
  if (__pthread_manager_request >= 0)
    __pthread_manager_adjust_prio(th->p_priority);
  return 0;
}
strong_alias (__pthread_setschedparam, pthread_setschedparam);
5afdca00 894
82f81a90
UD
895int __pthread_getschedparam(pthread_t thread, int *policy,
896 struct sched_param *param)
5afdca00
UD
897{
898 pthread_handle handle = thread_handle(thread);
899 int pid, pol;
900
c5e340c7 901 __pthread_lock(&handle->h_lock, NULL);
139a4d95 902 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
0f550417 903 __pthread_unlock(&handle->h_lock);
5afdca00
UD
904 return ESRCH;
905 }
906 pid = handle->h_descr->p_pid;
0f550417 907 __pthread_unlock(&handle->h_lock);
5afdca00 908 pol = __sched_getscheduler(pid);
139a4d95 909 if (__builtin_expect (pol, 0) == -1) return errno;
5afdca00
UD
910 if (__sched_getparam(pid, param) == -1) return errno;
911 *policy = pol;
912 return 0;
913}
82f81a90 914strong_alias (__pthread_getschedparam, pthread_getschedparam);
5afdca00 915
c3317d1e 916int __pthread_yield (void)
c269fdb4
UD
917{
918 /* For now this is equivalent with the POSIX call. */
919 return sched_yield ();
920}
921weak_alias (__pthread_yield, pthread_yield)
922
5afdca00
UD
/* Process-wide exit() request */

/* Exit handler run when the process terminates: ask the thread manager
   to kill all other threads, then wait until it has done so.  RETCODE
   is the process exit status; ARG is unused (on_exit signature).  */
static void pthread_onexit_process(int retcode, void *arg)
{
  /* Nothing to do unless the manager was ever started (i.e. a second
     thread was created).  */
  if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
    struct pthread_request request;
    pthread_descr self = thread_self();

    request.req_thread = self;
    request.req_kind = REQ_PROCESS_EXIT;
    request.req_args.exit.code = retcode;
    /* Send the request over the manager pipe and wait for the manager
       to acknowledge by restarting us.  */
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
		       (char *) &request, sizeof(request)));
    suspend(self);
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread)
      {
#ifdef USE_TLS
	waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
	waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
	/* Since all threads have been asynchronously terminated
	   (possibly holding locks), free cannot be used any more. */
	/*free (__pthread_manager_thread_bos);*/
	__pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
      }
  }
}
953
5ef50d00
UD
#ifndef HAVE_Z_NODELETE
/* Without -z nodelete the exit status is not available to the handler
   registered from libpthread; it is captured separately by
   pthread_atexit_retcode and replayed here.  */
static int __pthread_atexit_retcode;

/* atexit-style wrapper: forward to pthread_onexit_process, using the
   saved return code when RETCODE is 0.  */
static void pthread_atexit_process(void *arg, int retcode)
{
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
}

/* Record the process exit status for later use by
   pthread_atexit_process.  */
static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
#endif
967
5afdca00
UD
/* The handler for the RESTART signal just records the signal received
   in the thread descriptor, and optionally performs a siglongjmp
   (for pthread_cond_timedwait). */

static void pthread_handle_sigrestart(int sig)
{
  pthread_descr self = thread_self();
  /* Record which signal woke us; __pthread_wait_for_restart_signal
     polls p_signal for exactly this value.  */
  THREAD_SETMEM(self, p_signal, sig);
  /* If a timed-suspend set up a jump buffer, abort the sleep by
     jumping back to it.  */
  if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
    siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
}
979
/* The handler for the CANCEL signal checks for cancellation
   (in asynchronous mode), for process-wide exit and exec requests.
   For the thread manager thread, redirect the signal to
   __pthread_manager_sighandler. */

static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = thread_self();
  sigjmp_buf * jmpbuf;

  if (self == manager_thread)
    {
#ifdef THREAD_SELF
      /* A new thread might get a cancel signal before it is fully
	 initialized, so that the thread register might still point to the
	 manager thread.  Double check that this is really the manager
	 thread.  */
      pthread_descr real_self = thread_self_stack();
      if (real_self == manager_thread)
	{
	  __pthread_manager_sighandler(sig);
	  return;
	}
      /* Oops, thread_self() isn't working yet..  */
      self = real_self;
# ifdef INIT_THREAD_SELF
      /* Fix up the thread register now that we know who we are.  */
      INIT_THREAD_SELF(self, self->p_nr);
# endif
#else
      __pthread_manager_sighandler(sig);
      return;
#endif
    }
  /* Process-wide exit requested (see pthread_onexit_process): every
     thread terminates itself from this handler.  */
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    _exit(__pthread_exit_code);
  }
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    /* Asynchronous cancellation: terminate immediately.  */
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    /* Deferred cancellation: jump back to the pending cancellation
       point, if one registered a jump buffer.  */
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
1036
b92ad8d6
UD
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself. The debugger (if active) intercepts
   this signal, takes into account new threads and continue execution
   of the thread manager by propagating the signal because it doesn't
   know what it is specifically done for. In the current implementation,
   the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
{
  /* Nothing to do: the signal exists only to be intercepted by an
     attached debugger.  */
}
1051
5afdca00
UD
/* Reset the state of the thread machinery after a fork().
   Close the pipe used for requests and set the main thread to the forked
   thread.
   Notice that we can't free the stack segments, as the forked thread
   may hold pointers into them. */

void __pthread_reset_main_thread(void)
{
  pthread_descr self = thread_self();

  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    __libc_close(__pthread_manager_request);
    __libc_close(__pthread_manager_reader);
    /* -1 marks the manager as not running, so it can be started again
       if the forked process creates threads.  */
    __pthread_manager_request = __pthread_manager_reader = -1;
  }

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  /* The circular list of live threads now contains only ourselves.  */
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables. */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
  THREAD_SETMEM(self, p_resp, &_res);
#endif

#ifndef FLOATING_STACKS
  /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
     XXX This can be wrong if the user set the limit during the run. */
  {
    struct rlimit limit;
    if (getrlimit (RLIMIT_STACK, &limit) == 0
	&& limit.rlim_cur != limit.rlim_max)
      {
	limit.rlim_cur = limit.rlim_max;
	setrlimit(RLIMIT_STACK, &limit);
      }
  }
#endif
}
1099
/* Process-wide exec() request */

/* Non-portable helper called before exec(): terminate every thread
   except the caller, then restore default dispositions for the
   implementation's signals so they are not inherited by the new
   program image.  */
void __pthread_kill_other_threads_np(void)
{
  struct sigaction sa;
  /* Terminate all other threads and thread manager */
  pthread_onexit_process(0, NULL);
  /* Make current thread the main thread in case the calling thread
     changes its mind, does not exec(), and creates new threads instead. */
  __pthread_reset_main_thread();

  /* Reset the signal handlers behaviour for the signals the
     implementation uses since this would be passed to the new
     process. */
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sa.sa_handler = SIG_DFL;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  /* The debug signal is only allocated when a debugger is involved.  */
  if (__pthread_sig_debug > 0)
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
}
weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
1123
5d409851
UD
1124/* Concurrency symbol level. */
1125static int current_level;
1126
1127int __pthread_setconcurrency(int level)
1128{
1129 /* We don't do anything unless we have found a useful interpretation. */
1130 current_level = level;
1131 return 0;
1132}
1133weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1134
1135int __pthread_getconcurrency(void)
1136{
1137 return current_level;
1138}
1139weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1140
1d2fc9b3
UD
/* Primitives for controlling thread execution */

/* Block the calling thread until its restart signal is delivered.
   Relies on the restart signal being blocked on entry; it is unblocked
   only for the duration of each sigsuspend.  */
void __pthread_wait_for_restart_signal(pthread_descr self)
{
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  /* Clear p_signal first; the restart handler stores the signal number
     there, which terminates the loop below.  */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    __pthread_sigsuspend(&mask);	/* Wait for signal.  Must not be a
					   cancellation point. */
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
1157
#if !__ASSUME_REALTIME_SIGNALS
/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
   signals.
   On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
   Since the restart signal does not queue, we use an atomic counter to create
   queuing semantics. This is needed to resolve a rare race condition in
   pthread_cond_timedwait_relative. */

/* Resume TH.  Bump the resume counter; the signal is only sent when
   the counter transitions from -1, i.e. when the target is actually
   suspended waiting for it.  */
void __pthread_restart_old(pthread_descr th)
{
  if (atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);
}

/* Suspend the calling thread.  Only blocks if no restart is already
   pending (counter drops to 0 or below).  */
void __pthread_suspend_old(pthread_descr self)
{
  if (atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);
}
1177
/* Suspend the calling thread until a restart signal arrives or the
   absolute deadline ABSTIME passes.  Returns 1 if a restart was
   consumed, 0 if the caller must resolve the wakeup race itself.
   Pre-RT-signal variant using the p_resume_count protocol.  */
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  /* Only sleep if no restart is already pending.  */
  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
	struct timeval now;
	struct timespec reltime;

	/* Compute a time offset relative to now.  */
	__gettimeofday (&now, NULL);
	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
	if (reltime.tv_nsec < 0) {
	  reltime.tv_nsec += 1000000000;
	  reltime.tv_sec -= 1;
	}

	/* Sleep for the required duration. If woken by a signal,
	   resume waiting as required by Single Unix Specification.  */
	if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
	  break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* Arrived here via siglongjmp from the restart handler.  */
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider.  First, try to bump up the resume count
     back to zero.  If it goes to 1, it means restart() was
     invoked on this thread.  The signal must be consumed
     and the count bumped down and everything is cool.  We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
#endif /* __ASSUME_REALTIME_SIGNALS */
ef187474 1255
1d2fc9b3
UD
/* Resume TH by sending it the restart signal (RT-signal kernels:
   the signal queues, so no counter protocol is needed).  */
void __pthread_restart_new(pthread_descr th)
{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions.  The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view.  Complementary
     read barriers are present to the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}

/* There is no __pthread_suspend_new because it would just
   be a wasteful wrapper for __pthread_wait_for_restart_signal */
1268
/* Suspend the calling thread until a restart signal arrives or the
   absolute deadline ABSTIME passes.  Returns 1 if woken by the restart
   signal, 0 on a spontaneous/timeout wakeup (an ambiguous case the
   caller must resolve).  RT-signal variant: no resume counter.  */
int
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    /* Publish the jump buffer so the restart handler can abort the
       sleep via siglongjmp.  */
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now.  */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
	reltime.tv_nsec += 1000000000;
	reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
	 resume waiting as required by Single Unix Specification.  */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
	break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* Arrived here via siglongjmp from the restart handler.  */
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise we this thread woke up spontaneously, or due to a signal other
     than restart. This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
1324
1325
5afdca00
UD
1326/* Debugging aid */
1327
#ifdef DEBUG
#include <stdarg.h>

/* Debug logging helper: write "PID : <formatted message>" to stderr.
   FMT and the variadic arguments are printf-style.  */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  /* Build the pid prefix with snprintf and use its return value as the
     offset for the message body.  The old code used sprintf with a
     hard-coded offset of 8, which breaks once a pid exceeds 99999
     ("%05d" then emits more than five digits).  */
  int len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    return;  /* Formatting failure; nothing sensible to log.  */
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}

#endif
This page took 0.332471 seconds and 5 git commands to generate.