/* glibc: linuxthreads/manager.c */
1/* Linuxthreads - a simple clone()-based implementation of Posix */
2/* threads for Linux. */
3/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
4/* */
5/* This program is free software; you can redistribute it and/or */
6/* modify it under the terms of the GNU Library General Public License */
7/* as published by the Free Software Foundation; either version 2 */
8/* of the License, or (at your option) any later version. */
9/* */
10/* This program is distributed in the hope that it will be useful, */
11/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13/* GNU Library General Public License for more details. */
14
15/* The "thread manager" thread: manages creation and termination of threads */
16
17#include <errno.h>
18#include <sched.h>
19#include <stddef.h>
20#include <stdio.h>
21#include <stdlib.h>
22#include <string.h>
23#include <unistd.h>
026d5011 24#include <sys/poll.h> /* for poll */
5afdca00 25#include <sys/mman.h> /* for mmap */
ddbf7fef 26#include <sys/param.h>
5afdca00
UD
27#include <sys/time.h>
28#include <sys/wait.h> /* for waitpid macros */
5afdca00
UD
29
30#include "pthread.h"
31#include "internals.h"
32#include "spinlock.h"
33#include "restart.h"
3387a425 34#include "semaphore.h"
5afdca00
UD
35
/* Array of active threads. Entry 0 is reserved for the initial thread,
   entry 1 for the manager thread; user threads start at index 2. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
{ { LOCK_INITIALIZER, &__pthread_initial_thread, 0},
  { LOCK_INITIALIZER, &__pthread_manager_thread, 0}, /* All NULLs */ };

/* For debugging purposes put the maximum number of threads in a variable. */
const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;

/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */
int __pthread_nonstandard_stacks;

/* Number of active entries in __pthread_handles (used by gdb).
   Starts at 2: the initial thread and the manager thread. */
volatile int __pthread_handles_num = 2;

/* Whether to use debugger additional actions for thread creation
   (set to 1 by gdb) */
volatile int __pthread_threads_debug;

/* Globally enabled thread-debug events (td_* event machinery). */
volatile td_thr_events_t __pthread_threads_events;

/* Pointer to thread descriptor with last event (read by the debugger). */
volatile pthread_descr __pthread_last_event;
5afdca00
UD
/* Mapping from stack segment to thread descriptor. */
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */

/* Return the descriptor address for stack segment SEG.  Each thread's
   descriptor sits immediately below the top of its fixed-size
   (STACK_SIZE) stack slab: segment 1 occupies the slab just below
   THREAD_STACK_START_ADDRESS, segment 2 the next slab down, etc. */
static inline pthread_descr thread_segment(int seg)
{
  return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
         - 1;
}
70
/* Flag set in the __pthread_sig_cancel signal handler to record child
   termination; polled and reset by the manager's main loop. */

static volatile int terminated_children = 0;

/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */

static int main_thread_exiting = 0;

/* Counter used to generate unique thread identifier.
   Thread identifier is pthread_threads_counter + segment; bumped by
   PTHREAD_THREADS_MAX per creation so ids are not reused quickly. */

static pthread_t pthread_threads_counter = 0;
84
/* Forward declarations */

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t *mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp);
static void pthread_handle_free(pthread_t th_id);
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode);
static void pthread_reap_children(void);
static void pthread_kill_all_threads(int sig, int main_thread_also);
96
/* The server thread managing requests for thread creation and termination.
   ARG is the read end of the request pipe (smuggled through the void*
   argument of __clone).  Loops forever serving requests, except that a
   REQ_MAIN_THREAD_EXIT request with no other live threads makes it
   return, and orphaning or a fatal child signal makes it _exit. */

int __pthread_manager(void *arg)
{
  int reqfd = (int)arg;
  struct pollfd ufd;
  sigset_t mask;
  int n;
  struct pthread_request request;

  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif
  /* Set the error variable.  */
  __pthread_manager_thread.p_errnop = &__pthread_manager_thread.p_errno;
  __pthread_manager_thread.p_h_errnop = &__pthread_manager_thread.p_h_errno;
  /* Block all signals except __pthread_sig_cancel and SIGTRAP */
  sigfillset(&mask);
  sigdelset(&mask, __pthread_sig_cancel); /* for thread termination */
  sigdelset(&mask, SIGTRAP);            /* for debugging purposes */
  sigprocmask(SIG_SETMASK, &mask, NULL);
  /* Raise our priority to match that of main thread */
  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
  /* Synchronize debugging of the thread manager: the creator sends a
     REQ_DEBUG request as soon as we are up. */
  n = __libc_read(reqfd, (char *)&request, sizeof(request));
  ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
  ufd.fd = reqfd;
  ufd.events = POLLIN;
  /* Enter server loop */
  while(1) {
    /* Wait at most 2 seconds so that orphaning and dead children are
       also detected when no requests arrive. */
    n = __poll(&ufd, 1, 2000);

    /* Check for termination of the main thread: if we have been
       reparented to init (pid 1), the original parent is gone. */
    if (getppid() == 1) {
      pthread_kill_all_threads(SIGKILL, 0);
      _exit(0);
    }
    /* Check for dead children */
    if (terminated_children) {
      terminated_children = 0;
      pthread_reap_children();
    }
    /* Read and execute request */
    if (n == 1 && (ufd.revents & POLLIN)) {
      n = __libc_read(reqfd, (char *)&request, sizeof(request));
      ASSERT(n == sizeof(request));
      switch(request.req_kind) {
      case REQ_CREATE:
        request.req_thread->p_retcode =
          pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
                                request.req_args.create.attr,
                                request.req_args.create.fn,
                                request.req_args.create.arg,
                                &request.req_args.create.mask,
                                request.req_thread->p_pid,
                                request.req_thread->p_report_events,
                                &request.req_thread->p_eventbuf.eventmask);
        /* The requester is suspended until creation completes. */
        restart(request.req_thread);
        break;
      case REQ_FREE:
        pthread_handle_free(request.req_args.free.thread_id);
        break;
      case REQ_PROCESS_EXIT:
        pthread_handle_exit(request.req_thread,
                            request.req_args.exit.code);
        break;
      case REQ_MAIN_THREAD_EXIT:
        main_thread_exiting = 1;
        /* If no other threads are alive, wake the main thread so it can
           finish exiting, and terminate the manager. */
        if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
          restart(__pthread_main_thread);
          return 0;
        }
        break;
      case REQ_POST:
        __new_sem_post(request.req_args.post);
        break;
      case REQ_DEBUG:
        /* Make gdb aware of new thread and gdb will restart the
           new thread when it is ready to handle the new thread. */
        if (__pthread_threads_debug && __pthread_sig_debug > 0)
          raise(__pthread_sig_debug);
        break;
      }
    }
  }
}
184
20bdb31b
UD
/* Manager entry point used when the debugger wants creation events:
   blocks on the manager's descriptor lock (held by the creator until
   setup and event reporting are complete) before serving requests. */
int __pthread_manager_event(void *arg)
{
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));

  return __pthread_manager(arg);
}
199
5afdca00
UD
/* Process creation */

/* Trampoline executed in each freshly cloned child: finishes thread
   setup (self pointer, pid, signal mask, scheduling), optionally tells
   gdb about the new thread, then runs the user's start routine and
   exits with its return value. */
static int pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
  struct pthread_request request;
  void * outcome;
  /* Initialize special thread_self processing, if any.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Initial signal mask is that of the creating thread. (Otherwise,
     we'd just inherit the mask of the thread manager.) */
  sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                         THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (__pthread_manager_thread.p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
  /* Make gdb aware of new thread: ask the manager to raise the debug
     signal, and stay suspended until gdb restarts us. */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    __libc_write(__pthread_manager_request,
                 (char *) &request, sizeof(request));
    suspend(self);
  }
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
                                                           p_start_args.arg));
  /* Exit with the given return value */
  pthread_exit(outcome);
  return 0;
}
247
a9cb398f
UD
/* Variant of pthread_start_thread used when a TD_CREATE event must be
   reported: waits on the descriptor lock (held by the manager until the
   debugger has been notified) before starting for real. */
static int pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up. */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately. */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function. */
  return pthread_start_thread (arg);
}
266
ddbf7fef
UD
267static int pthread_allocate_stack(const pthread_attr_t *attr,
268 pthread_descr default_new_thread,
269 int pagesize,
270 pthread_descr * out_new_thread,
271 char ** out_new_thread_bottom,
272 char ** out_guardaddr,
273 size_t * out_guardsize)
274{
275 pthread_descr new_thread;
276 char * new_thread_bottom;
277 char * guardaddr;
278 size_t stacksize, guardsize;
279
c70ca1fa 280 if (attr != NULL && attr->__stackaddr_set)
ddbf7fef
UD
281 {
282 /* The user provided a stack. */
283 new_thread =
c70ca1fa
UD
284 (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
285 new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
ddbf7fef
UD
286 guardaddr = NULL;
287 guardsize = 0;
82df2969 288 __pthread_nonstandard_stacks = 1;
ddbf7fef
UD
289 }
290 else
291 {
b85697f6
UD
292 stacksize = STACK_SIZE - pagesize;
293 if (attr != NULL)
294 stacksize = MIN (stacksize, roundup(attr->__stacksize, pagesize));
ddbf7fef
UD
295 /* Allocate space for stack and thread descriptor at default address */
296 new_thread = default_new_thread;
b85697f6 297 new_thread_bottom = (char *) (new_thread + 1) - stacksize;
ddbf7fef
UD
298 if (mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
299 INITIAL_STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
300 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN,
301 -1, 0) == MAP_FAILED)
302 /* Bad luck, this segment is already mapped. */
303 return -1;
304 /* We manage to get a stack. Now see whether we need a guard
305 and allocate it if necessary. Notice that the default
b85697f6
UD
306 attributes (stack_size = STACK_SIZE - pagesize) do not need
307 a guard page, since the RLIMIT_STACK soft limit prevents stacks
308 from running into one another. */
309 if (stacksize == STACK_SIZE - pagesize)
ddbf7fef
UD
310 {
311 /* We don't need a guard page. */
312 guardaddr = NULL;
313 guardsize = 0;
314 }
315 else
316 {
317 /* Put a bad page at the bottom of the stack */
b85697f6 318 guardaddr = (void *)new_thread_bottom - stacksize;
c70ca1fa 319 guardsize = attr->__guardsize;
ddbf7fef
UD
320 if (mmap ((caddr_t) guardaddr, guardsize, 0, MAP_FIXED, -1, 0)
321 == MAP_FAILED)
322 {
323 /* We don't make this an error. */
324 guardaddr = NULL;
325 guardsize = 0;
326 }
327 }
328 }
390500b1
UD
329 /* Clear the thread data structure. */
330 memset (new_thread, '\0', sizeof (*new_thread));
ddbf7fef
UD
331 *out_new_thread = new_thread;
332 *out_new_thread_bottom = new_thread_bottom;
333 *out_guardaddr = guardaddr;
334 *out_guardsize = guardsize;
335 return 0;
336}
337
/* Create a new thread on behalf of the (suspended) requesting thread:
   pick a free stack segment, allocate its stack, initialize the
   descriptor and handle, clone the child, and link it into the list of
   live threads.  Returns 0 and stores the new id in *THREAD on success,
   or an errno value (EPERM/EAGAIN/clone failure) on error. */
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0;
  int pagesize = __getpagesize();

  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed
     (segments 0 and 1 are the initial and manager threads). */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
                                 &new_thread, &new_thread_bottom,
                                 &guardaddr, &guardsize) == 0)
        break;
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value (the stack was
     memset by pthread_allocate_stack). */
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_self = new_thread;
  new_thread->p_nr = sseg;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;

    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
              sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
      __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not. */
  pid = 0;     /* Note that the thread never can have PID zero. */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop. */
          __pthread_lock(new_thread->p_lock, NULL);

          /* We have to report this event. */
          pid = __clone(pthread_start_thread_event, (void **) new_thread,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger. */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();

              /* Now restart the thread. */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }
  /* Not reporting events (or TD_CREATE not requested): plain clone. */
  if (pid == 0)
    pid = __clone(pthread_start_thread, (void **) new_thread,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                  __pthread_sig_cancel, new_thread);
  /* Check if cloning succeeded */
  if (pid == -1) {
    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
        if (new_thread->p_guardsize != 0)
          munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
        munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
               INITIAL_STACK_SIZE);
      }
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return errno;
  }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  /* We're all set */
  *thread = new_thread_id;
  return 0;
}
492
5d409851 493
3dd2c3e2
UD
/* Release the resources of the already-terminated thread TH: invalidate
   its handle-table slot and, unless the thread used a user-supplied
   stack, unmap its stack and guard page.  Called directly for detached
   threads when they exit, and via pthread_handle_free otherwise. */

static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  ASSERT(th->p_exited);
  /* Make the handle invalid */
  handle = thread_handle(th->p_tid);
  __pthread_lock(&handle->h_lock, NULL);
  handle->h_descr = NULL;
  handle->h_bottom = (char *)(-1L);  /* sentinel: no stack any more */
  __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD_SELF
  FREE_THREAD_SELF(th, th->p_nr);
#endif
  /* One fewer threads in __pthread_handles */
  __pthread_handles_num--;
  /* If initial thread, nothing to free */
  if (th == &__pthread_initial_thread) return;
  if (!th->p_userstack)
    {
      /* Free the stack and thread descriptor area */
      if (th->p_guardsize != 0)
        munmap(th->p_guardaddr, th->p_guardsize);
      munmap((caddr_t) ((char *)(th+1) - STACK_SIZE), STACK_SIZE);
    }
}
522
/* Handle the termination of the clone child with process id PID:
   unlink it from the live list, mark it exited, report a TD_DEATH event
   to the debugger if requested, and free it at once if detached. */

static void pthread_exited(pid_t pid)
{
  pthread_descr th;
  int detached;
  /* Find thread with that pid */
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    if (th->p_pid == pid) {
      /* Remove thread from list of active threads */
      th->p_nextlive->p_prevlive = th->p_prevlive;
      th->p_prevlive->p_nextlive = th->p_nextlive;
      /* Mark thread as exited, and if detached, free its resources */
      __pthread_lock(th->p_lock, NULL);
      th->p_exited = 1;
      /* If we have to signal this event do it now. */
      if (th->p_report_events)
        {
          /* See whether TD_REAP is set in any of the masks. */
          int idx = __td_eventword (TD_REAP);
          uint32_t mask = __td_eventmask (TD_REAP);

          if ((mask & (__pthread_threads_events.event_bits[idx]
                       | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
            {
              /* Yep, we have to signal the death. */
              th->p_eventbuf.eventnum = TD_DEATH;
              th->p_eventbuf.eventdata = th;
              __pthread_last_event = th;

              /* Now call the function to signal the event. */
              __linuxthreads_reap_event();
            }
        }
      detached = th->p_detached;
      __pthread_unlock(th->p_lock);
      if (detached)
        pthread_free(th);
      break;
    }
  }
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves. */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    _exit(0);
  }
}
574
/* Reap every dead clone child and process its exit; if any child was
   killed by a signal, forward that signal to all threads and exit. */
static void pthread_reap_children(void)
{
  int status;
  pid_t child;

  for (;;) {
    child = __libc_waitpid(-1, &status, WNOHANG | __WCLONE);
    if (child <= 0)
      break;
    pthread_exited(child);
    if (WIFSIGNALED(status)) {
      /* If a thread died due to a signal, send the same signal to
         all other threads, including the main thread. */
      pthread_kill_all_threads(WTERMSIG(status), 1);
      _exit(0);
    }
  }
}
590
bf7997b6
UD
591/* Try to free the resources of a thread when requested by pthread_join
592 or pthread_detach on a terminated thread. */
5afdca00 593
fdacb17d 594static void pthread_handle_free(pthread_t th_id)
5afdca00 595{
fdacb17d
UD
596 pthread_handle handle = thread_handle(th_id);
597 pthread_descr th;
bf7997b6 598
c5e340c7 599 __pthread_lock(&handle->h_lock, NULL);
fdacb17d
UD
600 if (invalid_handle(handle, th_id)) {
601 /* pthread_reap_children has deallocated the thread already,
602 nothing needs to be done */
603 __pthread_unlock(&handle->h_lock);
026d5011
UD
604 return;
605 }
fdacb17d 606 th = handle->h_descr;
5afdca00 607 if (th->p_exited) {
fdacb17d 608 __pthread_unlock(&handle->h_lock);
5afdca00
UD
609 pthread_free(th);
610 } else {
611 /* The Unix process of the thread is still running.
612 Mark the thread as detached so that the thread manager will
613 deallocate its resources when the Unix process exits. */
614 th->p_detached = 1;
fdacb17d 615 __pthread_unlock(&handle->h_lock);
5afdca00
UD
616 }
617}
618
619/* Send a signal to all running threads */
620
621static void pthread_kill_all_threads(int sig, int main_thread_also)
622{
623 pthread_descr th;
624 for (th = __pthread_main_thread->p_nextlive;
625 th != __pthread_main_thread;
626 th = th->p_nextlive) {
627 kill(th->p_pid, sig);
628 }
629 if (main_thread_also) {
630 kill(__pthread_main_thread->p_pid, sig);
631 }
632}
633
/* Process-wide exit(): cancel and reap every thread except
   ISSUING_THREAD, then resume it so it can run atexit handlers and
   flush stdio, and finally terminate the manager itself. */

static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
{
  pthread_descr th;
  __pthread_exit_requested = 1;
  __pthread_exit_code = exitcode;
  /* Send the CANCEL signal to all running threads, including the main
     thread, but excluding the thread from which the exit request originated
     (that thread must complete the exit, e.g. calling atexit functions
     and flushing stdio buffers). */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, __pthread_sig_cancel);
  }
  /* Now, wait for all these threads, so that they don't become zombies
     and their times are properly added to the thread manager's times. */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    waitpid(th->p_pid, NULL, __WCLONE);
  }
  restart(issuing_thread);
  _exit(0);
}
660
/* Handler for __pthread_sig_cancel in the thread manager thread.  Only
   records the fact; the manager's poll loop in __pthread_manager calls
   pthread_reap_children on its next iteration. */

void __pthread_manager_sighandler(int sig)
{
  terminated_children = 1;
}
bf7997b6
UD
667
668/* Adjust priority of thread manager so that it always run at a priority
669 higher than all threads */
670
671void __pthread_manager_adjust_prio(int thread_prio)
672{
673 struct sched_param param;
674
675 if (thread_prio <= __pthread_manager_thread.p_priority) return;
676 param.sched_priority =
d47aac39 677 thread_prio < __sched_get_priority_max(SCHED_FIFO)
bf7997b6
UD
678 ? thread_prio + 1 : thread_prio;
679 __sched_setscheduler(__pthread_manager_thread.p_pid, SCHED_FIFO, &param);
680 __pthread_manager_thread.p_priority = thread_prio;
681}