linuxthreads/manager.c
/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the */
/* GNU Library General Public License for more details. */

/* The "thread manager" thread: manages creation and termination of threads */

#include <errno.h>
#include <sched.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/poll.h>   /* for poll */
#include <sys/mman.h>   /* for mmap */
#include <sys/param.h>
#include <sys/time.h>
#include <sys/wait.h>   /* for waitpid macros */

#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"
#include "semaphore.h"
/* Array of active threads. Entry 0 is reserved for the initial thread. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
{ { LOCK_INITIALIZER, &__pthread_initial_thread, 0},
  { LOCK_INITIALIZER, &__pthread_manager_thread, 0}, /* All NULLs */ };

/* For debugging purposes put the maximum number of threads in a variable. */
const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;

/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */
int __pthread_nonstandard_stacks;

/* Number of active entries in __pthread_handles (used by gdb) */
volatile int __pthread_handles_num = 2;

/* Whether to use debugger additional actions for thread creation
   (set to 1 by gdb) */
volatile int __pthread_threads_debug;

/* Globally enabled events. */
volatile td_thr_events_t __pthread_threads_events;

/* Pointer to thread descriptor with last event. */
volatile pthread_descr __pthread_last_event;

/* Mapping from stack segment to thread descriptor. */
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */
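/* With the stacks allocated by LinuxThreads itself, each thread occupies a
   fixed-size slot of STACK_SIZE bytes below THREAD_STACK_START_ADDRESS; the
   thread descriptor sits at the very top of its slot, which is why
   thread_segment() below steps down (seg - 1) slots and then backs up by
   one descriptor. */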

static inline pthread_descr thread_segment(int seg)
{
  return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
         - 1;
}

/* Flag set in signal handler to record child termination */

static volatile int terminated_children = 0;

/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */
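/* (The manager sets this flag on a REQ_MAIN_THREAD_EXIT request; the initial
   thread is then restarted, from the request loop or from pthread_exited(),
   once it is the only thread left on the live list.) */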

static int main_thread_exiting = 0;

/* Counter used to generate unique thread identifier.
   Thread identifier is pthread_threads_counter + segment. */

static pthread_t pthread_threads_counter = 0;

/* Forward declarations */

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t *mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp);
static void pthread_handle_free(pthread_t th_id);
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode);
static void pthread_reap_children(void);
static void pthread_kill_all_threads(int sig, int main_thread_also);

/* The server thread managing requests for thread creation and termination */
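/* User threads talk to the manager by writing fixed-size struct
   pthread_request records to the manager's request pipe (read here as
   reqfd; the write end is __pthread_manager_request) and, for synchronous
   requests such as REQ_CREATE, suspending until the manager restart()s
   them.  The sending side lives in pthread.c, so the following is only a
   rough sketch of what a creation request looks like:

       struct pthread_request request;
       request.req_thread = self;
       request.req_kind = REQ_CREATE;
       request.req_args.create.attr = attr;
       request.req_args.create.fn = start_routine;
       request.req_args.create.arg = arg;
       sigprocmask(SIG_SETMASK, NULL, &request.req_args.create.mask);
       __libc_write(__pthread_manager_request,
                    (char *) &request, sizeof(request));
       suspend(self);
       return THREAD_GETMEM(self, p_retcode);
*/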

int __pthread_manager(void *arg)
{
  int reqfd = (int) (long int) arg;
  struct pollfd ufd;
  sigset_t mask;
  int n;
  struct pthread_request request;

  /* If we have special thread_self processing, initialize it. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif
  /* Set the error variable. */
  __pthread_manager_thread.p_errnop = &__pthread_manager_thread.p_errno;
  __pthread_manager_thread.p_h_errnop = &__pthread_manager_thread.p_h_errno;
  /* Block all signals except __pthread_sig_cancel and SIGTRAP */
  sigfillset(&mask);
  sigdelset(&mask, __pthread_sig_cancel); /* for thread termination */
  sigdelset(&mask, SIGTRAP);              /* for debugging purposes */
  if (__pthread_threads_debug && __pthread_sig_debug > 0)
    sigdelset(&mask, __pthread_sig_debug);
  sigprocmask(SIG_SETMASK, &mask, NULL);
  /* Raise our priority to match that of the main thread */
  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
  /* Synchronize debugging of the thread manager */
  n = __libc_read(reqfd, (char *)&request, sizeof(request));
  ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
  ufd.fd = reqfd;
  ufd.events = POLLIN;
  /* Enter server loop */
  while(1) {
    n = __poll(&ufd, 1, 2000);

    /* Check for termination of the main thread: if the manager has been
       reparented to init, the initial thread is gone. */
    if (getppid() == 1) {
      pthread_kill_all_threads(SIGKILL, 0);
      _exit(0);
    }
    /* Check for dead children */
    if (terminated_children) {
      terminated_children = 0;
      pthread_reap_children();
    }
    /* Read and execute request */
    if (n == 1 && (ufd.revents & POLLIN)) {
      n = __libc_read(reqfd, (char *)&request, sizeof(request));
      ASSERT(n == sizeof(request));
      switch(request.req_kind) {
      case REQ_CREATE:
        request.req_thread->p_retcode =
          pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
                                request.req_args.create.attr,
                                request.req_args.create.fn,
                                request.req_args.create.arg,
                                &request.req_args.create.mask,
                                request.req_thread->p_pid,
                                request.req_thread->p_report_events,
                                &request.req_thread->p_eventbuf.eventmask);
        restart(request.req_thread);
        break;
      case REQ_FREE:
        pthread_handle_free(request.req_args.free.thread_id);
        break;
      case REQ_PROCESS_EXIT:
        pthread_handle_exit(request.req_thread,
                            request.req_args.exit.code);
        break;
      case REQ_MAIN_THREAD_EXIT:
        main_thread_exiting = 1;
        if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
          restart(__pthread_main_thread);
          return 0;
        }
        break;
      case REQ_POST:
        __new_sem_post(request.req_args.post);
        break;
      case REQ_DEBUG:
        /* Make gdb aware of the new thread; gdb will restart it when it
           is ready to handle it. */
        if (__pthread_threads_debug && __pthread_sig_debug > 0)
          raise(__pthread_sig_debug);
        break;
      }
    }
  }
}

int __pthread_manager_event(void *arg)
{
  /* If we have special thread_self processing, initialize it. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up. */
  __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
  /* Free it immediately. */
  __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));

  return __pthread_manager(arg);
}

/* Process creation */

static int pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
  struct pthread_request request;
  void * outcome;
  /* Initialize special thread_self processing, if any. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get here
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Initial signal mask is that of the creating thread. (Otherwise,
     we'd just inherit the mask of the thread manager.) */
  sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                         THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (__pthread_manager_thread.p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
  /* Make gdb aware of new thread */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    __libc_write(__pthread_manager_request,
                 (char *) &request, sizeof(request));
    suspend(self);
  }
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
                                                           p_start_args.arg));
  /* Exit with the given return value */
  pthread_exit(outcome);
  return 0;
}

static int pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get here
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up. */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately. */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function. */
  return pthread_start_thread (arg);
}

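/* Allocate a stack (and room for the thread descriptor) for a new thread.
   In the default case one slot looks like this, addresses decreasing
   downward from default_new_thread + 1 at the top:

       thread descriptor (returned in *out_new_thread)
       stack, growing down toward *out_new_thread_bottom
       optional guard area of *out_guardsize bytes, mapped inaccessible

   Only INITIAL_STACK_SIZE bytes are mapped up front; MAP_GROWSDOWN lets the
   kernel grow the mapping as the stack is used. */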
static int pthread_allocate_stack(const pthread_attr_t *attr,
                                  pthread_descr default_new_thread,
                                  int pagesize,
                                  pthread_descr * out_new_thread,
                                  char ** out_new_thread_bottom,
                                  char ** out_guardaddr,
                                  size_t * out_guardsize)
{
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * guardaddr;
  size_t stacksize, guardsize;

  if (attr != NULL && attr->__stackaddr_set)
    {
      /* The user provided a stack. */
      new_thread =
        (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
      new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
      guardaddr = NULL;
      guardsize = 0;
      __pthread_nonstandard_stacks = 1;
    }
  else
    {
      stacksize = STACK_SIZE - pagesize;
      if (attr != NULL)
        stacksize = MIN (stacksize, roundup(attr->__stacksize, pagesize));
      /* Allocate space for stack and thread descriptor at default address */
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize;
      if (mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
               INITIAL_STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN,
               -1, 0) == MAP_FAILED)
        /* Bad luck, this segment is already mapped. */
        return -1;
      /* We managed to get a stack.  Now see whether we need a guard
         and allocate it if necessary.  Notice that the default
         attributes (stack_size = STACK_SIZE - pagesize) do not need
         a guard page, since the RLIMIT_STACK soft limit prevents stacks
         from running into one another. */
      if (stacksize == STACK_SIZE - pagesize)
        {
          /* We don't need a guard page. */
          guardaddr = NULL;
          guardsize = 0;
        }
      else
        {
          /* Put a bad page at the bottom of the stack */
          guardsize = attr->__guardsize;
          guardaddr = (void *)new_thread_bottom - guardsize;
          if (mmap ((caddr_t) guardaddr, guardsize, 0, MAP_FIXED, -1, 0)
              == MAP_FAILED)
            {
              /* We don't make this an error. */
              guardaddr = NULL;
              guardsize = 0;
            }
        }
    }
  /* Clear the thread data structure. */
  memset (new_thread, '\0', sizeof (*new_thread));
  *out_new_thread = new_thread;
  *out_new_thread_bottom = new_thread_bottom;
  *out_guardaddr = guardaddr;
  *out_guardsize = guardsize;
  return 0;
}

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0;
  int pagesize = __getpagesize();

  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
                                 &new_thread, &new_thread_bottom,
                                 &guardaddr, &guardsize) == 0)
        break;
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
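  /* The counter is always advanced in steps of PTHREAD_THREADS_MAX and
     0 <= sseg < PTHREAD_THREADS_MAX, so the id combines a generation count
     with the segment index; taking the id modulo PTHREAD_THREADS_MAX
     recovers the slot in __pthread_handles. */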
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value. */
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_self = new_thread;
  new_thread->p_nr = sseg;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;

    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
              sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
      __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Make the new thread ID available to the caller now.  If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID. */
  *thread = new_thread_id;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not. */
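  /* Both variants clone() with CLONE_VM | CLONE_FS | CLONE_FILES |
     CLONE_SIGHAND, so the new process shares the address space, filesystem
     attributes, file descriptors and signal handlers with its creator, and
     with __pthread_sig_cancel as its termination signal, which is how the
     manager gets notified (see __pthread_manager_sighandler) and later
     reaps it with waitpid(..., __WCLONE). */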
  pid = 0;     /* Note that the thread can never have PID zero. */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop. */
          __pthread_lock(new_thread->p_lock, NULL);

          /* We have to report this event. */
          pid = __clone(pthread_start_thread_event, (void **) new_thread,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger. */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();

              /* Now restart the thread. */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }
  if (pid == 0)
    pid = __clone(pthread_start_thread, (void **) new_thread,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                  __pthread_sig_cancel, new_thread);
  /* Check if cloning succeeded */
  if (pid == -1) {
    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
        if (new_thread->p_guardsize != 0)
          munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
        munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
               INITIAL_STACK_SIZE);
      }
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return errno;
  }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  return 0;
}


/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  pthread_readlock_info *iter, *next;

  ASSERT(th->p_exited);
  /* Make the handle invalid */
  handle = thread_handle(th->p_tid);
  __pthread_lock(&handle->h_lock, NULL);
  handle->h_descr = NULL;
  handle->h_bottom = (char *)(-1L);
  __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD_SELF
  FREE_THREAD_SELF(th, th->p_nr);
#endif
  /* One fewer thread in __pthread_handles */
  __pthread_handles_num--;

  /* Destroy read lock list, and list of free read lock structures.
     If the former is not empty, it means the thread exited while
     holding read locks! */

  for (iter = th->p_readlock_list; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  for (iter = th->p_readlock_free; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  /* If initial thread, nothing to free */
  if (th == &__pthread_initial_thread) return;
  if (!th->p_userstack)
    {
      /* Free the stack and thread descriptor area */
      if (th->p_guardsize != 0)
        munmap(th->p_guardaddr, th->p_guardsize);
      munmap((caddr_t) ((char *)(th+1) - STACK_SIZE), STACK_SIZE);
    }
}

/* Handle threads that have exited */

static void pthread_exited(pid_t pid)
{
  pthread_descr th;
  int detached;
  /* Find thread with that pid */
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    if (th->p_pid == pid) {
      /* Remove thread from list of active threads */
      th->p_nextlive->p_prevlive = th->p_prevlive;
      th->p_prevlive->p_nextlive = th->p_nextlive;
      /* Mark thread as exited, and if detached, free its resources */
      __pthread_lock(th->p_lock, NULL);
      th->p_exited = 1;
      /* If we have to signal this event do it now. */
      if (th->p_report_events)
        {
          /* See whether TD_DEATH is in any of the masks. */
          int idx = __td_eventword (TD_REAP);
          uint32_t mask = __td_eventmask (TD_REAP);

          if ((mask & (__pthread_threads_events.event_bits[idx]
                       | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
            {
              /* Yep, we have to signal the death. */
              th->p_eventbuf.eventnum = TD_DEATH;
              th->p_eventbuf.eventdata = th;
              __pthread_last_event = th;

              /* Now call the function to signal the event. */
              __linuxthreads_reap_event();
            }
        }
      detached = th->p_detached;
      __pthread_unlock(th->p_lock);
      if (detached)
        pthread_free(th);
      break;
    }
  }
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves. */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    _exit(0);
  }
}

static void pthread_reap_children(void)
{
  pid_t pid;
  int status;

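  /* waitpid() with __WCLONE collects children that report their exit with
     a signal other than SIGCHLD -- i.e. the clone()d threads above, which
     use __pthread_sig_cancel as their termination signal. */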
  while ((pid = __libc_waitpid(-1, &status, WNOHANG | __WCLONE)) > 0) {
    pthread_exited(pid);
    if (WIFSIGNALED(status)) {
      /* If a thread died due to a signal, send the same signal to
         all other threads, including the main thread. */
      pthread_kill_all_threads(WTERMSIG(status), 1);
      _exit(0);
    }
  }
}

/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_handle_free(pthread_t th_id)
{
  pthread_handle handle = thread_handle(th_id);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, th_id)) {
    /* pthread_reap_children has deallocated the thread already,
       nothing needs to be done */
    __pthread_unlock(&handle->h_lock);
    return;
  }
  th = handle->h_descr;
  if (th->p_exited) {
    __pthread_unlock(&handle->h_lock);
    pthread_free(th);
  } else {
    /* The Unix process of the thread is still running.
       Mark the thread as detached so that the thread manager will
       deallocate its resources when the Unix process exits. */
    th->p_detached = 1;
    __pthread_unlock(&handle->h_lock);
  }
}

/* Send a signal to all running threads */

static void pthread_kill_all_threads(int sig, int main_thread_also)
{
  pthread_descr th;
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, sig);
  }
  if (main_thread_also) {
    kill(__pthread_main_thread->p_pid, sig);
  }
}

/* Process-wide exit() */

static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
{
  pthread_descr th;
  __pthread_exit_requested = 1;
  __pthread_exit_code = exitcode;
  /* Send the CANCEL signal to all running threads, including the main
     thread, but excluding the thread from which the exit request originated
     (that thread must complete the exit, e.g. calling atexit functions
     and flushing stdio buffers). */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, __pthread_sig_cancel);
  }
  /* Now, wait for all these threads, so that they don't become zombies
     and their times are properly added to the thread manager's times. */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    waitpid(th->p_pid, NULL, __WCLONE);
  }
  restart(issuing_thread);
  _exit(0);
}

/* Handler for __pthread_sig_cancel in thread manager thread */

void __pthread_manager_sighandler(int sig)
{
  terminated_children = 1;
}

/* Adjust priority of thread manager so that it always runs at a priority
   higher than all threads */

void __pthread_manager_adjust_prio(int thread_prio)
{
  struct sched_param param;

  if (thread_prio <= __pthread_manager_thread.p_priority) return;
  param.sched_priority =
    thread_prio < __sched_get_priority_max(SCHED_FIFO)
    ? thread_prio + 1 : thread_prio;
  __sched_setscheduler(__pthread_manager_thread.p_pid, SCHED_FIFO, &param);
  __pthread_manager_thread.p_priority = thread_prio;
}