]> sourceware.org Git - glibc.git/blame - nptl/init.c
* string/string.h: Define correct C++ prototypes for gcc 4.4.
[glibc.git] / nptl / init.c
CommitLineData
cbd8aeb8 1/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
76a50749
UD
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20#include <assert.h>
cbd8aeb8 21#include <errno.h>
76a50749
UD
22#include <limits.h>
23#include <signal.h>
24#include <stdlib.h>
25#include <unistd.h>
26#include <sys/param.h>
27#include <sys/resource.h>
28#include <pthreadP.h>
29#include <atomic.h>
30#include <ldsodefs.h>
31#include <tls.h>
32#include <fork.h>
33#include <version.h>
bf293afe 34#include <shlib-compat.h>
2c0b891a 35#include <smp.h>
2edb61e3 36#include <lowlevellock.h>
f8de5057 37#include <kernel-features.h>
76a50749
UD
38
39
76a50749
UD
/* Size and alignment of static TLS block.  Filled in from the dynamic
   linker via _dl_get_tls_static_info during initialization below.  */
size_t __static_tls_size;
/* Alignment minus one, so it can be used directly as a mask.  */
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is NOT available and
   therefore must not be used.  Zero (the static initializer) means it
   is usable.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if the kernel DOES support FUTEX_CLOCK_REALTIME (the flag is
   set below once the runtime probe succeeds).  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
#define __set_futex_clock_realtime() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.
   Marked used so the linker keeps it even though nothing references it
   from this library.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifndef SHARED
/* In the static case the dynamic linker is absent, so libc must set up
   the TLS data structures itself; see the call below.  */
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif
69
70
#ifdef SHARED
/* Table of libpthread entry points handed to libc via __libc_pthread_init
   (see below) so libc can forward calls into libpthread without a hard
   symbol dependency.  Only built for the shared library; the static case
   passes NULL instead.  */
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    /* Compatibility entry point for binaries linked against glibc 2.0.  */
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    /* Compatibility entry points for the pre-2.3.2 condvar ABI.  */
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once_internal,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock_internal,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock_internal,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock_internal,
    .ptr___pthread_key_create = __pthread_key_create_internal,
    .ptr___pthread_getspecific = __pthread_getspecific_internal,
    .ptr___pthread_setspecific = __pthread_setspecific_internal,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = __free_stack_cache
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
137
138
76a50749
UD
/* For asynchronous cancellation we use a signal.  This is the handler.
   It validates the sender, then marks the current thread as canceled by
   atomically setting the CANCELING and CANCELED bits, and — if
   asynchronous cancelability is enabled — terminates the thread.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
	 ID in si_pid so we skip this test.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  /* CAS loop: retry until we either set the cancellation bits or observe
     that the thread is already canceled/exiting.  */
  while (1)
    {
      /* We are canceled now.  When canceled by another thread these flags
	 are already set, but if the signal was sent directly (internally
	 or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* We won the race: record the cancellation return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      /* CAS failed; retry with the value another thread just stored.  */
      oldval = curval;
    }
}
196
197
2edb61e3
UD
/* Command block describing the pending setuid/setgid-style syscall,
   published by __nptl_setxid before it signals every thread.  */
struct xid_command *__xidcmd attribute_hidden;

/* Handler for SIGSETXID: executes the uid/gid-changing system call
   described by __xidcmd in the context of this thread, so that the
   credential change is applied process-wide (the kernel call only
   affects the calling thread).  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
	 ID in si_pid so we skip this test.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  /* Perform the requested syscall in this thread's context.  */
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
			__xidcmd->id[1], __xidcmd->id[2]);

  /* Last thread to finish wakes the initiator waiting on the counter.  */
  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags = THREAD_GETMEM (self, cancelhandling);
  THREAD_SETMEM (self, cancelhandling, flags & ~SETXID_BITMASK);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);
}
241
242
b1531183
UD
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.
   Its value is propagated into the initial thread descriptor below.  */
static bool __nptl_initial_report_events;
250
/* One-time initialization of the NPTL runtime, run before main().  The
   ordering of the steps below is significant: TLS setup first, then the
   thread descriptor, feature probes, signal handlers, stack-size limits,
   and finally the hand-off of the function table to libc.  */
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  The
     set_tid_address syscall also arranges for the kernel to clear the
     tid field and wake waiters on thread exit.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				  - offsetof (pthread_mutex_t,
					      __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
			      sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    /* Record that the kernel lacks set_robust_list (or the syscall
       number is not even defined for this architecture).  */
    set_robust_list_not_avail ();

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  Probe with a
     harmless FUTEX_WAKE on a local word.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      /* The probe must fail (word != 1); what matters is whether the
	 error is ENOSYS (flag unsupported) or something else.  */
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			   NULL, _NSIG / 8);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
strong_alias (__pthread_initialize_minimal_internal,
	      __pthread_initialize_minimal)
This page took 0.221631 seconds and 5 git commands to generate.