]> sourceware.org Git - glibc.git/blob - nptl/init.c
Update.
[glibc.git] / nptl / init.c
1 /* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20 #include <assert.h>
21 #include <limits.h>
22 #include <signal.h>
23 #include <stdlib.h>
24 #include <unistd.h>
25 #include <sys/param.h>
26 #include <sys/resource.h>
27 #include <pthreadP.h>
28 #include <atomic.h>
29 #include <ldsodefs.h>
30 #include <tls.h>
31 #include <fork.h>
32 #include <version.h>
33 #include <shlib-compat.h>
34
35
#ifndef __NR_set_tid_address
/* Fallback syscall numbers for set_tid_address, needed while kernel
   headers that predate the syscall are still in use.
   XXX For the time being...  Once we can rely on the kernel headers
   having the definition remove these lines.  */
#if defined __s390__
# define __NR_set_tid_address	252
#elif defined __ia64__
# define __NR_set_tid_address	1233
#elif defined __i386__
# define __NR_set_tid_address	258
#elif defined __x86_64__
# define __NR_set_tid_address	218
#elif defined __powerpc__
# define __NR_set_tid_address	232
#elif defined __sparc__
# define __NR_set_tid_address	166
#else
# error "define __NR_set_tid_address"
#endif
#endif
55
56
/* Default stack size.  Computed from RLIMIT_STACK (or an
   architecture default) in __pthread_initialize_minimal_internal
   below.  */
size_t __default_stacksize attribute_hidden;

/* Size and alignment of static TLS block.  The alignment is stored
   as "alignment minus one" so it can be used directly as a mask.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] = VERSION;


#if defined USE_TLS && !defined SHARED
/* In the static case the TLS setup normally done by the dynamic
   linker has to be performed here; defined in libc.  */
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif
71
72
#ifdef SHARED
/* Table of libpthread entry points handed to libc through
   __libc_pthread_init (see the end of this file) so libc can forward
   pthread calls into this library.  Only needed for the shared build;
   statically linked programs resolve the symbols directly, hence the
   NULL definition in the #else branch.  */
static struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    /* Old attribute-initialization entry point kept for binary
       compatibility with glibc 2.0 era binaries.  */
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    /* Pre-2.3.2 condition-variable entry points kept for binary
       compatibility.  */
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once_internal,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock_internal,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock_internal,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock_internal,
    .ptr___pthread_key_create = __pthread_key_create_internal,
    .ptr___pthread_getspecific = __pthread_getspecific_internal,
    .ptr___pthread_setspecific = __pthread_setspecific_internal,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
135
136
137 /* For asynchronous cancellation we use a signal. This is the handler. */
138 static void
139 sigcancel_handler (int sig, siginfo_t *si, void *ctx)
140 {
141 /* Safety check. It would be possible to call this function for
142 other signals and send a signal from another thread. This is not
143 correct and might even be a security problem. Try to catch as
144 many incorrect invocations as possible. */
145 if (sig != SIGCANCEL
146 #ifdef __ASSUME_CORRECT_SI_PID
147 /* Kernels before 2.5.75 stored the thread ID and not the process
148 ID in si_pid so we skip this test. */
149 || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
150 #endif
151 || si->si_code != SI_TKILL)
152 return;
153
154 struct pthread *self = THREAD_SELF;
155
156 int oldval = THREAD_GETMEM (self, cancelhandling);
157 while (1)
158 {
159 /* We are canceled now. When canceled by another thread this flag
160 is already set but if the signal is directly send (internally or
161 from another process) is has to be done here. */
162 int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
163
164 if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
165 /* Already canceled or exiting. */
166 break;
167
168 int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
169 oldval);
170 if (curval == oldval)
171 {
172 /* Set the return value. */
173 THREAD_SETMEM (self, result, PTHREAD_CANCELED);
174
175 /* Make sure asynchronous cancellation is still enabled. */
176 if ((newval & CANCELTYPE_BITMASK) != 0)
177 /* Run the registered destructors and terminate the thread. */
178 __do_cancel ();
179
180 break;
181 }
182
183 oldval = curval;
184 }
185 }
186
187
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.
   Returns the address of libc's per-thread dlerror state slot; used
   below to take over GL(dl_error_catch_tsd) from the dynamic
   linker.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
191
192
/* Early one-time initialization of the thread library for the
   initial (main) thread: sets up its thread descriptor, installs the
   cancellation signal handler, computes the default stack size and
   static TLS layout, and hands libpthread's entry points over to
   libc/ld.so.  Exported to libc via the __pthread_initialize_minimal
   alias below.  */
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  /* Register &pd->tid with the kernel for clearing at thread exit;
     the syscall's return value is stored as both the thread and
     process ID of the initial thread.  */
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  /* Point the first thread-specific-data block pointer at the array
     embedded in the descriptor itself.  */
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  /* The initial thread runs on the stack provided by the kernel, not
     one allocated by this library.  */
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);


  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* The parent process might have left the signal blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			   NULL, _NSIG / 8);


  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;

#ifdef NEED_SEPARATE_REGISTER_STACK
  /* Only half the rlimit is used for the normal stack -- presumably
     the other half goes to the separate register backing store.
     NOTE(review): confirm against the ia64 port.  */
  __default_stacksize = MAX (limit.rlim_cur / 2, PTHREAD_STACK_MIN);
#else
  __default_stacksize = MAX (limit.rlim_cur, PTHREAD_STACK_MIN);
#endif
  /* The default stack size better should be a multiple of the page
     size.  */
  assert (__default_stacksize % __sysconf (_SC_PAGESIZE) == 0);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  /* Stored as alignment minus one so it can be used directly as a
     mask.  */
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal
     location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  /* Re-acquire the load lock as many times as ld.so had it locked,
     this time through the pthread mutex implementation, so the
     recursion count is preserved across the switch-over.  */
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);
}
strong_alias (__pthread_initialize_minimal_internal,
	      __pthread_initialize_minimal)
This page took 0.049175 seconds and 5 git commands to generate.