/* sysdeps/generic/dl-tls.c (glibc) — blame-view residue removed.  */
1/* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20#include <assert.h>
aed283dd 21#include <signal.h>
a52d1562 22#include <stdlib.h>
aed283dd
UD
23#include <unistd.h>
24#include <sys/param.h>
3fb55878 25
d555194c 26#include <tls.h>
3fb55878
UD
27
28/* We don't need any of this if TLS is not supported. */
29#ifdef USE_TLS
30
fc093be1
UD
31# include <dl-tls.h>
32# include <ldsodefs.h>
d4468ab7 33
3fb55878 34/* Value used for dtv entries for which the allocation is delayed. */
aed283dd
UD
35# define TLS_DTV_UNALLOCATED ((void *) -1l)
36
37
/* Out-of-memory handler: report the failure and terminate.  Only
   needed in the shared (SHARED) build, where the deferred-allocation
   paths below can run out of memory at runtime.  */
# ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  /* _dl_fatal_printf does not return, matching the noreturn
     attribute above.  */
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
# endif
aed283dd 47
3fb55878
UD
48
49
/* Return the next unused TLS module ID.  When previously used IDs
   have been freed (GL(dl_tls_dtv_gaps) is set) the lowest free slot
   in the slotinfo list is reused; otherwise a fresh ID one past the
   current maximum is handed out.  */
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      /* DISP is the module-ID offset of the start of the slotinfo
         list element currently being scanned.  */
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that the dl_tls_dtv_slotinfo is not allocated
         yet when the function is called for the first times.  */
      result = GL(dl_tls_static_nelem) + 1;
      /* If the following would not be true we mustn't have assumed
         there is a gap.  */
      assert (result <= GL(dl_tls_max_dtv_idx));
      do
        {
          /* Walk this list element looking for a slot with no map
             attached (i.e. a reusable module ID).  */
          while (result - disp < runp->len)
            {
              if (runp->slotinfo[result - disp].map == NULL)
                break;

              ++result;
              assert (result <= GL(dl_tls_max_dtv_idx) + 1);
            }

          /* Stopped before the end of this element, so a free slot
             was found.  */
          if (result - disp < runp->len)
            break;

          disp += runp->len;
        }
      while ((runp = runp->next) != NULL);

      if (result >= GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx));

          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:
      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}
108
109
/* Compute the offset of each initially-loaded module's TLS block
   within the static TLS area, and record the total static TLS size
   and alignment in GL(dl_tls_static_size)/GL(dl_tls_static_align).
   Called once, before any thread's static block is allocated, while
   the slotinfo list still consists of a single element.  */
void
internal_function
_dl_determine_tlsoffset (void)
{
  struct dtv_slotinfo *slotinfo;
  size_t max_align = __alignof__ (void *);
  size_t offset;
  size_t cnt;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

# if TLS_TCB_AT_TP
  /* TCB-at-TP layout: blocks grow downward from the thread pointer,
     so offsets are distances below the TCB.  We simply start with
     zero.  */
  offset = 0;

  /* Slot 0 is unused; module IDs start at 1.  */
  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* Compute the offset of the next TLS block.  */
      offset = roundup (offset + slotinfo[cnt].map->l_tls_blocksize,
                        slotinfo[cnt].map->l_tls_align);

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = offset;
    }

  /* The thread descriptor (pointed to by the thread pointer) has its
     own alignment requirement.  Adjust the static TLS size
     and TLS offsets appropriately.  */
  // XXX How to deal with this.  We cannot simply add zero bytes
  // XXX after the first (closest to the TCB) TLS block since this
  // XXX would invalidate the offsets the linker creates for the LE
  // XXX model.

  GL(dl_tls_static_size) = offset + TLS_TCB_SIZE;
# elif TLS_DTV_AT_TP
  /* DTV-at-TP layout: the TLS blocks start right after the TCB and
     offsets are positive distances above the thread pointer.  */
  offset = TLS_TCB_SIZE;

  /* The first block starts right after the TCB.  */
  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  if (slotinfo[1].map != NULL)
    {
      size_t prev_size;

      offset = roundup (offset, slotinfo[1].map->l_tls_align);
      slotinfo[1].map->l_tls_offset = offset;
      max_align = slotinfo[1].map->l_tls_align;
      prev_size = slotinfo[1].map->l_tls_blocksize;

      for (cnt = 2; slotinfo[cnt].map != NULL; ++cnt)
        {
          assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

          max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

          /* Compute the offset of the next TLS block.  */
          offset = roundup (offset + prev_size,
                            slotinfo[cnt].map->l_tls_align);

          /* XXX For some architectures we perhaps should store the
             negative offset.  */
          slotinfo[cnt].map->l_tls_offset = offset;

          prev_size = slotinfo[cnt].map->l_tls_blocksize;
        }

      /* Account for the size of the last block.  */
      offset += prev_size;
    }

  GL(dl_tls_static_size) = offset;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = MAX (TLS_TCB_ALIGN, max_align);
}
197
198
a52d1562
UD
199void *
200internal_function
201_dl_allocate_tls (void)
202{
203 void *result;
204 dtv_t *dtv;
aed283dd 205 size_t dtv_length;
a52d1562
UD
206
207 /* Allocate a correctly aligned chunk of memory. */
208 /* XXX For now */
209 assert (GL(dl_tls_static_align) <= GL(dl_pagesize));
aed283dd
UD
210# ifdef MAP_ANON
211# define _dl_zerofd (-1)
212# else
213# define _dl_zerofd GL(dl_zerofd)
a52d1562
UD
214 if ((dl_zerofd) == -1)
215 GL(dl_zerofd) = _dl_sysdep_open_zero_fill ();
aed283dd
UD
216# define MAP_ANON 0
217# endif
8a30f00f 218 result = __mmap (0, GL(dl_tls_static_size) ?: 1, PROT_READ|PROT_WRITE,
a52d1562
UD
219 MAP_ANON|MAP_PRIVATE, _dl_zerofd, 0);
220
aed283dd
UD
221 /* We allocate a few more elements in the dtv than are needed for the
222 initial set of modules. This should avoid in most cases expansions
223 of the dtv. */
224 dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
225 dtv = (dtv_t *) malloc ((dtv_length + 2) * sizeof (dtv_t));
a52d1562
UD
226 if (result != MAP_FAILED && dtv != NULL)
227 {
aed283dd
UD
228 struct dtv_slotinfo_list *listp;
229 bool first_block = true;
230 size_t total = 0;
a52d1562
UD
231
232# if TLS_TCB_AT_TP
233 /* The TCB follows the TLS blocks. */
234 result = (char *) result + GL(dl_tls_static_size) - TLS_TCB_SIZE;
235# endif
236
aed283dd
UD
237 /* This is the initial length of the dtv. */
238 dtv[0].counter = dtv_length;
239 /* Fill in the generation number. */
240 dtv[1].counter = GL(dl_tls_generation) = 0;
241 /* Initialize all of the rest of the dtv with zero to indicate
242 nothing there. */
243 memset (dtv + 2, '\0', dtv_length * sizeof (dtv_t));
244
245 /* We have to look prepare the dtv for all currently loaded
246 modules using TLS. For those which are dynamically loaded we
247 add the values indicating deferred allocation. */
248 listp = GL(dl_tls_dtv_slotinfo_list);
249 while (1)
a52d1562 250 {
aed283dd
UD
251 size_t cnt;
252
253 for (cnt = first_block ? 1 : 0; cnt < listp->len; ++cnt)
a52d1562 254 {
aed283dd
UD
255 struct link_map *map;
256 void *dest;
257
258 /* Check for the total number of used slots. */
259 if (total + cnt >= GL(dl_tls_max_dtv_idx))
260 break;
261
262 map = listp->slotinfo[cnt].map;
263 if (map == NULL)
264 /* Unused entry. */
265 continue;
266
267 if (map->l_type == lt_loaded)
268 {
269 /* For dynamically loaded modules we simply store
270 the value indicating deferred allocation. */
271 dtv[1 + map->l_tls_modid].pointer = TLS_DTV_UNALLOCATED;
272 continue;
273 }
274
275 assert (map->l_tls_modid == cnt);
276 assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
a52d1562 277# if TLS_TCB_AT_TP
aed283dd
UD
278 assert (map->l_tls_offset >= map->l_tls_blocksize);
279 dest = (char *) result - map->l_tls_offset;
a52d1562 280# elif TLS_DTV_AT_TP
aed283dd 281 dest = (char *) result + map->l_tls_offset;
a52d1562
UD
282# else
283# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
284# endif
285
aed283dd
UD
286 /* We don't have to clear the BSS part of the TLS block
287 since mmap is used to allocate the memory which
288 guarantees it is initialized to zero. */
289 dtv[1 + cnt].pointer = memcpy (dest, map->l_tls_initimage,
290 map->l_tls_initimage_size);
a52d1562 291 }
aed283dd
UD
292
293 total += cnt;
294 if (total >= GL(dl_tls_max_dtv_idx))
295 break;
296
297 listp = listp->next;
298 assert (listp != NULL);
a52d1562
UD
299 }
300
301 /* Add the dtv to the thread data structures. */
302 INSTALL_DTV (result, dtv);
303 }
304 else if (result != NULL)
305 {
306 free (result);
307 result = NULL;
308 }
309
310 return result;
311}
e4138261
UD
312INTDEF(_dl_allocate_tls)
313
314
315void
316internal_function
317_dl_deallocate_tls (void *tcb)
318{
319 dtv_t *dtv = GET_DTV (tcb);
320
321 /* The array starts with dtv[-1]. */
322 free (dtv - 1);
323
324 munmap (tcb, GL(dl_tls_static_size));
325}
326
a52d1562
UD
327
328
aed283dd 329# ifdef SHARED
3fb55878
UD
330/* The __tls_get_addr function has two basic forms which differ in the
331 arguments. The IA-64 form takes two parameters, the module ID and
332 offset. The form used, among others, on IA-32 takes a reference to
333 a special structure which contain the same information. The second
334 form seems to be more often used (in the moment) so we default to
335 it. Users of the IA-64 form have to provide adequate definitions
336 of the following macros. */
aed283dd
UD
337# ifndef GET_ADDR_ARGS
338# define GET_ADDR_ARGS tls_index *ti
339# endif
340# ifndef GET_ADDR_MODULE
341# define GET_ADDR_MODULE ti->ti_module
342# endif
343# ifndef GET_ADDR_OFFSET
344# define GET_ADDR_OFFSET ti->ti_offset
345# endif
346/* Systems which do not have tls_index also probably have to define
347 DONT_USE_TLS_INDEX. */
348
349# ifndef __TLS_GET_ADDR
350# define __TLS_GET_ADDR __tls_get_addr
351# endif
352
353
354/* Return the symbol address given the map of the module it is in and
355 the symbol record. This is used in dl-sym.c. */
356void *
357internal_function
358_dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
359{
360# ifndef DONT_USE_TLS_INDEX
361 tls_index tmp =
362 {
363 .ti_module = map->l_tls_modid,
364 .ti_offset = ref->st_value
365 };
366
367 return __TLS_GET_ADDR (&tmp);
368# else
369 return __TLS_GET_ADDR (map->l_tls_modid, ref->st_value);
370# endif
371}
372
373
374static void *
375allocate_and_init (struct link_map *map)
376{
377 void *newp;
378
379 newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
380 if (newp == NULL)
381 oom ();
3fb55878 382
aed283dd
UD
383 /* Initialize the memory. */
384 memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
385 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
3fb55878 386
aed283dd
UD
387 return newp;
388}
389
390
391/* The generic dynamic and local dynamic model cannot be used in
392 statically linked applications. */
3fb55878
UD
393void *
394__tls_get_addr (GET_ADDR_ARGS)
395{
396 dtv_t *dtv = THREAD_DTV ();
aed283dd
UD
397 struct link_map *the_map = NULL;
398 void *p;
399
400 if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
401 {
402 struct dtv_slotinfo_list *listp;
403 size_t idx;
404
405 /* The global dl_tls_dtv_slotinfo array contains for each module
406 index the generation counter current when the entry was
407 created. This array never shrinks so that all module indices
408 which were valid at some time can be used to access it.
409 Before the first use of a new module index in this function
410 the array was extended appropriately. Access also does not
411 have to be guarded against modifications of the array. It is
412 assumed that pointer-size values can be read atomically even
413 in SMP environments. It is possible that other threads at
414 the same time dynamically load code and therefore add to the
415 slotinfo list. This is a problem since we must not pick up
416 any information about incomplete work. The solution to this
417 is to ignore all dtv slots which were created after the one
418 we are currently interested. We know that dynamic loading
419 for this module is completed and this is the last load
420 operation we know finished. */
421 idx = GET_ADDR_MODULE;
422 listp = GL(dl_tls_dtv_slotinfo_list);
423 while (idx >= listp->len)
424 {
425 idx -= listp->len;
426 listp = listp->next;
427 }
3fb55878 428
aed283dd
UD
429 if (dtv[0].counter < listp->slotinfo[idx].gen)
430 {
431 /* The generation counter for the slot is higher than what
432 the current dtv implements. We have to update the whole
433 dtv but only those entries with a generation counter <=
434 the one for the entry we need. */
435 size_t new_gen = listp->slotinfo[idx].gen;
436 size_t total = 0;
437
438 /* We have to look through the entire dtv slotinfo list. */
439 listp = GL(dl_tls_dtv_slotinfo_list);
440 do
441 {
442 size_t cnt;
443
444 for (cnt = total = 0 ? 1 : 0; cnt < listp->len; ++cnt)
445 {
446 size_t gen = listp->slotinfo[cnt].gen;
447 struct link_map *map;
448 size_t modid;
449
450 if (gen > new_gen)
451 /* This is a slot for a generation younger than
452 the one we are handling now. It might be
453 incompletely set up so ignore it. */
454 continue;
455
456 /* If the entry is older than the current dtv layout
457 we know we don't have to handle it. */
458 if (gen <= dtv[0].counter)
459 continue;
460
461 /* If there is no map this means the entry is empty. */
462 map = listp->slotinfo[cnt].map;
463 if (map == NULL)
464 {
465 /* If this modid was used at some point the memory
466 might still be allocated. */
467 if (dtv[total + cnt].pointer != TLS_DTV_UNALLOCATED)
468 free (dtv[total + cnt].pointer);
469
470 continue;
471 }
472
473 /* Check whether the current dtv array is large enough. */
474 modid = map->l_tls_modid;
475 assert (total + cnt == modid);
476 if (dtv[-1].counter < modid)
477 {
478 /* Reallocate the dtv. */
479 dtv_t *newp;
480 size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
481 size_t oldsize = dtv[-1].counter;
482
483 assert (map->l_tls_modid <= newsize);
484
485 newp = (dtv_t *) realloc (&dtv[-1],
486 (2 + newsize)
487 * sizeof (dtv_t));
488 if (newp == NULL)
489 oom ();
490
491 newp[0].counter = newsize;
492
493 /* Clear the newly allocate part. */
494 memset (newp + 2 + oldsize, '\0',
495 (newsize - oldsize) * sizeof (dtv_t));
496
497 /* Point dtv to the generation counter. */
498 dtv = &newp[1];
499
500 /* Install this new dtv in the thread data
501 structures. */
502 INSTALL_NEW_DTV (dtv);
503 }
504
505 /* If there is currently memory allocate for this
506 dtv entry free it. */
507 /* XXX Ideally we will at some point create a memory
508 pool. */
509 if (dtv[modid].pointer != TLS_DTV_UNALLOCATED)
510 /* Note that free is called for NULL is well. We
511 deallocate even if it is this dtv entry we are
512 supposed to load. The reason is that we call
513 memalign and not malloc. */
514 free (dtv[modid].pointer);
515
516 /* This module is loaded dynamically- We defer
517 memory allocation. */
518 dtv[modid].pointer = TLS_DTV_UNALLOCATED;
519
520 if (modid == GET_ADDR_MODULE)
521 the_map = map;
522 }
523
524 total += listp->len;
525 }
526 while ((listp = listp->next) != NULL);
3fb55878 527
aed283dd
UD
528 /* This will be the new maximum generation counter. */
529 dtv[0].counter = new_gen;
530 }
531 }
532
533 p = dtv[GET_ADDR_MODULE].pointer;
534
535 if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
536 {
537 /* The allocation was deferred. Do it now. */
538 if (the_map == NULL)
539 {
540 /* Find the link map for this module. */
541 size_t idx = GET_ADDR_MODULE;
542 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
543
544 while (idx >= listp->len)
545 {
546 idx -= listp->len;
547 listp = listp->next;
548 }
549
550 the_map = listp->slotinfo[idx].map;
551 }
552
553 p = dtv[GET_ADDR_MODULE].pointer = allocate_and_init (the_map);
554 }
555
556 return (char *) p + GET_ADDR_OFFSET;
3fb55878 557}
aed283dd 558# endif
3fb55878
UD
559
560#endif /* use TLS */