/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>

#include <tls.h>

/* We don't need any of this if TLS is not supported.  */
#ifdef USE_TLS

# include <dl-tls.h>
# include <ldsodefs.h>

/* Value used for dtv entries for which the allocation is delayed.  */
# define TLS_DTV_UNALLOCATED	((void *) -1l)


/* Out-of-memory handler.  */
# ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
# endif


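/* Return the TLS module id to assign to a newly loaded object.  Ids of
   unloaded modules leave gaps in the slotinfo list; when dl_tls_dtv_gaps
   is set the first free slot past the initially loaded modules is reused,
   otherwise a fresh id one past the current maximum is handed out.  */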
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that dl_tls_dtv_slotinfo_list is not allocated
         yet when the function is called for the first time.  */
      result = GL(dl_tls_static_nelem) + 1;
      /* If this were not true we would not have assumed there is a gap.  */
      assert (result <= GL(dl_tls_max_dtv_idx));
      do
        {
          while (result - disp < runp->len)
            {
              if (runp->slotinfo[result - disp].map == NULL)
                break;

              ++result;
              assert (result <= GL(dl_tls_max_dtv_idx) + 1);
            }

          if (result - disp < runp->len)
            break;

          disp += runp->len;
        }
      while ((runp = runp->next) != NULL);

      if (result >= GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx));

          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:
      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}


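/* Compute the offsets of the initially loaded modules' TLS blocks within
   the static TLS area.  A rough sketch of the two supported layouts (the
   exact placement of the TCB is defined by each architecture's tls.h):

     TLS_TCB_AT_TP:  [ blockN | ... | block2 | block1 | TCB ]
                     l_tls_offset is the distance of a block below the
                     thread pointer, which points at the TCB.

     TLS_DTV_AT_TP:  [ TCB | block1 | block2 | ... | blockN ]
                     l_tls_offset is the distance of a block above the
                     thread pointer.

   Each offset is rounded up to the respective block's alignment.  */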
void
internal_function
_dl_determine_tlsoffset (void)
{
  struct dtv_slotinfo *slotinfo;
  size_t max_align = __alignof__ (void *);
  size_t offset;
  size_t cnt;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

# if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  offset = 0;

  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* Compute the offset of the next TLS block.  */
      offset = roundup (offset + slotinfo[cnt].map->l_tls_blocksize,
                        slotinfo[cnt].map->l_tls_align);

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = offset;
    }

  /* The thread descriptor (pointed to by the thread pointer) has its
     own alignment requirement.  Adjust the static TLS size
     and TLS offsets appropriately.  */
  // XXX How to deal with this.  We cannot simply add zero bytes
  // XXX after the first (closest to the TCB) TLS block since this
  // XXX would invalidate the offsets the linker creates for the LE
  // XXX model.

  GL(dl_tls_static_size) = offset + TLS_TCB_SIZE;
# elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  offset = TLS_TCB_SIZE;

  /* The first block starts right after the TCB.  */
  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  if (slotinfo[1].map != NULL)
    {
      size_t prev_size;

      offset = roundup (offset, slotinfo[1].map->l_tls_align);
      slotinfo[1].map->l_tls_offset = offset;
      max_align = slotinfo[1].map->l_tls_align;
      prev_size = slotinfo[1].map->l_tls_blocksize;

      for (cnt = 2; slotinfo[cnt].map != NULL; ++cnt)
        {
          assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

          max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

          /* Compute the offset of the next TLS block.  */
          offset = roundup (offset + prev_size,
                            slotinfo[cnt].map->l_tls_align);

          /* XXX For some architectures we perhaps should store the
             negative offset.  */
          slotinfo[cnt].map->l_tls_offset = offset;

          prev_size = slotinfo[cnt].map->l_tls_blocksize;
        }

      offset += prev_size;
    }

  GL(dl_tls_static_size) = offset;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = MAX (TLS_TCB_ALIGN, max_align);
}


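/* Allocate the static TLS area and the dtv for a new thread.  Note the
   dtv convention used throughout this file: the dtv pointer later
   retrieved with GET_DTV refers to element 1 of the allocated array, so
   dtv[-1].counter is the number of module entries available,
   dtv[0].counter is the generation the dtv is up to date with, and
   dtv[1] onwards are the per-module entries (module ids start at 1).  */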
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  dtv_t *dtv;
  size_t dtv_length;

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), GL(dl_tls_static_size));
  if (__builtin_expect (result == NULL, 0))
    return result;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should in most cases avoid having to
     expand the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = (dtv_t *) malloc ((dtv_length + 2) * sizeof (dtv_t));
  if (dtv != NULL)
    {
# if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + GL(dl_tls_static_size) - TLS_TCB_SIZE;
# endif

      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;
      /* Fill in the generation number.  */
      dtv[1].counter = GL(dl_tls_generation) = 0;
      /* Initialize the rest of the dtv with zero to indicate
         nothing there.  */
      memset (dtv + 2, '\0', dtv_length * sizeof (dtv_t));

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    {
      free (result);
      result = NULL;
    }

  return result;
}


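/* Set up the dtv and static TLS blocks for the modules loaded at startup
   in the storage allocated above.  Blocks of objects brought in by dlopen
   (lt_loaded) are not copied here; their dtv entries are marked
   TLS_DTV_UNALLOCATED and the allocation is deferred until first use.  */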
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  dtv_t *dtv;
  struct dtv_slotinfo_list *listp;
  size_t total = 0;

  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv = GET_DTV (result);

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          if (map->l_type == lt_loaded)
            {
              /* For dynamically loaded modules we simply store
                 the value indicating deferred allocation.  */
              dtv[map->l_tls_modid].pointer = TLS_DTV_UNALLOCATED;
              continue;
            }

          assert (map->l_tls_modid == cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# if TLS_TCB_AT_TP
          assert (map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
# elif TLS_DTV_AT_TP
          dest = (char *) result + map->l_tls_offset;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

          /* Copy the initialization image and clear the BSS part.  */
          dtv[map->l_tls_modid].pointer = dest;
          memset (__mempcpy (dest, map->l_tls_initimage,
                             map->l_tls_initimage_size), '\0',
                  map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  return result;
}
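/* Convenience wrapper: allocate the storage and initialize it for the
   currently loaded modules in one step.  */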
void *
internal_function
_dl_allocate_tls (void)
{
  return _dl_allocate_tls_init (_dl_allocate_tls_storage ());
}
INTDEF(_dl_allocate_tls)


void
internal_function
_dl_deallocate_tls (void *tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* The array starts with dtv[-1].  */
  free (dtv - 1);

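  /* XXX Under TLS_TCB_AT_TP the pointer handed out by _dl_allocate_tls is
     not the start of the memalign'ed block (it was advanced by
     GL(dl_tls_static_size) - TLS_TCB_SIZE in _dl_allocate_tls_storage),
     so this free appears to need the same adjustment in reverse.  */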
  free (tcb);
}



# ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
#  ifndef GET_ADDR_ARGS
#   define GET_ADDR_ARGS tls_index *ti
#  endif
#  ifndef GET_ADDR_MODULE
#   define GET_ADDR_MODULE ti->ti_module
#  endif
#  ifndef GET_ADDR_OFFSET
#   define GET_ADDR_OFFSET ti->ti_offset
#  endif
/* Systems which do not have tls_index also probably have to define
   DONT_USE_TLS_INDEX.  */

#  ifndef __TLS_GET_ADDR
#   define __TLS_GET_ADDR __tls_get_addr
#  endif

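/* For illustration only (the real declarations live in the per-port
   headers and the field types vary by ABI): with the structure-based
   default a general-dynamic access effectively performs

     tls_index ti = { .ti_module = modid, .ti_offset = offset };
     void *p = __tls_get_addr (&ti);

   where the tls_index object normally lives in the GOT, while the
   IA-64-style variant is called as __tls_get_addr (modid, offset).
   Both return the address of the variable in the calling thread's
   TLS block for that module.  */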

/* Return the symbol address given the map of the module it is in and
   the symbol record.  This is used in dl-sym.c.  */
void *
internal_function
_dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
{
#  ifndef DONT_USE_TLS_INDEX
  tls_index tmp =
    {
      .ti_module = map->l_tls_modid,
      .ti_offset = ref->st_value
    };

  return __TLS_GET_ADDR (&tmp);
#  else
  return __TLS_GET_ADDR (map->l_tls_modid, ref->st_value);
#  endif
}


static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}


/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
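/* Fast path: the thread's dtv is current (its generation matches
   GL(dl_tls_generation)) and the module's block is already allocated;
   then the result is simply dtv[GET_ADDR_MODULE].pointer plus the
   offset.  Otherwise the dtv is first brought up to date from the
   slotinfo list, and a still-unallocated block is allocated and
   initialized on this first access.  */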
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    {
      struct dtv_slotinfo_list *listp;
      size_t idx;

      /* The global dl_tls_dtv_slotinfo_list array contains for each module
         index the generation counter current when the entry was
         created.  This array never shrinks so that all module indices
         which were valid at some time can be used to access it.
         Before the first use of a new module index in this function
         the array was extended appropriately.  Access also does not
         have to be guarded against modifications of the array.  It is
         assumed that pointer-size values can be read atomically even
         in SMP environments.  It is possible that other threads at
         the same time dynamically load code and therefore add to the
         slotinfo list.  This is a problem since we must not pick up
         any information about incomplete work.  The solution to this
         is to ignore all dtv slots which were created after the one
         we are currently interested in.  We know that dynamic loading
         for this module is completed and this is the last load
         operation we know finished.  */
      idx = GET_ADDR_MODULE;
      listp = GL(dl_tls_dtv_slotinfo_list);
      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      if (dtv[0].counter < listp->slotinfo[idx].gen)
        {
          /* The generation counter for the slot is higher than what
             the current dtv implements.  We have to update the whole
             dtv but only those entries with a generation counter <=
             the one for the entry we need.  */
          size_t new_gen = listp->slotinfo[idx].gen;
          size_t total = 0;

          /* We have to look through the entire dtv slotinfo list.  */
          listp = GL(dl_tls_dtv_slotinfo_list);
          do
            {
              size_t cnt;

              for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
                {
                  size_t gen = listp->slotinfo[cnt].gen;
                  struct link_map *map;
                  size_t modid;

                  if (gen > new_gen)
                    /* This is a slot for a generation younger than
                       the one we are handling now.  It might be
                       incompletely set up so ignore it.  */
                    continue;

                  /* If the entry is older than the current dtv layout
                     we know we don't have to handle it.  */
                  if (gen <= dtv[0].counter)
                    continue;

                  /* If there is no map this means the entry is empty.  */
                  map = listp->slotinfo[cnt].map;
                  if (map == NULL)
                    {
                      /* If this modid was used at some point the memory
                         might still be allocated.  */
                      if (dtv[total + cnt].pointer != TLS_DTV_UNALLOCATED)
                        free (dtv[total + cnt].pointer);

                      continue;
                    }

                  /* Check whether the current dtv array is large enough.  */
                  modid = map->l_tls_modid;
                  assert (total + cnt == modid);
                  if (dtv[-1].counter < modid)
                    {
                      /* Reallocate the dtv.  */
                      dtv_t *newp;
                      size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
                      size_t oldsize = dtv[-1].counter;

                      assert (map->l_tls_modid <= newsize);

                      if (dtv == GL(dl_initial_dtv))
                        {
                          /* This is the initial dtv that was allocated
                             during rtld startup using the dl-minimal.c
                             malloc instead of the real malloc.  We can't
                             free it, we have to abandon the old storage.  */

                          newp = malloc ((2 + newsize) * sizeof (dtv_t));
                          if (newp == NULL)
                            oom ();
                          memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
                        }
                      else
                        {
                          newp = realloc (&dtv[-1],
                                          (2 + newsize) * sizeof (dtv_t));
                          if (newp == NULL)
                            oom ();
                        }

                      newp[0].counter = newsize;

                      /* Clear the newly allocated part.  */
                      memset (newp + 2 + oldsize, '\0',
                              (newsize - oldsize) * sizeof (dtv_t));

                      /* Point dtv to the generation counter.  */
                      dtv = &newp[1];

                      /* Install this new dtv in the thread data
                         structures.  */
                      INSTALL_NEW_DTV (dtv);
                    }

                  /* If there is currently memory allocated for this
                     dtv entry, free it.  */
                  /* XXX Ideally we will at some point create a memory
                     pool.  */
                  if (dtv[modid].pointer != TLS_DTV_UNALLOCATED)
                    /* Note that free is called for NULL as well.  We
                       deallocate even if it is this dtv entry we are
                       supposed to load.  The reason is that we call
                       memalign and not malloc.  */
                    free (dtv[modid].pointer);

                  /* This module is loaded dynamically.  We defer
                     memory allocation.  */
                  dtv[modid].pointer = TLS_DTV_UNALLOCATED;

                  if (modid == GET_ADDR_MODULE)
                    the_map = map;
                }

              total += listp->len;
            }
          while ((listp = listp->next) != NULL);

          /* This will be the new maximum generation counter.  */
          dtv[0].counter = new_gen;
        }
    }

  p = dtv[GET_ADDR_MODULE].pointer;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    {
      /* The allocation was deferred.  Do it now.  */
      if (the_map == NULL)
        {
          /* Find the link map for this module.  */
          size_t idx = GET_ADDR_MODULE;
          struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

          while (idx >= listp->len)
            {
              idx -= listp->len;
              listp = listp->next;
            }

          the_map = listp->slotinfo[idx].map;
        }

      p = dtv[GET_ADDR_MODULE].pointer = allocate_and_init (the_map);
    }

  return (char *) p + GET_ADDR_OFFSET;
}
# endif

#endif	/* use TLS */