]> sourceware.org Git - glibc.git/blame - sysdeps/generic/dl-tls.c
Update.
[glibc.git] / sysdeps / generic / dl-tls.c
CommitLineData
3fb55878 1/* Thread-local storage handling in the ELF dynamic linker. Generic version.
733f25e6 2 Copyright (C) 2002, 2003 Free Software Foundation, Inc.
3fb55878
UD
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20#include <assert.h>
aed283dd 21#include <signal.h>
a52d1562 22#include <stdlib.h>
aed283dd
UD
23#include <unistd.h>
24#include <sys/param.h>
3fb55878 25
d555194c 26#include <tls.h>
3fb55878
UD
27
28/* We don't need any of this if TLS is not supported. */
29#ifdef USE_TLS
30
fc093be1
UD
31# include <dl-tls.h>
32# include <ldsodefs.h>
d4468ab7 33
c56baa87
RM
34/* Amount of excess space to allocate in the static TLS area
35 to allow dynamic loading of modules defining IE-model TLS data. */
36# define TLS_STATIC_SURPLUS 64
37
3fb55878 38/* Value used for dtv entries for which the allocation is delayed. */
aed283dd
UD
39# define TLS_DTV_UNALLOCATED ((void *) -1l)
40
41
/* Out-of-memory handler.  */
# ifdef SHARED
/* Abort with a fatal diagnostic when TLS memory cannot be allocated.
   Only built for the shared (rtld) case; marked noreturn since the
   fatal-print path does not come back.  */
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
# endif
aed283dd 51
3fb55878
UD
52
53
/* Return the next unused TLS module ID.  If IDs have been released by
   unloading modules (GL(dl_tls_dtv_gaps) is set), scan the slotinfo
   list for the first slot whose map pointer was cleared; otherwise
   simply hand out one past the current maximum.  */
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;	/* Module-ID index of the start of RUNP.  */
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
	 start since there are no gaps at that time.  Therefore it
	 does not matter that the dl_tls_dtv_slotinfo is not allocated
	 yet when the function is called for the first times.  */
      result = GL(dl_tls_static_nelem) + 1;
      /* If the following would not be true we mustn't have assumed
	 there is a gap.  */
      assert (result <= GL(dl_tls_max_dtv_idx));
      do
	{
	  /* Advance RESULT past occupied entries of this list element;
	     a NULL map marks a reusable (gap) slot.  */
	  while (result - disp < runp->len)
	    {
	      if (runp->slotinfo[result - disp].map == NULL)
		break;

	      ++result;
	      assert (result <= GL(dl_tls_max_dtv_idx) + 1);
	    }

	  /* Stopped inside this element, i.e. we found a gap.  */
	  if (result - disp < runp->len)
	    break;

	  disp += runp->len;
	}
      while ((runp = runp->next) != NULL);

      if (result >= GL(dl_tls_max_dtv_idx))
	{
	  /* The new index must indeed be exactly one higher than the
	     previous high.  */
	  assert (result == GL(dl_tls_max_dtv_idx));

	  /* There is no gap anymore.  */
	  GL(dl_tls_dtv_gaps) = false;

	  goto nogaps;
	}
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:
      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}
112
216455bc 113# ifdef SHARED
3fb55878
UD
114
/* Assign a static-TLS offset (l_tls_offset) to every module currently
   in the slotinfo list and initialize GL(dl_tls_static_used),
   GL(dl_tls_static_size) and GL(dl_tls_static_align).  The layout
   differs depending on whether the TCB lies above the TLS blocks
   (TLS_TCB_AT_TP) or below them (TLS_DTV_AT_TP).  Runs while the
   slotinfo list still has a single element (asserted below), i.e.
   before anything was loaded dynamically.  */
void
internal_function
_dl_determine_tlsoffset (void)
{
  struct dtv_slotinfo *slotinfo;
  size_t max_align = __alignof__ (void *);
  size_t offset;
  size_t cnt;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

# if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  offset = 0;

  /* Slot 0 is not used; module IDs start at 1.  */
  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* Compute the offset of the next TLS block.  */
      offset = roundup (offset + slotinfo[cnt].map->l_tls_blocksize,
			slotinfo[cnt].map->l_tls_align);

      /* XXX For some architectures we perhaps should store the
	 negative offset.  */
      slotinfo[cnt].map->l_tls_offset = offset;
    }

  /* The thread descriptor (pointed to by the thread pointer) has its
     own alignment requirement.  Adjust the static TLS size
     and TLS offsets appropriately.  */
  // XXX How to deal with this.  We cannot simply add zero bytes
  // XXX after the first (closest to the TCB) TLS block since this
  // XXX would invalidate the offsets the linker creates for the LE
  // XXX model.

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS + TLS_TCB_SIZE,
				    TLS_TCB_ALIGN);
# elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  offset = TLS_TCB_SIZE;

  /* The first block starts right after the TCB.  */
  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  if (slotinfo[1].map != NULL)
    {
      size_t prev_size;

      offset = roundup (offset, slotinfo[1].map->l_tls_align);
      slotinfo[1].map->l_tls_offset = offset;
      max_align = slotinfo[1].map->l_tls_align;
      prev_size = slotinfo[1].map->l_tls_blocksize;

      for (cnt = 2; slotinfo[cnt].map != NULL; ++cnt)
	{
	  assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

	  max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

	  /* Compute the offset of the next TLS block.  Note that the
	     block's own size (PREV_SIZE) is added only when the
	     following block's start is computed.  */
	  offset = roundup (offset + prev_size,
			    slotinfo[cnt].map->l_tls_align);

	  /* XXX For some architectures we perhaps should store the
	     negative offset.  */
	  slotinfo[cnt].map->l_tls_offset = offset;

	  prev_size = slotinfo[cnt].map->l_tls_blocksize;
	}

      /* Account for the size of the last block.  */
      offset += prev_size;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
				    TLS_TCB_ALIGN);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = MAX (TLS_TCB_ALIGN, max_align);
}
206
207
216455bc
RM
/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.
   Returns 0 on success, -1 if the slotinfo list cannot be allocated.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  /* One reserved entry (index 0), one usable entry, plus surplus.  */
  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  /* The slotinfo array is allocated inline after the list header.  */
  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
		  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
rtld_hidden_def (_dl_tls_setup)
238# endif
239
/* Allocate a fresh dtv for the thread whose TCB pointer is RESULT and
   install it there via INSTALL_DTV.  Returns RESULT on success, NULL
   if the dtv cannot be allocated.  */
static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  /* Two extra entries: dtv[-1] holds the length, dtv[0] the generation
     counter.  */
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
	 initialized with zero by calloc to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}
268
269
270/* Get size and alignment requirements of the static TLS block. */
271void
272internal_function
273_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
274{
275 *sizep = GL(dl_tls_static_size);
276 *alignp = GL(dl_tls_static_align);
277}
278
279
/* Allocate the raw memory for a thread's static TLS block plus TCB and
   attach a dtv to it.  Returns the TCB pointer (position within the
   allocation depends on TLS_TCB_AT_TP vs TLS_DTV_AT_TP), or NULL on
   allocation failure.  */
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

# if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
			  ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	  & ~(GL(dl_tls_static_align) - 1);
# endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  Keep the block's base pointer so it can be
	 freed if the dtv allocation fails.  */
      void *allocated = result;

# if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
	 libpthread) to do it, because we will initialize the DTV et al.  */
      memset (result, 0, TLS_TCB_SIZE);
# elif TLS_DTV_AT_TP
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
	 We can't ask the caller (i.e. libpthread) to do it, because we will
	 initialize the DTV et al.  */
      memset ((char *) result - TLS_PRE_TCB_SIZE, 0,
	      TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
# endif

      result = allocate_dtv (result);
      if (result == NULL)
	free (allocated);
    }

  return result;
}
b6836436 326
581dc54b
RM
327
/* Initialize the dtv and static TLS blocks of the thread whose TCB is
   RESULT: copy each startup module's TLS initialization image into its
   static offset, and mark dynamically loaded modules as deferred
   (TLS_DTV_UNALLOCATED).  Returns RESULT, or NULL if RESULT is NULL.  */
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;

  /* We have to prepare the dtv for all currently loaded
     modules using TLS.  For those which are dynamically loaded we
     add the values indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      /* Entry 0 of the first list element is unused (module IDs start
	 at 1), so skip it on the first iteration only.  */
      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	{
	  struct link_map *map;
	  void *dest;

	  /* Check for the total number of used slots.  */
	  if (total + cnt > GL(dl_tls_max_dtv_idx))
	    break;

	  map = listp->slotinfo[cnt].map;
	  if (map == NULL)
	    /* Unused entry.  */
	    continue;

	  if (map->l_tls_offset == NO_TLS_OFFSET)
	    {
	      /* For dynamically loaded modules we simply store
		 the value indicating deferred allocation.  */
	      dtv[map->l_tls_modid].pointer = TLS_DTV_UNALLOCATED;
	      continue;
	    }

	  assert (map->l_tls_modid == cnt);
	  assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# if TLS_TCB_AT_TP
	  /* Blocks lie below the TCB; offsets are subtracted.  */
	  assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
	  dest = (char *) result - map->l_tls_offset;
# elif TLS_DTV_AT_TP
	  dest = (char *) result + map->l_tls_offset;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

	  /* Copy the initialization image and clear the BSS part.  */
	  dtv[map->l_tls_modid].pointer = dest;
	  memset (__mempcpy (dest, map->l_tls_initimage,
			     map->l_tls_initimage_size), '\0',
		  map->l_tls_blocksize - map->l_tls_initimage_size);
	}

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
	break;

      listp = listp->next;
      assert (listp != NULL);
    }

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)
e4138261 399
581dc54b
RM
400void *
401internal_function
9a1eb38e 402_dl_allocate_tls (void *mem)
581dc54b 403{
9a1eb38e
UD
404 return _dl_allocate_tls_init (mem == NULL
405 ? _dl_allocate_tls_storage ()
406 : allocate_dtv (mem));
581dc54b 407}
733f25e6 408rtld_hidden_def (_dl_allocate_tls)
b6836436 409
e4138261
UD
410
/* Free the TLS resources of the thread whose TCB pointer is TCB: the
   dtv always (unless it is the initial, minimal-malloc dtv), and the
   static TLS/TCB memory block as well when DEALLOC_TCB is true.  */
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* The array starts with dtv[-1].  */
#ifdef SHARED
  /* The initial dtv came from dl-minimal.c's allocator and must not be
     passed to the real free.  */
  if (dtv != GL(dl_initial_dtv))
#endif
    free (dtv - 1);

  if (dealloc_tcb)
    {
# if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
# elif TLS_DTV_AT_TP
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	     & ~(GL(dl_tls_static_align) - 1);
# endif
      free (tcb);
    }
}
rtld_hidden_def (_dl_deallocate_tls)
a52d1562
UD
437
438
aed283dd 439# ifdef SHARED
3fb55878
UD
440/* The __tls_get_addr function has two basic forms which differ in the
441 arguments. The IA-64 form takes two parameters, the module ID and
442 offset. The form used, among others, on IA-32 takes a reference to
443 a special structure which contain the same information. The second
444 form seems to be more often used (in the moment) so we default to
445 it. Users of the IA-64 form have to provide adequate definitions
446 of the following macros. */
aed283dd
UD
447# ifndef GET_ADDR_ARGS
448# define GET_ADDR_ARGS tls_index *ti
449# endif
450# ifndef GET_ADDR_MODULE
451# define GET_ADDR_MODULE ti->ti_module
452# endif
453# ifndef GET_ADDR_OFFSET
454# define GET_ADDR_OFFSET ti->ti_offset
455# endif
456/* Systems which do not have tls_index also probably have to define
457 DONT_USE_TLS_INDEX. */
458
459# ifndef __TLS_GET_ADDR
460# define __TLS_GET_ADDR __tls_get_addr
461# endif
462
463
/* Return the symbol address given the map of the module it is in and
   the symbol record.  This is used in dl-sym.c.  */
void *
internal_function
_dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
{
# ifndef DONT_USE_TLS_INDEX
  /* Default form: __tls_get_addr takes a pointer to a tls_index
     structure holding module ID and offset.  */
  tls_index tmp =
    {
      .ti_module = map->l_tls_modid,
      .ti_offset = ref->st_value
    };

  return __TLS_GET_ADDR (&tmp);
# else
  /* Two-argument (e.g. IA-64) form: module ID and offset are passed
     directly.  */
  return __TLS_GET_ADDR (map->l_tls_modid, ref->st_value);
# endif
}
482
483
484static void *
485allocate_and_init (struct link_map *map)
486{
487 void *newp;
488
489 newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
490 if (newp == NULL)
491 oom ();
3fb55878 492
aed283dd
UD
493 /* Initialize the memory. */
494 memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
495 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
3fb55878 496
aed283dd
UD
497 return newp;
498}
499
500
501/* The generic dynamic and local dynamic model cannot be used in
502 statically linked applications. */
3fb55878
UD
503void *
504__tls_get_addr (GET_ADDR_ARGS)
505{
506 dtv_t *dtv = THREAD_DTV ();
aed283dd
UD
507 struct link_map *the_map = NULL;
508 void *p;
509
510 if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
511 {
512 struct dtv_slotinfo_list *listp;
513 size_t idx;
514
515 /* The global dl_tls_dtv_slotinfo array contains for each module
516 index the generation counter current when the entry was
517 created. This array never shrinks so that all module indices
518 which were valid at some time can be used to access it.
519 Before the first use of a new module index in this function
520 the array was extended appropriately. Access also does not
521 have to be guarded against modifications of the array. It is
522 assumed that pointer-size values can be read atomically even
523 in SMP environments. It is possible that other threads at
524 the same time dynamically load code and therefore add to the
525 slotinfo list. This is a problem since we must not pick up
526 any information about incomplete work. The solution to this
527 is to ignore all dtv slots which were created after the one
528 we are currently interested. We know that dynamic loading
529 for this module is completed and this is the last load
530 operation we know finished. */
531 idx = GET_ADDR_MODULE;
532 listp = GL(dl_tls_dtv_slotinfo_list);
533 while (idx >= listp->len)
534 {
535 idx -= listp->len;
536 listp = listp->next;
537 }
3fb55878 538
aed283dd
UD
539 if (dtv[0].counter < listp->slotinfo[idx].gen)
540 {
541 /* The generation counter for the slot is higher than what
542 the current dtv implements. We have to update the whole
543 dtv but only those entries with a generation counter <=
544 the one for the entry we need. */
545 size_t new_gen = listp->slotinfo[idx].gen;
546 size_t total = 0;
547
548 /* We have to look through the entire dtv slotinfo list. */
549 listp = GL(dl_tls_dtv_slotinfo_list);
550 do
551 {
552 size_t cnt;
553
554 for (cnt = total = 0 ? 1 : 0; cnt < listp->len; ++cnt)
555 {
556 size_t gen = listp->slotinfo[cnt].gen;
557 struct link_map *map;
558 size_t modid;
559
560 if (gen > new_gen)
561 /* This is a slot for a generation younger than
562 the one we are handling now. It might be
563 incompletely set up so ignore it. */
564 continue;
565
566 /* If the entry is older than the current dtv layout
567 we know we don't have to handle it. */
568 if (gen <= dtv[0].counter)
569 continue;
570
571 /* If there is no map this means the entry is empty. */
572 map = listp->slotinfo[cnt].map;
573 if (map == NULL)
574 {
575 /* If this modid was used at some point the memory
576 might still be allocated. */
577 if (dtv[total + cnt].pointer != TLS_DTV_UNALLOCATED)
2a4f7d66
RM
578 {
579 free (dtv[total + cnt].pointer);
580 dtv[total + cnt].pointer = TLS_DTV_UNALLOCATED;
581 }
aed283dd
UD
582
583 continue;
584 }
585
586 /* Check whether the current dtv array is large enough. */
587 modid = map->l_tls_modid;
588 assert (total + cnt == modid);
589 if (dtv[-1].counter < modid)
590 {
591 /* Reallocate the dtv. */
592 dtv_t *newp;
593 size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
594 size_t oldsize = dtv[-1].counter;
595
596 assert (map->l_tls_modid <= newsize);
597
08da0621
RM
598 if (dtv == GL(dl_initial_dtv))
599 {
600 /* This is the initial dtv that was allocated
601 during rtld startup using the dl-minimal.c
602 malloc instead of the real malloc. We can't
603 free it, we have to abandon the old storage. */
604
605 newp = malloc ((2 + newsize) * sizeof (dtv_t));
606 if (newp == NULL)
607 oom ();
608 memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
609 }
610 else
611 {
612 newp = realloc (&dtv[-1],
613 (2 + newsize) * sizeof (dtv_t));
614 if (newp == NULL)
615 oom ();
616 }
aed283dd
UD
617
618 newp[0].counter = newsize;
619
08da0621 620 /* Clear the newly allocated part. */
aed283dd
UD
621 memset (newp + 2 + oldsize, '\0',
622 (newsize - oldsize) * sizeof (dtv_t));
623
624 /* Point dtv to the generation counter. */
625 dtv = &newp[1];
626
627 /* Install this new dtv in the thread data
628 structures. */
629 INSTALL_NEW_DTV (dtv);
630 }
631
632 /* If there is currently memory allocate for this
633 dtv entry free it. */
634 /* XXX Ideally we will at some point create a memory
635 pool. */
636 if (dtv[modid].pointer != TLS_DTV_UNALLOCATED)
637 /* Note that free is called for NULL is well. We
638 deallocate even if it is this dtv entry we are
639 supposed to load. The reason is that we call
640 memalign and not malloc. */
641 free (dtv[modid].pointer);
642
643 /* This module is loaded dynamically- We defer
644 memory allocation. */
645 dtv[modid].pointer = TLS_DTV_UNALLOCATED;
646
647 if (modid == GET_ADDR_MODULE)
648 the_map = map;
649 }
650
651 total += listp->len;
652 }
653 while ((listp = listp->next) != NULL);
3fb55878 654
aed283dd
UD
655 /* This will be the new maximum generation counter. */
656 dtv[0].counter = new_gen;
657 }
658 }
659
660 p = dtv[GET_ADDR_MODULE].pointer;
661
662 if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
663 {
664 /* The allocation was deferred. Do it now. */
665 if (the_map == NULL)
666 {
667 /* Find the link map for this module. */
668 size_t idx = GET_ADDR_MODULE;
669 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
670
671 while (idx >= listp->len)
672 {
673 idx -= listp->len;
674 listp = listp->next;
675 }
676
677 the_map = listp->slotinfo[idx].map;
678 }
679
680 p = dtv[GET_ADDR_MODULE].pointer = allocate_and_init (the_map);
681 }
682
683 return (char *) p + GET_ADDR_OFFSET;
3fb55878 684}
aed283dd 685# endif
3fb55878
UD
686
687#endif /* use TLS */
This page took 0.189268 seconds and 5 git commands to generate.