]> sourceware.org Git - glibc.git/blob - sysdeps/generic/dl-tls.c
* sysdeps/generic/dl-tls.c (__tls_get_addr): After freeing block in
[glibc.git] / sysdeps / generic / dl-tls.c
1 /* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20 #include <assert.h>
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <unistd.h>
24 #include <sys/param.h>
25
26 #include <tls.h>
27
28 /* We don't need any of this if TLS is not supported. */
29 #ifdef USE_TLS
30
31 # include <dl-tls.h>
32 # include <ldsodefs.h>
33
34 /* Value used for dtv entries for which the allocation is delayed. */
35 # define TLS_DTV_UNALLOCATED ((void *) -1l)
36
37
38 /* Out-of-memory handler. */
39 # ifdef SHARED
/* Fatal out-of-memory handler used when a TLS block or dtv cannot be
   allocated.  _dl_fatal_printf is expected not to return (hence the
   noreturn attribute), terminating the process with the message.  */
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
46 # endif
47
48
49
50 size_t
51 internal_function
52 _dl_next_tls_modid (void)
53 {
54 size_t result;
55
56 if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
57 {
58 size_t disp = 0;
59 struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
60
61 /* Note that this branch will never be executed during program
62 start since there are no gaps at that time. Therefore it
63 does not matter that the dl_tls_dtv_slotinfo is not allocated
64 yet when the function is called for the first times. */
65 result = GL(dl_tls_static_nelem) + 1;
66 /* If the following would not be true we mustn't have assumed
67 there is a gap. */
68 assert (result <= GL(dl_tls_max_dtv_idx));
69 do
70 {
71 while (result - disp < runp->len)
72 {
73 if (runp->slotinfo[result - disp].map == NULL)
74 break;
75
76 ++result;
77 assert (result <= GL(dl_tls_max_dtv_idx) + 1);
78 }
79
80 if (result - disp < runp->len)
81 break;
82
83 disp += runp->len;
84 }
85 while ((runp = runp->next) != NULL);
86
87 if (result >= GL(dl_tls_max_dtv_idx))
88 {
89 /* The new index must indeed be exactly one higher than the
90 previous high. */
91 assert (result == GL(dl_tls_max_dtv_idx));
92
93 /* There is no gap anymore. */
94 GL(dl_tls_dtv_gaps) = false;
95
96 goto nogaps;
97 }
98 }
99 else
100 {
101 /* No gaps, allocate a new entry. */
102 nogaps:
103 result = ++GL(dl_tls_max_dtv_idx);
104 }
105
106 return result;
107 }
108
109
/* Assign each statically loaded module its offset within the static
   TLS block and record the total size and alignment requirement of
   that block in GL(dl_tls_static_size) / GL(dl_tls_static_align).
   Called once at startup; at that point the slotinfo list has exactly
   one element and no gaps exist.  */
void
internal_function
_dl_determine_tlsoffset (void)
{
  struct dtv_slotinfo *slotinfo;
  size_t max_align = __alignof__ (void *);
  size_t offset;
  size_t cnt;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

# if TLS_TCB_AT_TP
  /* Variant II layout: TLS blocks live below the thread pointer, so
     l_tls_offset counts downward from the TCB.  We simply start with
     zero.  */
  offset = 0;

  /* Slot 0 is unused; module IDs start at 1.  */
  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* Compute the offset of the next TLS block.  */
      offset = roundup (offset + slotinfo[cnt].map->l_tls_blocksize,
			slotinfo[cnt].map->l_tls_align);

      /* XXX For some architectures we perhaps should store the
	 negative offset.  */
      slotinfo[cnt].map->l_tls_offset = offset;
    }

  /* The thread descriptor (pointed to by the thread pointer) has its
     own alignment requirement.  Adjust the static TLS size
     and TLS offsets appropriately.  */
  // XXX How to deal with this.  We cannot simply add zero bytes
  // XXX after the first (closest to the TCB) TLS block since this
  // XXX would invalidate the offsets the linker creates for the LE
  // XXX model.

  GL(dl_tls_static_size) = offset + TLS_TCB_SIZE;
# elif TLS_DTV_AT_TP
  /* Variant I layout: the TLS blocks start right after the TCB, so
     offsets count upward from the thread pointer.  */
  offset = TLS_TCB_SIZE;

  /* The first block starts right after the TCB.  */
  slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
  if (slotinfo[1].map != NULL)
    {
      size_t prev_size;

      offset = roundup (offset, slotinfo[1].map->l_tls_align);
      slotinfo[1].map->l_tls_offset = offset;
      max_align = slotinfo[1].map->l_tls_align;
      prev_size = slotinfo[1].map->l_tls_blocksize;

      for (cnt = 2; slotinfo[cnt].map != NULL; ++cnt)
	{
	  assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

	  max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

	  /* Compute the offset of the next TLS block.  Here the
	     offset denotes the block's start, so the previous block's
	     size is added first.  */
	  offset = roundup (offset + prev_size,
			    slotinfo[cnt].map->l_tls_align);

	  /* XXX For some architectures we perhaps should store the
	     negative offset.  */
	  slotinfo[cnt].map->l_tls_offset = offset;

	  prev_size = slotinfo[cnt].map->l_tls_blocksize;
	}

      offset += prev_size;
    }

  GL(dl_tls_static_size) = offset;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = MAX (TLS_TCB_ALIGN, max_align);
}
197
198
199 static void *
200 internal_function
201 allocate_dtv (void *result)
202 {
203 dtv_t *dtv;
204 size_t dtv_length;
205
206 /* We allocate a few more elements in the dtv than are needed for the
207 initial set of modules. This should avoid in most cases expansions
208 of the dtv. */
209 dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
210 dtv = (dtv_t *) malloc ((dtv_length + 2) * sizeof (dtv_t));
211 if (dtv != NULL)
212 {
213 /* This is the initial length of the dtv. */
214 dtv[0].counter = dtv_length;
215 /* Initialize all of the rest of the dtv (including the
216 generation counter) with zero to indicate nothing there. */
217 memset (dtv + 1, '\0', (dtv_length + 1) * sizeof (dtv_t));
218
219 /* Add the dtv to the thread data structures. */
220 INSTALL_DTV (result, dtv);
221 }
222 else
223 result = NULL;
224
225 return result;
226 }
227
228
/* Get size and alignment requirements of the static TLS block.
   Stores the values computed by _dl_determine_tlsoffset into *SIZEP
   and *ALIGNP for use by thread-creation code.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}
237
238
239 void *
240 internal_function
241 _dl_allocate_tls_storage (void)
242 {
243 void *result;
244
245 /* Allocate a correctly aligned chunk of memory. */
246 result = __libc_memalign (GL(dl_tls_static_align), GL(dl_tls_static_size));
247 if (__builtin_expect (result != NULL, 0))
248 {
249 /* Allocate the DTV. */
250 void *allocated = result;
251
252 # if TLS_TCB_AT_TP
253 /* The TCB follows the TLS blocks. */
254 result = (char *) result + GL(dl_tls_static_size) - TLS_TCB_SIZE;
255 # endif
256
257 result = allocate_dtv (result);
258 if (result == NULL)
259 free (allocated);
260 }
261
262 return result;
263 }
264
265
266 void *
267 internal_function
268 _dl_allocate_tls_init (void *result)
269 {
270 dtv_t *dtv = GET_DTV (result);
271 struct dtv_slotinfo_list *listp;
272 size_t total = 0;
273
274 if (result == NULL)
275 /* The memory allocation failed. */
276 return NULL;
277
278 /* We have to look prepare the dtv for all currently loaded
279 modules using TLS. For those which are dynamically loaded we
280 add the values indicating deferred allocation. */
281 listp = GL(dl_tls_dtv_slotinfo_list);
282 while (1)
283 {
284 size_t cnt;
285
286 for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
287 {
288 struct link_map *map;
289 void *dest;
290
291 /* Check for the total number of used slots. */
292 if (total + cnt > GL(dl_tls_max_dtv_idx))
293 break;
294
295 map = listp->slotinfo[cnt].map;
296 if (map == NULL)
297 /* Unused entry. */
298 continue;
299
300 if (map->l_type == lt_loaded)
301 {
302 /* For dynamically loaded modules we simply store
303 the value indicating deferred allocation. */
304 dtv[map->l_tls_modid].pointer = TLS_DTV_UNALLOCATED;
305 continue;
306 }
307
308 assert (map->l_tls_modid == cnt);
309 assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
310 # if TLS_TCB_AT_TP
311 assert (map->l_tls_offset >= map->l_tls_blocksize);
312 dest = (char *) result - map->l_tls_offset;
313 # elif TLS_DTV_AT_TP
314 dest = (char *) result + map->l_tls_offset;
315 # else
316 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
317 # endif
318
319 /* Copy the initialization image and clear the BSS part. */
320 dtv[map->l_tls_modid].pointer = dest;
321 memset (__mempcpy (dest, map->l_tls_initimage,
322 map->l_tls_initimage_size), '\0',
323 map->l_tls_blocksize - map->l_tls_initimage_size);
324 }
325
326 total += cnt;
327 if (total >= GL(dl_tls_max_dtv_idx))
328 break;
329
330 listp = listp->next;
331 assert (listp != NULL);
332 }
333
334 return result;
335 }
336 rtld_hidden_def (_dl_allocate_tls_init)
337
338 void *
339 internal_function
340 _dl_allocate_tls (void *mem)
341 {
342 return _dl_allocate_tls_init (mem == NULL
343 ? _dl_allocate_tls_storage ()
344 : allocate_dtv (mem));
345 }
346 INTDEF(_dl_allocate_tls)
347
348
349 void
350 internal_function
351 _dl_deallocate_tls (void *tcb, bool dealloc_tcb)
352 {
353 dtv_t *dtv = GET_DTV (tcb);
354
355 /* The array starts with dtv[-1]. */
356 free (dtv - 1);
357
358 if (dealloc_tcb)
359 free (tcb);
360 }
361
362
363
364 # ifdef SHARED
365 /* The __tls_get_addr function has two basic forms which differ in the
366 arguments. The IA-64 form takes two parameters, the module ID and
367 offset. The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
369 form seems to be more often used (in the moment) so we default to
370 it. Users of the IA-64 form have to provide adequate definitions
371 of the following macros. */
372 # ifndef GET_ADDR_ARGS
373 # define GET_ADDR_ARGS tls_index *ti
374 # endif
375 # ifndef GET_ADDR_MODULE
376 # define GET_ADDR_MODULE ti->ti_module
377 # endif
378 # ifndef GET_ADDR_OFFSET
379 # define GET_ADDR_OFFSET ti->ti_offset
380 # endif
381 /* Systems which do not have tls_index also probably have to define
382 DONT_USE_TLS_INDEX. */
383
384 # ifndef __TLS_GET_ADDR
385 # define __TLS_GET_ADDR __tls_get_addr
386 # endif
387
388
/* Return the symbol address given the map of the module it is in and
   the symbol record.  This is used in dl-sym.c.  */
void *
internal_function
_dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
{
# ifndef DONT_USE_TLS_INDEX
  /* Build a tls_index descriptor on the stack and hand it to the
     one-argument (IA-32 style) form of __tls_get_addr.  */
  tls_index tmp =
    {
      .ti_module = map->l_tls_modid,
      .ti_offset = ref->st_value
    };

  return __TLS_GET_ADDR (&tmp);
# else
  /* IA-64 style: module ID and offset are passed as two arguments.  */
  return __TLS_GET_ADDR (map->l_tls_modid, ref->st_value);
# endif
}
407
408
409 static void *
410 allocate_and_init (struct link_map *map)
411 {
412 void *newp;
413
414 newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
415 if (newp == NULL)
416 oom ();
417
418 /* Initialize the memory. */
419 memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
420 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
421
422 return newp;
423 }
424
425
426 /* The generic dynamic and local dynamic model cannot be used in
427 statically linked applications. */
428 void *
429 __tls_get_addr (GET_ADDR_ARGS)
430 {
431 dtv_t *dtv = THREAD_DTV ();
432 struct link_map *the_map = NULL;
433 void *p;
434
435 if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
436 {
437 struct dtv_slotinfo_list *listp;
438 size_t idx;
439
440 /* The global dl_tls_dtv_slotinfo array contains for each module
441 index the generation counter current when the entry was
442 created. This array never shrinks so that all module indices
443 which were valid at some time can be used to access it.
444 Before the first use of a new module index in this function
445 the array was extended appropriately. Access also does not
446 have to be guarded against modifications of the array. It is
447 assumed that pointer-size values can be read atomically even
448 in SMP environments. It is possible that other threads at
449 the same time dynamically load code and therefore add to the
450 slotinfo list. This is a problem since we must not pick up
451 any information about incomplete work. The solution to this
452 is to ignore all dtv slots which were created after the one
453 we are currently interested. We know that dynamic loading
454 for this module is completed and this is the last load
455 operation we know finished. */
456 idx = GET_ADDR_MODULE;
457 listp = GL(dl_tls_dtv_slotinfo_list);
458 while (idx >= listp->len)
459 {
460 idx -= listp->len;
461 listp = listp->next;
462 }
463
464 if (dtv[0].counter < listp->slotinfo[idx].gen)
465 {
466 /* The generation counter for the slot is higher than what
467 the current dtv implements. We have to update the whole
468 dtv but only those entries with a generation counter <=
469 the one for the entry we need. */
470 size_t new_gen = listp->slotinfo[idx].gen;
471 size_t total = 0;
472
473 /* We have to look through the entire dtv slotinfo list. */
474 listp = GL(dl_tls_dtv_slotinfo_list);
475 do
476 {
477 size_t cnt;
478
479 for (cnt = total = 0 ? 1 : 0; cnt < listp->len; ++cnt)
480 {
481 size_t gen = listp->slotinfo[cnt].gen;
482 struct link_map *map;
483 size_t modid;
484
485 if (gen > new_gen)
486 /* This is a slot for a generation younger than
487 the one we are handling now. It might be
488 incompletely set up so ignore it. */
489 continue;
490
491 /* If the entry is older than the current dtv layout
492 we know we don't have to handle it. */
493 if (gen <= dtv[0].counter)
494 continue;
495
496 /* If there is no map this means the entry is empty. */
497 map = listp->slotinfo[cnt].map;
498 if (map == NULL)
499 {
500 /* If this modid was used at some point the memory
501 might still be allocated. */
502 if (dtv[total + cnt].pointer != TLS_DTV_UNALLOCATED)
503 {
504 free (dtv[total + cnt].pointer);
505 dtv[total + cnt].pointer = TLS_DTV_UNALLOCATED;
506 }
507
508 continue;
509 }
510
511 /* Check whether the current dtv array is large enough. */
512 modid = map->l_tls_modid;
513 assert (total + cnt == modid);
514 if (dtv[-1].counter < modid)
515 {
516 /* Reallocate the dtv. */
517 dtv_t *newp;
518 size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
519 size_t oldsize = dtv[-1].counter;
520
521 assert (map->l_tls_modid <= newsize);
522
523 if (dtv == GL(dl_initial_dtv))
524 {
525 /* This is the initial dtv that was allocated
526 during rtld startup using the dl-minimal.c
527 malloc instead of the real malloc. We can't
528 free it, we have to abandon the old storage. */
529
530 newp = malloc ((2 + newsize) * sizeof (dtv_t));
531 if (newp == NULL)
532 oom ();
533 memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
534 }
535 else
536 {
537 newp = realloc (&dtv[-1],
538 (2 + newsize) * sizeof (dtv_t));
539 if (newp == NULL)
540 oom ();
541 }
542
543 newp[0].counter = newsize;
544
545 /* Clear the newly allocated part. */
546 memset (newp + 2 + oldsize, '\0',
547 (newsize - oldsize) * sizeof (dtv_t));
548
549 /* Point dtv to the generation counter. */
550 dtv = &newp[1];
551
552 /* Install this new dtv in the thread data
553 structures. */
554 INSTALL_NEW_DTV (dtv);
555 }
556
557 /* If there is currently memory allocate for this
558 dtv entry free it. */
559 /* XXX Ideally we will at some point create a memory
560 pool. */
561 if (dtv[modid].pointer != TLS_DTV_UNALLOCATED)
562 /* Note that free is called for NULL is well. We
563 deallocate even if it is this dtv entry we are
564 supposed to load. The reason is that we call
565 memalign and not malloc. */
566 free (dtv[modid].pointer);
567
568 /* This module is loaded dynamically- We defer
569 memory allocation. */
570 dtv[modid].pointer = TLS_DTV_UNALLOCATED;
571
572 if (modid == GET_ADDR_MODULE)
573 the_map = map;
574 }
575
576 total += listp->len;
577 }
578 while ((listp = listp->next) != NULL);
579
580 /* This will be the new maximum generation counter. */
581 dtv[0].counter = new_gen;
582 }
583 }
584
585 p = dtv[GET_ADDR_MODULE].pointer;
586
587 if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
588 {
589 /* The allocation was deferred. Do it now. */
590 if (the_map == NULL)
591 {
592 /* Find the link map for this module. */
593 size_t idx = GET_ADDR_MODULE;
594 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
595
596 while (idx >= listp->len)
597 {
598 idx -= listp->len;
599 listp = listp->next;
600 }
601
602 the_map = listp->slotinfo[idx].map;
603 }
604
605 p = dtv[GET_ADDR_MODULE].pointer = allocate_and_init (the_map);
606 }
607
608 return (char *) p + GET_ADDR_OFFSET;
609 }
610 # endif
611
612 #endif /* use TLS */
This page took 0.065987 seconds and 5 git commands to generate.