/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <stdlib.h>
#include <string.h>		/* For memset and __mempcpy.  */
#include <sys/mman.h>		/* For __mmap and the MAP_*/PROT_* flags.  */
#include <sys/param.h>		/* For MAX and roundup.  */

#include <tls.h>

/* We don't need any of this if TLS is not supported.  */
#ifdef USE_TLS

#include <dl-tls.h>
#include <ldsodefs.h>

/* Value used for dtv entries for which the allocation is delayed.  */
# define TLS_DTV_UNALLOCATE ((void *) -1l)


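/* Return the next unused TLS module ID.  IDs freed up by unloading
   modules are reused when the gap flag is set; otherwise the running
   maximum is simply incremented.  */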
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      /* XXX If this method proves too costly we can optimize
	 it to use a constant time method.  But I don't think
	 it's a problem.  */
      struct link_map *runp = GL(dl_initimage_list);
      bool used[GL(dl_tls_max_dtv_idx)];

      /* Mark the module IDs of the objects still on the list; the
	 remaining entries are the gaps.  */
      memset (used, '\0', sizeof used);

      assert (runp != NULL);
      do
	{
	  assert (runp->l_tls_modid > 0
		  && runp->l_tls_modid <= GL(dl_tls_max_dtv_idx));
	  used[runp->l_tls_modid - 1] = true;
	}
      while ((runp = runp->l_tls_nextimage) != GL(dl_initimage_list));

      result = 0;
      do
	/* The information about the gaps is pessimistic.  It might be
	   that there are actually none.  */
	if (result >= GL(dl_tls_max_dtv_idx))
	  {
	    /* Now we know there is actually no gap.  Bump the maximum
	       ID number and remember that there are no gaps.  */
	    result = ++GL(dl_tls_max_dtv_idx);
	    GL(dl_tls_dtv_gaps) = false;
	    break;
	  }
      while (used[result++]);
    }
  else
    /* No gaps, allocate a new entry.  */
    result = ++GL(dl_tls_max_dtv_idx);

  return result;
}


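/* Compute the offset of each startup-time TLS block within the static
   TLS area and determine the total size and alignment the area needs.  */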
void
internal_function
_dl_determine_tlsoffset (struct link_map *firstp)
{
  struct link_map *runp = firstp;
  size_t max_align = 0;
  size_t offset;

  if (GL(dl_initimage_list) == NULL)
    {
      /* None of the objects used at startup time uses TLS.  We still
	 have to allocate the TCB and dtv.  */
      GL(dl_tls_static_size) = TLS_TCB_SIZE;
      GL(dl_tls_static_align) = TLS_TCB_ALIGN;

      return;
    }

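  /* Two layouts are possible.  With TLS_TCB_AT_TP the TCB sits at the
     thread pointer and the TLS blocks of the startup objects are placed
     in front of it; l_tls_offset then is the distance from the start of
     a block up to the TCB.  With TLS_DTV_AT_TP the TCB comes first and
     the blocks follow it; l_tls_offset then is the positive offset of a
     block from the thread pointer.  */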
# if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  offset = 0;

  do
    {
      max_align = MAX (max_align, runp->l_tls_align);

      /* Compute the offset of the next TLS block.  */
      offset = roundup (offset + runp->l_tls_blocksize, runp->l_tls_align);

      /* XXX For some architectures we perhaps should store the
	 negative offset.  */
      runp->l_tls_offset = offset;
    }
  while ((runp = runp->l_tls_nextimage) != firstp);

  /* The thread descriptor (pointed to by the thread pointer) has its
     own alignment requirement.  Adjust the static TLS size and TLS
     offsets appropriately.  */
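  /* For example, if the blocks end at offset 20 and TLS_TCB_ALIGN is 16,
     the adjustment below is 12: every block's offset moves up by 12 bytes
     and the TCB ends up 16-byte aligned at offset 32.  */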
  if (offset % TLS_TCB_ALIGN != 0)
    {
      size_t add = TLS_TCB_ALIGN - offset % TLS_TCB_ALIGN;

      /* XXX If the offset stored is negative we must subtract here.  */
      offset += add;

      runp = firstp;
      do
	runp->l_tls_offset += add;
      while ((runp = runp->l_tls_nextimage) != firstp);
    }

  GL(dl_tls_static_size) = offset + TLS_TCB_SIZE;
# elif TLS_DTV_AT_TP
  struct link_map *lastp;

  /* The first block starts right after the TCB.  */
  offset = TLS_TCB_SIZE;
  max_align = runp->l_tls_align;
  runp->l_tls_offset = offset;
  lastp = runp;

  while ((runp = runp->l_tls_nextimage) != firstp)
    {
      max_align = MAX (max_align, runp->l_tls_align);

      /* Compute the offset of the next TLS block.  */
      offset = roundup (offset + lastp->l_tls_blocksize, runp->l_tls_align);

      runp->l_tls_offset = offset;

      lastp = runp;
    }

  GL(dl_tls_static_size) = offset + lastp->l_tls_blocksize;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = MAX (TLS_TCB_ALIGN, max_align);
}


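/* Allocate the static TLS area and the dtv for a new thread and
   initialize the TLS blocks of the startup objects from their
   initialization images.  */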
void *
internal_function
_dl_allocate_tls (void)
{
  void *result;
  dtv_t *dtv;

  /* Allocate a correctly aligned chunk of memory.  */
  /* XXX For now */
  assert (GL(dl_tls_static_align) <= GL(dl_pagesize));
#ifdef MAP_ANON
# define _dl_zerofd (-1)
#else
# define _dl_zerofd GL(dl_zerofd)
  if (_dl_zerofd == -1)
    GL(dl_zerofd) = _dl_sysdep_open_zero_fill ();
# define MAP_ANON 0
#endif
  result = __mmap (0, GL(dl_tls_static_size), PROT_READ|PROT_WRITE,
		   MAP_ANON|MAP_PRIVATE, _dl_zerofd, 0);

  dtv = (dtv_t *) malloc ((GL(dl_tls_max_dtv_idx) + 1) * sizeof (dtv_t));
  if (result != MAP_FAILED && dtv != NULL)
    {
      struct link_map *runp;

# if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + GL(dl_tls_static_size) - TLS_TCB_SIZE;
# endif

      /* XXX Fill in a correct generation number.  */
      dtv[0].counter = 0;

      /* Initialize the memory from the initialization image list and clear
	 the BSS parts.  */
      if (GL(dl_initimage_list) != NULL)
	{
	  runp = GL(dl_initimage_list)->l_tls_nextimage;
	  do
	    {
	      assert (runp->l_tls_modid > 0);
	      assert (runp->l_tls_modid <= GL(dl_tls_max_dtv_idx));
# if TLS_TCB_AT_TP
	      dtv[runp->l_tls_modid].pointer = result - runp->l_tls_offset;
# elif TLS_DTV_AT_TP
	      dtv[runp->l_tls_modid].pointer = result + runp->l_tls_offset;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

	      memset (__mempcpy (dtv[runp->l_tls_modid].pointer,
				 runp->l_tls_initimage,
				 runp->l_tls_initimage_size),
		      '\0',
		      runp->l_tls_blocksize - runp->l_tls_initimage_size);
	    }
	  while ((runp = runp->l_tls_nextimage)
		 != GL(dl_initimage_list)->l_tls_nextimage);
	}

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    {
      /* One of the allocations failed.  Release whatever we already
	 got and report the failure.  */
      if (result != MAP_FAILED)
	__munmap (result, GL(dl_tls_static_size));
      free (dtv);
      result = NULL;
    }

  return result;
}


/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be used more often (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions of
   the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif
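
/* For reference: with the structure-based form the compiler arranges
   for a pointer to a tls_index object to be passed.  That type is
   defined by the architecture-specific dl-tls.h; on IA-32 it is
   essentially

     typedef struct
     {
       unsigned long int ti_module;
       unsigned long int ti_offset;
     } tls_index;

   i.e. the same module ID/offset pair the IA-64 form receives as two
   separate arguments.  */
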
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();

  if (dtv[GET_ADDR_MODULE].pointer == TLS_DTV_UNALLOCATE)
    /* XXX */;

  return (char *) dtv[GET_ADDR_MODULE].pointer + GET_ADDR_OFFSET;
}

#endif	/* use TLS */