/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>  /* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>
#include <dl-find_object.h>

#include <dl-dst.h>
#include <dl-prop.h>

/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};

/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                    N_ ("cannot extend global scope"));
}

/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc
     was loaded) the memory it uses is allocated by the malloc() stub
     in ld.so.  When we come here these functions are not used
     anymore; instead the malloc() implementation of libc is used.
     But this means the block from the main map cannot be used in a
     realloc() call.  Therefore we allocate a completely new array
     the first time we have to add something to the local scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
                              &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0; /* 0 means no new allocation.  */
  void *old_global = NULL; /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
                              ns->_ns_global_scope_pending_adds,
                              &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
        add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
        add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
         malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
                                  &allocation_size))
        add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
        add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
              ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
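
/* add_to_global_resize above and add_to_global_update below split the
   global scope update into a fallible reserve step and an infallible
   commit step.  A minimal standalone sketch of the same
   reserve-then-commit idiom, with hypothetical names and without the
   concurrency handling (the real code must also wait for concurrent
   readers via THREAD_GSCOPE_WAIT before freeing the old array):

     #include <stdbool.h>
     #include <stdlib.h>
     #include <string.h>

     struct vec { void **items; size_t count; size_t alloc; };

     // Fallible phase: make room for N more items, reporting failure
     // to the caller instead of committing anything.
     static bool
     vec_reserve (struct vec *v, size_t n)
     {
       size_t need;
       if (__builtin_add_overflow (v->count, n, &need))
         return false;
       if (need <= v->alloc)
         return true;
       size_t new_alloc = v->alloc == 0 ? 8 : v->alloc;
       while (new_alloc < need)
         if (__builtin_mul_overflow (new_alloc, 2, &new_alloc))
           return false;
       // Like the code above, allocate fresh storage and copy rather
       // than calling realloc on a block we may not own.
       void **p = malloc (new_alloc * sizeof (void *));
       if (p == NULL)
         return false;
       if (v->count > 0)
         memcpy (p, v->items, v->count * sizeof (void *));
       void **old = v->items;
       v->items = p;
       v->alloc = new_alloc;
       free (old);
       return true;
     }

     // Infallible phase: append after a successful reserve.
     static void
     vec_commit (struct vec *v, void *item)
     {
       v->items[v->count++] = item;
     }
*/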

/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;

          /* The array has been resized by add_to_global_resize.  */
          assert (new_nlist < ns->_ns_global_scope_alloc);

          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
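
/* User-visible effect of the sequence above, sketched with
   hypothetical names: after dlopen with RTLD_GLOBAL, the new object's
   symbols are findable through the global scope, e.g. with
   dlsym (RTLD_DEFAULT, ...).  Assuming a DSO "libdemo.so" exporting
   "demo_func" (both names invented for illustration):

     #define _GNU_SOURCE       // for RTLD_DEFAULT in <dlfcn.h>
     #include <dlfcn.h>
     #include <stdio.h>

     int
     main (void)
     {
       // RTLD_GLOBAL adds the object and its dependencies to the
       // global scope via the resize/update pair above.
       void *handle = dlopen ("libdemo.so", RTLD_NOW | RTLD_GLOBAL);
       if (handle == NULL)
         {
           fprintf (stderr, "%s\n", dlerror ());
           return 1;
         }
       // The global scope is searched, so no handle is needed.
       void (*fn) (void)
         = (void (*) (void)) dlsym (RTLD_DEFAULT, "demo_func");
       if (fn != NULL)
         fn ();
       dlclose (handle);
       return 0;
     }
*/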

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
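
/* The closest public analogue of _dl_find_dso_for_object is dladdr,
   which also maps a code address to the object containing it (the
   public entry point goes through related link-map searches, not
   necessarily this exact function).  A hedged usage sketch:

     #define _GNU_SOURCE       // for dladdr in <dlfcn.h>
     #include <dlfcn.h>
     #include <stdio.h>

     static void
     report_containing_object (void *addr)
     {
       Dl_info info;
       // dladdr returns nonzero on success and fills in the file
       // name of the object containing ADDR.
       if (dladdr (addr, &info) != 0)
         printf ("%p lies in %s\n", addr, info.dli_fname);
     }
*/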

/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}

/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The l_scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < array_length (imap->l_scope_mem))
                {
                  /* If the current l_scope memory is not pointing to
                     the static memory in the structure, but the
                     static memory in the structure is large enough to
                     use for cnt + 1 scope entries, then switch to
                     using the static memory.  */
                  new_size = array_length (imap->l_scope_mem);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy the array and the terminating NULL.  */
              memcpy (newp, imap->l_scope,
                      (cnt + 1) * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }
        }
    }
}

/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          /* Assert that resize_scopes has sufficiently enlarged the
             array.  */
          assert (cnt + 1 < imap->l_scope_max);

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          from_scope = cnt;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }
}

/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (_dl_add_to_slotinfo (new->l_searchlist.r_list[i], false))
      any_tls = true;
  return any_tls;
}

/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    _dl_add_to_slotinfo (new->l_searchlist.r_list[i], true);

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_release (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static tls data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for the current
             generation.  */

          /* FIXME: This can terminate the process on memory
             allocation failure.  It is not possible to raise
             exceptions from this context; to fix this bug,
             _dl_update_slotinfo would have to be split into two
             operations, similar to resize_scopes and update_scopes
             above.  This is related to bug 16134.  */
          _dl_update_slotinfo (imap->l_tls_modid, newgen);
#endif

          dl_init_static_tls (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }
}
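
/* What the generation/slotinfo update above enables, from the user's
   side: a dlopen'ed object may define TLS variables, and existing
   threads can access them once dlopen returns.  A hedged sketch with
   a hypothetical "libtls.so" containing:

     __thread int counter;     // TLS data in the dlopen'ed object

     int
     bump (void)
     {
       return ++counter;       // one instance per thread, via the DTV
     }

   The generation counter bump above is what tells each thread's DTV,
   on its next access, that a new TLS module has appeared.  */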

/* Mark the objects as NODELETE if required.  This is delayed until
   after dlopen failure is not possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
        if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
          _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
                            l->l_name, l->l_ns);

        /* The flag can already be true at this point, e.g. a signal
           handler may have triggered lazy binding and set NODELETE
           status immediately.  */
        l->l_nodelete_active = true;

        /* This is just a debugging aid, to indicate that
           activate_nodelete has run for this map.  */
        l->l_nodelete_pending = false;
      }
}
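
/* User-visible effect of NODELETE, sketched with a hypothetical
   library name: once activate_nodelete has marked an object, dlclose
   does not unmap it.  RTLD_NOLOAD can be used to observe this:

     #define _GNU_SOURCE       // for RTLD_NOLOAD in <dlfcn.h>
     #include <dlfcn.h>
     #include <assert.h>

     int
     main (void)
     {
       void *h = dlopen ("libdemo.so", RTLD_NOW | RTLD_NODELETE);
       assert (h != NULL);
       dlclose (h);
       // RTLD_NOLOAD loads nothing; it returns a handle only if the
       // object is still resident, which NODELETE guarantees here.
       void *again = dlopen ("libdemo.so", RTLD_NOW | RTLD_NOLOAD);
       assert (again != NULL);
       return 0;
     }
*/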

/* Relocate the object L.  *RELOCATION_IN_PROGRESS controls whether
   the debugger is notified of the start of relocation processing.  */
static void
_dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
                              struct link_map *l, int reloc_mode,
                              bool *relocation_in_progress)
{
  if (l->l_real->l_relocated)
    return;

  if (!*relocation_in_progress)
    {
      /* Notify the debugger that relocations are about to happen.  */
      LIBC_PROBE (reloc_start, 2, args->nsid, r);
      *relocation_in_progress = true;
    }

#ifdef SHARED
  if (__glibc_unlikely (GLRO(dl_profile) != NULL))
    {
      /* If this is the shared object which we want to profile, make
         sure profiling is started.  We can find out whether this is
         necessary or not by observing the `_dl_profile_map'
         variable.  If it was NULL before but is not NULL afterwards
         we must start the profiling.  */
      struct link_map *old_profile_map = GL(dl_profile_map);

      _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

      if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
        {
          /* We must prepare the profiling.  */
          _dl_start_profile ();

          /* Prevent unloading the object.  */
          GL(dl_profile_map)->l_nodelete_active = true;
        }
    }
  else
#endif
    _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
}

static void
call_dl_init (void *closure)
{
  struct dl_open_args *args = closure;
  _dl_init (args->map, args->argc, args->argv, args->env);
}

static void
dl_open_worker_begin (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
         namespace but it is not so far, prepare to add it now.  This
         can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
         was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
        {
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
              && !new->l_nodelete_active)
            _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
                              new->l_name, new->l_ns);
          new->l_nodelete_active = true;
        }

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_update (new);

      const int r_state __attribute__ ((unused))
        = _dl_debug_update (args->nsid)->r_state;
      assert (r_state == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
        struct link_map *map = new->l_searchlist.r_list[i]->l_real;
        _dl_check_map_versions (map, 0, 0);
#ifndef SHARED
        /* During static dlopen, check if ld.so has been loaded.
           Perform partial initialization in this case.  This must
           come after the symbol versioning initialization in
           _dl_check_map_versions.  */
        if (map->l_info[DT_SONAME] != NULL
            && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
                        + map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
          __rtld_static_init (map);
#endif
      }

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  _dl_audit_activity_nsid (new->l_ns, LA_ACT_CONSISTENT);
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_update (args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
        {
          if (first == UINT_MAX)
            first = j;
          last = j + 1;
        }
      l = new->l_initfini[++j];
    }
  while (l != NULL);

  bool relocation_in_progress = false;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  /* Ensure that libc is relocated first.  This helps with the
     execution of IFUNC resolvers in libc, and matters only to newly
     created dlmopen namespaces.  Do not do this for static dlopen
     because libc has relocations against ld.so, which may not have
     been relocated at this point.  */
#ifdef SHARED
  if (GL(dl_ns)[args->nsid].libc_map != NULL)
    _dl_open_relocate_one_object (args, r, GL(dl_ns)[args->nsid].libc_map,
                                  reloc_mode, &relocation_in_progress);
#endif

  for (unsigned int i = last; i-- > first; )
    _dl_open_relocate_one_object (args, r, new->l_initfini[i], reloc_mode,
                                  &relocation_in_progress);

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  if (!_dl_find_object_update (new))
    _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                      N_ ("cannot allocate address lookup data"));

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  args->worker_continue = true;
}

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;

  args->worker_continue = false;

  {
    /* Protects global and module specific TLS state.  */
    __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

    struct dl_exception ex;
    int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);

    __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

    if (__glibc_unlikely (ex.errstring != NULL))
      /* Reraise the error.  */
      _dl_signal_exception (err, &ex, NULL);
  }

  if (!args->worker_continue)
    return;

  int mode = args->mode;
  struct link_map *new = args->map;

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  _dl_catch_exception (NULL, call_dl_init, args);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      GL(dl_ns)[nsid].libc_map = NULL;
      _dl_debug_update (nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
         map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
        GL(dl_ns)[args.nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          _dl_close_worker (args.map, true);

          /* All l_nodelete_pending objects should have been deleted
             at this point, which is why it is not necessary to reset
             the flag here.  */
        }

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  const int r_state __attribute__ ((unused))
    = _dl_debug_update (args.nsid)->r_state;
  assert (r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
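
/* _dl_open is the engine behind both dlopen and dlmopen.  The
   LM_ID_NEWLM handling above is reachable from user code via dlmopen.
   A hedged sketch, assuming a DSO "libdemo.so" (hypothetical name):

     #define _GNU_SOURCE       // for dlmopen/dlinfo in <dlfcn.h>
     #include <dlfcn.h>
     #include <stdio.h>

     int
     main (void)
     {
       // LM_ID_NEWLM asks _dl_open to pick an unused namespace; the
       // object then shares no link maps with the base namespace.
       void *h = dlmopen (LM_ID_NEWLM, "libdemo.so", RTLD_NOW);
       if (h == NULL)
         {
           fprintf (stderr, "%s\n", dlerror ());
           return 1;
         }
       Lmid_t ns;
       // dlinfo reports which namespace the handle landed in.
       if (dlinfo (h, RTLD_DI_LMID, &ns) == 0)
         printf ("loaded into namespace %ld\n", (long) ns);
       dlclose (h);
       return 0;
     }
*/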


void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf ("  no scope\n");
  _dl_debug_printf ("\n");
}