/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>
#include <dl-find_object.h>

#include <dl-dst.h>
#include <dl-prop.h>


/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
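
/* Illustration only (the real plumbing lives in dlfcn/dlopen.c and
   varies between glibc versions): a user-level call such as

     void *handle = dlopen ("libfoo.so", RTLD_NOW | RTLD_GLOBAL);

   reaches _dl_open at the bottom of this file roughly as

     _dl_open ("libfoo.so", RTLD_NOW | RTLD_GLOBAL | __RTLD_DLOPEN,
               RETURN_ADDRESS (0), __LM_ID_CALLER,
               __libc_argc, __libc_argv, __environ);

   caller_dlopen is thus the dlopen caller's return address, used below
   to locate the calling link map, and nsid stays __LM_ID_CALLER until
   dl_open_worker_begin resolves it to a real namespace ID.  (libfoo.so
   is a hypothetical example name.)  */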

/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                    N_ ("cannot extend global scope"));
}

/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses was allocated by the malloc() stub in
     ld.so.  By the time we get here those functions are no longer
     used; the malloc() implementation of libc is used instead.  This
     means the block from the main map cannot be passed to a realloc()
     call.  Therefore we allocate a completely new array the first
     time we have to add something to the global scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
                              &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0; /* 0 means no new allocation.  */
  void *old_global = NULL; /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
                              ns->_ns_global_scope_pending_adds,
                              &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
        add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
        add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
         malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
                                  &allocation_size))
        add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
        add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
              ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}

/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;

          /* The array has been resized by add_to_global_resize.  */
          assert (new_nlist < ns->_ns_global_scope_alloc);

          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
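
/* Illustrative sketch (not actual glibc code) of how the resize/update
   pair is used by dl_open_worker_begin below; all fallible work is
   front-loaded into the resize step so that the update step cannot fail:

     add_to_global_resize (new);   // may signal ENOMEM; publishes nothing
     ...other fallible preparation (scopes, TLS slotinfo)...
     // Demarcation point: no recoverable errors allowed after this.
     add_to_global_update (new);   // cannot fail; makes entries visible

   The atomic_write_barrier in add_to_global_update pairs with readers
   that scan r_list/r_nlist without holding the loader lock: the new
   r_nlist value must not become visible before the array contents.  */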

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
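
/* How this is used by dl_open_worker_begin below:

     struct link_map *l
       = _dl_find_dso_for_object ((ElfW(Addr)) args->caller_dlopen);
     if (l != NULL)
       call_map = l;

   i.e. the return address of the dlopen caller is mapped back to the
   DSO that contains it, so that DSTs such as $ORIGIN, the
   __LM_ID_CALLER namespace, and RUNPATH/RPATH searches are resolved
   relative to the caller rather than the main program.  */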

/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}

/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The l_scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < array_length (imap->l_scope_mem))
                {
                  /* If the current l_scope memory is not pointing to
                     the static memory in the structure, but the
                     static memory in the structure is large enough to
                     use for cnt + 1 scope entries, then switch to
                     using the static memory.  */
                  new_size = array_length (imap->l_scope_mem);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy the array and the terminating NULL.  */
              memcpy (newp, imap->l_scope,
                      (cnt + 1) * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }
        }
    }
}

/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          /* Assert that resize_scopes has sufficiently enlarged the
             array.  */
          assert (cnt + 1 < imap->l_scope_max);

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          from_scope = cnt;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }
}
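
/* The terminate-then-publish sequence above is the usual lock-free
   publication idiom: readers may walk l_scope concurrently without the
   loader lock, so they must observe either the old terminator or a
   fully initialized new element, never garbage.  Schematically:

     l_scope[cnt + 1] = NULL;      // write the new terminator first
     atomic_write_barrier ();      // order the two stores
     l_scope[cnt] = new_entry;     // then publish the element

   where new_entry stands for &new->l_searchlist.  */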

/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (_dl_add_to_slotinfo (new->l_searchlist.r_list[i], false))
      any_tls = true;
  return any_tls;
}

/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    _dl_add_to_slotinfo (new->l_searchlist.r_list[i], true);

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_release (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static TLS data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for the current
             generation.  */

          /* FIXME: This can terminate the process on memory
             allocation failure.  It is not possible to raise
             exceptions from this context; to fix this bug,
             _dl_update_slotinfo would have to be split into two
             operations, similar to resize_scopes and update_scopes
             above.  This is related to bug 16134.  */
          _dl_update_slotinfo (imap->l_tls_modid, newgen);
#endif

          dl_init_static_tls (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }
}
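
/* Sketch of the generation protocol, assuming the reader side in
   elf/dl-tls.c (simplified; details differ between glibc versions):
   each thread's DTV records the generation it was last synchronized
   with in dtv[0].counter, and the TLS access path does roughly

     if (dtv[0].counter != atomic_load_acquire (&GL(dl_tls_generation)))
       ...walk the slotinfo list and bring the DTV up to date...

   which is why the bump above must be a release store and must happen
   only after every slotinfo entry for the new generation exists.  */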

/* Mark the objects as NODELETE if required.  This is delayed until
   dlopen failure is no longer possible, so that _dl_close can still
   clean up the objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
        if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
          _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
                            l->l_name, l->l_ns);

        /* The flag can already be true at this point, e.g. a signal
           handler may have triggered lazy binding and set NODELETE
           status immediately.  */
        l->l_nodelete_active = true;

        /* This is just a debugging aid, to indicate that
           activate_nodelete has run for this map.  */
        l->l_nodelete_pending = false;
      }
}

/* Relocate the object L.  *RELOCATION_IN_PROGRESS controls whether
   the debugger is notified of the start of relocation processing.  */
static void
_dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
                              struct link_map *l, int reloc_mode,
                              bool *relocation_in_progress)
{
  if (l->l_real->l_relocated)
    return;

  if (!*relocation_in_progress)
    {
      /* Notify the debugger that relocations are about to happen.  */
      LIBC_PROBE (reloc_start, 2, args->nsid, r);
      *relocation_in_progress = true;
    }

#ifdef SHARED
  if (__glibc_unlikely (GLRO(dl_profile) != NULL))
    {
      /* If this is the shared object we want to profile, make sure
         profiling is started.  We can find out whether this is
         necessary or not by observing the `_dl_profile_map'
         variable.  If it was NULL before but is not NULL afterwards,
         we must start the profiling.  */
      struct link_map *old_profile_map = GL(dl_profile_map);

      _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

      if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
        {
          /* We must prepare the profiling.  */
          _dl_start_profile ();

          /* Prevent unloading the object.  */
          GL(dl_profile_map)->l_nodelete_active = true;
        }
    }
  else
#endif
    _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
}

static void
call_dl_init (void *closure)
{
  struct dl_open_args *args = closure;
  _dl_init (args->map, args->argc, args->argv, args->env);
}
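
/* call_dl_init only adapts _dl_init to the void * closure signature
   expected by _dl_catch_exception.  dl_open_worker below runs it as

     _dl_catch_exception (NULL, call_dl_init, args);

   where the NULL exception slot disables catching: an error signaled
   while ELF constructors run (a lazy binding failure, for example)
   terminates the process instead of unwinding into dlopen's error
   path.  */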

static void
dl_open_worker_begin (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
         namespace but it is not so far, prepare to add it now.  This
         can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
         was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
        {
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
              && !new->l_nodelete_active)
            _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
                              new->l_name, new->l_ns);
          new->l_nodelete_active = true;
        }

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_update (new);

      const int r_state __attribute__ ((unused))
        = _dl_debug_update (args->nsid)->r_state;
      assert (r_state == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
        struct link_map *map = new->l_searchlist.r_list[i]->l_real;
        _dl_check_map_versions (map, 0, 0);
#ifndef SHARED
        /* During static dlopen, check if ld.so has been loaded.
           Perform partial initialization in this case.  This must
           come after the symbol versioning initialization in
           _dl_check_map_versions.  */
        if (map->l_info[DT_SONAME] != NULL
            && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
                        + map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
          __rtld_static_init (map);
#endif
      }

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  _dl_audit_activity_nsid (new->l_ns, LA_ACT_CONSISTENT);
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_update (args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work, and it also means that
     copy relocations of dependencies are overwritten when necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
        {
          if (first == UINT_MAX)
            first = j;
          last = j + 1;
        }
      l = new->l_initfini[++j];
    }
  while (l != NULL);
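
  /* Example with hypothetical objects: suppose new->l_initfini is
     { libfoo, libbar, libc } (the newly opened libfoo first, then its
     dependencies) and libc is already relocated.  The loop above
     yields first == 0 and last == 2, and the relocation loop below
     visits i == 1 (libbar) before i == 0 (libfoo): dependencies are
     relocated before the objects that depend on them, which is what
     IFUNC resolvers rely on.  */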

  bool relocation_in_progress = false;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  /* Ensure that libc is relocated first.  This helps with the
     execution of IFUNC resolvers in libc, and matters only to newly
     created dlmopen namespaces.  Do not do this for static dlopen
     because libc has relocations against ld.so, which may not have
     been relocated at this point.  */
#ifdef SHARED
  if (GL(dl_ns)[args->nsid].libc_map != NULL)
    _dl_open_relocate_one_object (args, r, GL(dl_ns)[args->nsid].libc_map,
                                  reloc_mode, &relocation_in_progress);
#endif

  for (unsigned int i = last; i-- > first; )
    _dl_open_relocate_one_object (args, r, new->l_initfini[i], reloc_mode,
                                  &relocation_in_progress);

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  if (!_dl_find_object_update (new))
    _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                      N_ ("cannot allocate address lookup data"));

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  args->worker_continue = true;
}

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;

  args->worker_continue = false;

  {
    /* Protects global and module-specific TLS state.  */
    __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

    struct dl_exception ex;
    int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);

    __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

    if (__glibc_unlikely (ex.errstring != NULL))
      /* Reraise the error.  */
      _dl_signal_exception (err, &ex, NULL);
  }

  if (!args->worker_continue)
    return;

  int mode = args->mode;
  struct link_map *new = args->map;

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  _dl_catch_exception (NULL, call_dl_init, args);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
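
  /* For example, dlopen ("libfoo.so", 0) fails here: RTLD_BINDING_MASK
     covers RTLD_LAZY and RTLD_NOW, so one of the two binding policies
     must be requested; RTLD_GLOBAL or RTLD_NODELETE alone is not a
     valid mode.  (libfoo.so is a hypothetical example name.)  */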

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      GL(dl_ns)[nsid].libc_map = NULL;
      _dl_debug_update (nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO into an empty namespace.  Such direct
     placement only causes problems.  Also don't allow loading into a
     namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
         map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
        GL(dl_ns)[args.nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          _dl_close_worker (args.map, true);

          /* All l_nodelete_pending objects should have been deleted
             at this point, which is why it is not necessary to reset
             the flag here.  */
        }

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  const int r_state __attribute__ ((unused))
    = _dl_debug_update (args.nsid)->r_state;
  assert (r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
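
/* Illustration (the wrappers live in dlfcn/ and details vary between
   glibc versions): dlmopen maps onto _dl_open roughly as

     dlmopen (LM_ID_NEWLM, "libfoo.so", RTLD_NOW)
       -> _dl_open ("libfoo.so", RTLD_NOW | __RTLD_DLOPEN,
                    RETURN_ADDRESS (0), LM_ID_NEWLM, ...)

   which takes the nsid == LM_ID_NEWLM branch above, allocating a fresh
   namespace subject to the DL_NNS limit.  (libfoo.so is a hypothetical
   example name.)  */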

void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}
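
/* Illustrative output for LD_DEBUG=scopes, produced by the
   _dl_debug_printf calls above (hypothetical object names;
   _dl_debug_printf also prefixes each line with the process ID):

     object=/usr/lib/libfoo.so [0]
      scope 0: ./main /usr/lib/libfoo.so /lib64/libc.so.6

   The main executable has an empty l_name and is printed as
   RTLD_PROGNAME.  */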