/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>
#include <dl-find_object.h>
/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
		    N_ ("cannot extend global scope"));
}
/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;
  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses comes from the malloc() stub in ld.so.
     By the time we get here those functions are no longer used;
     instead the malloc() implementation of libc is in effect.  This
     means the block from the main map cannot be passed to realloc().
     Therefore we allocate a completely new array the first time we
     have to add something to the global scope.  */
  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
			      &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);
  unsigned int new_size = 0;	/* 0 means no new allocation.  */
  void *old_global = NULL;	/* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
			      ns->_ns_global_scope_pending_adds,
			      &required_new_size))
    add_to_global_resize_failure (new);
  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
	add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
	add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
	 malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }
  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
				  &allocation_size))
	add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
	add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
	      ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;
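      /* Concurrent lock-free readers may still be walking the old
	 r_list array.  The GSCOPE wait below lets every thread leave
	 its scope-access critical section before the old allocation
	 is released.  */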
      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;

	  /* The array has been resized by add_to_global_resize.  */
	  assert (new_nlist < ns->_ns_global_scope_alloc);

	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;
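  /* Publish the new length only after the new r_list entries are in
     place: the write barrier keeps a concurrent reader that observes
     the larger r_nlist from seeing uninitialized slots.  */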
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
	  && (l->l_contiguous
	      || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
	{
	  assert (ns == l->l_ns);
	  return l;
	}
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
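/* dl_open_worker_begin below uses this lookup to turn the return
   address of the dlopen caller into a link map, so that DST ($ORIGIN)
   expansion and RUNPATH/RPATH searches happen relative to the calling
   object.  */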
/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  for (size_t cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}
/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}
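/* Both helpers above treat l_scope as a NULL-terminated array of
   struct r_scope_elem pointers.  Small scopes fit into the inline
   l_scope_mem buffer inside struct link_map; larger ones are moved to
   heap allocations by resize_scopes below.  */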
/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  size_t cnt = scope_size (imap);
	  if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
	    {
	      /* The l_scope array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < array_length (imap->l_scope_mem))
		{
		  /* If the current l_scope memory is not pointing to
		     the static memory in the structure, but the
		     static memory in the structure is large enough to
		     use for cnt + 1 scope entries, then switch to
		     using the static memory.  */
		  new_size = array_length (imap->l_scope_mem);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      /* Copy the array and the terminating NULL.  */
	      memcpy (newp, imap->l_scope,
		      (cnt + 1) * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }
	}
    }
}
/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  size_t cnt = scope_size (imap);
	  /* Assert that resize_scopes has sufficiently enlarged the
	     array.  */
	  assert (cnt + 1 < imap->l_scope_max);

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     following after it.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;

	  from_scope = cnt;
	}

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	_dl_show_scope (imap, from_scope);
    }
}
/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (_dl_add_to_slotinfo (new->l_searchlist.r_list[i], false))
      any_tls = true;
  return any_tls;
}
/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    _dl_add_to_slotinfo (new->l_searchlist.r_list[i], true);

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_release (&GL(dl_tls_generation), newgen);
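  /* A thread that later observes the new generation (for example in
     __tls_get_addr) knows it must refresh its DTV before touching TLS
     belonging to the newly loaded modules.  */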
  /* We need a second pass for static tls data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init && imap->l_tls_blocksize > 0)
	{
	  /* For static TLS we have to allocate the memory here and
	     now, but we can delay updating the DTV.  */
	  imap->l_need_tls_init = 0;
#ifdef SHARED
	  /* Update the slot information data for the current
	     generation.  */

	  /* FIXME: This can terminate the process on memory
	     allocation failure.  It is not possible to raise
	     exceptions from this context; to fix this bug,
	     _dl_update_slotinfo would have to be split into two
	     operations, similar to resize_scopes and update_scopes
	     above.  This is related to bug 16134.  */
	  _dl_update_slotinfo (imap->l_tls_modid, newgen);
#endif

	  dl_init_static_tls (imap);
	  assert (imap->l_need_tls_init == 0);
	}
    }
}
/* Mark the objects as NODELETE if required.  This is delayed until
   after dlopen failure is not possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
	if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
	  _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
			    l->l_name, l->l_ns);

	/* The flag can already be true at this point, e.g. a signal
	   handler may have triggered lazy binding and set NODELETE
	   status immediately.  */
	l->l_nodelete_active = true;

	/* This is just a debugging aid, to indicate that
	   activate_nodelete has run for this map.  */
	l->l_nodelete_pending = false;
      }
}
/* Relocate the object L.  *RELOCATION_IN_PROGRESS controls whether
   the debugger is notified of the start of relocation processing.  */
static void
_dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
			      struct link_map *l, int reloc_mode,
			      bool *relocation_in_progress)
{
  if (l->l_real->l_relocated)
    return;

  if (!*relocation_in_progress)
    {
      /* Notify the debugger that relocations are about to happen.  */
      LIBC_PROBE (reloc_start, 2, args->nsid, r);
      *relocation_in_progress = true;
    }

#ifdef SHARED
  if (__glibc_unlikely (GLRO(dl_profile) != NULL))
    {
      /* If this is the shared object which we want to profile, make
	 sure the profiling is started.  We can find out whether this
	 is necessary or not by observing the `_dl_profile_map'
	 variable.  If it was NULL but is not NULL afterwards we must
	 start the profiling.  */
      struct link_map *old_profile_map = GL(dl_profile_map);

      _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

      if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
	{
	  /* We must prepare the profiling.  */
	  _dl_start_profile ();

	  /* Prevent unloading the object.  */
	  GL(dl_profile_map)->l_nodelete_active = true;
	}
    }
  else
#endif
    _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
}
static void
call_dl_init (void *closure)
{
  struct dl_open_args *args = closure;
  _dl_init (args->map, args->argc, args->argv, args->env);
}
static void
dl_open_worker_begin (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l
	= _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
      if (l != NULL)
	call_map = l;

      if (args->nsid == __LM_ID_CALLER)
	args->nsid = call_map->l_ns;
    }
  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);
  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }
  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;
  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
	 namespace but it is not so far, prepare to add it now.  This
	 can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
	 was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
	{
	  if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
	      && !new->l_nodelete_active)
	    _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
			      new->l_name, new->l_ns);
	  new->l_nodelete_active = true;
	}

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_update (new);

      const int r_state __attribute__ ((unused))
	= _dl_debug_update (args->nsid)->r_state;
      assert (r_state == RT_CONSISTENT);

      return;
    }
  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
	struct link_map *map = new->l_searchlist.r_list[i]->l_real;
	_dl_check_map_versions (map, 0, 0);

#ifndef SHARED
	/* During static dlopen, check if ld.so has been loaded.
	   Perform partial initialization in this case.  This must
	   come after the symbol versioning initialization in
	   _dl_check_map_versions.  */
	if (map->l_info[DT_SONAME] != NULL
	    && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
			+ map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
	  __rtld_static_init (map);
#endif
      }
#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  _dl_audit_activity_nsid (new->l_ns, LA_ACT_CONSISTENT);
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_update (args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);
  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;
  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.
     __dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
	{
	  if (first == UINT_MAX)
	    first = j;
	  last = j + 1;
	}
      l = new->l_initfini[++j];
    }
  while (l != NULL);
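  /* [first, last) now brackets the entries of l_initfini that still
     need relocation; the loop further below walks this window in
     reverse, i.e. in dependency order.  */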
  bool relocation_in_progress = false;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  /* Ensure that libc is relocated first.  This helps with the
     execution of IFUNC resolvers in libc, and matters only to newly
     created dlmopen namespaces.  Do not do this for static dlopen
     because libc has relocations against ld.so, which may not have
     been relocated at this point.  */
#ifdef SHARED
  if (GL(dl_ns)[args->nsid].libc_map != NULL)
    _dl_open_relocate_one_object (args, r, GL(dl_ns)[args->nsid].libc_map,
				  reloc_mode, &relocation_in_progress);
#endif

  for (unsigned int i = last; i-- > first; )
    _dl_open_relocate_one_object (args, r, new->l_initfini[i], reloc_mode,
				  &relocation_in_progress);
  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  if (!_dl_find_object_update (new))
    _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
		      N_ ("cannot allocate address lookup data"));

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);
  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  args->worker_continue = true;
}
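/* Top-level worker for _dl_open: runs dl_open_worker_begin while the
   TLS lock is held and, if it completed, runs ELF constructors and
   finishes the global scope update after that lock has been
   released.  */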
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;

  args->worker_continue = false;

  {
    /* Protects global and module specific TLS state.  */
    __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

    struct dl_exception ex;
    int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);

    __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

    if (__glibc_unlikely (ex.errstring != NULL))
      /* Reraise the error.  */
      _dl_signal_exception (err, &ex, NULL);
  }

  if (!args->worker_continue)
    return;

  int mode = args->mode;
  struct link_map *new = args->map;

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  _dl_catch_exception (NULL, call_dl_init, args);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
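/* Entry point behind dlopen and dlmopen.  An illustrative user-level
   call (not part of this file) such as

     void *handle = dlopen ("libfoo.so.1", RTLD_NOW | RTLD_GLOBAL);

   ends up here with MODE holding the dlopen flags and CALLER_DLOPEN
   set to the caller's return address.  */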
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));
  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__glibc_unlikely (nsid == DL_NNS))
	{
	  /* No more namespace available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      GL(dl_ns)[nsid].libc_map = NULL;
      _dl_debug_update (nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
	   && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
	       /* This prevents the [NSID] index expressions from being
		  evaluated, so the compiler won't think that we are
		  accessing an invalid index here in the !SHARED case where
		  DL_NNS is 1 and so any NSID != 0 is invalid.  */
	       || DL_NNS == 1
	       || GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);
#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif
  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;
  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
	 map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
	GL(dl_ns)[args.nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  _dl_close_worker (args.map, true);

	  /* All l_nodelete_pending objects should have been deleted
	     at this point, which is why it is not necessary to reset
	     the flag here.  */
	}

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }
  const int r_state __attribute__ ((unused))
    = _dl_debug_update (args.nsid)->r_state;
  assert (r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
		    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
	_dl_debug_printf (" scope %u:", scope_cnt);

	for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist;
	     ++cnt)
	  if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
	    _dl_debug_printf_c (" %s",
				l->l_scope[scope_cnt]->r_list[cnt]->l_name);
	  else
	    _dl_debug_printf_c (" %s", RTLD_PROGNAME);

	_dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf ("  no scope\n");
  _dl_debug_printf ("\n");
}