diff --git a/elf/dl-close.c b/elf/dl-close.c
index 4aef95a1a0..2b24d63422 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -43,6 +43,11 @@ typedef void (*fini_t) (void);
/* Special l_idx value used to indicate which objects remain loaded. */
#define IDX_STILL_USED -1
+/* We use the two l_reserved bits for 'used' and 'done' attributes. */
+#define MAP_DONE(L) ((L)->l_reserved & 1)
+#define MAP_USED(L) ((L)->l_reserved & 2)
+#define SET_MAP_DONE(L) ((L)->l_reserved |= 1)
+#define SET_MAP_USED(L) ((L)->l_reserved |= 2)
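+/* As an illustration (not relied upon by the code): a map that remains
+ used after _dl_close_worker() processing ends up with both bits set
+ (l_reserved == 3), while a map to be unloaded keeps both bits clear. */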
/* Returns true if a non-empty slotinfo entry was found. */
static bool
@@ -140,8 +145,6 @@ _dl_close_worker (struct link_map *map, bool force)
bool any_tls = false;
const unsigned int nloaded = ns->_ns_nloaded;
- char used[nloaded];
- char done[nloaded];
struct link_map *maps[nloaded];
/* Clear DF_1_NODELETE to force object deletion. We don't need to touch
@@ -157,24 +160,20 @@ _dl_close_worker (struct link_map *map, bool force)
int idx = 0;
for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
{
+ l->l_reserved = 0;
l->l_idx = idx;
maps[idx] = l;
++idx;
-
}
assert (idx == nloaded);
- /* Prepare the bitmaps. */
- memset (used, '\0', sizeof (used));
- memset (done, '\0', sizeof (done));
-
/* Keep track of the lowest index link map we have covered already. */
int done_index = -1;
while (++done_index < nloaded)
{
struct link_map *l = maps[done_index];
- if (done[done_index])
+ if (MAP_DONE (l))
/* Already handled. */
continue;
@@ -185,12 +184,12 @@ _dl_close_worker (struct link_map *map, bool force)
/* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
acquire is sufficient and correct. */
&& atomic_load_acquire (&l->l_tls_dtor_count) == 0
- && !used[done_index])
+ && !MAP_USED (l))
continue;
/* We need this object and we handle it now. */
- done[done_index] = 1;
- used[done_index] = 1;
+ SET_MAP_DONE (l);
+ SET_MAP_USED (l);
/* Signal the object is still needed. */
l->l_idx = IDX_STILL_USED;
@@ -206,9 +205,9 @@ _dl_close_worker (struct link_map *map, bool force)
{
assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
- if (!used[(*lp)->l_idx])
+ if (!MAP_USED (*lp))
{
- used[(*lp)->l_idx] = 1;
+ SET_MAP_USED (*lp);
/* If we marked a new object as used, and we've
already processed it, then we need to go back
and process again from that point forward to
@@ -231,9 +230,9 @@ _dl_close_worker (struct link_map *map, bool force)
{
assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
- if (!used[jmap->l_idx])
+ if (!MAP_USED (jmap))
{
- used[jmap->l_idx] = 1;
+ SET_MAP_USED (jmap);
if (jmap->l_idx - 1 < done_index)
done_index = jmap->l_idx - 1;
}
@@ -241,10 +240,8 @@ _dl_close_worker (struct link_map *map, bool force)
}
}
- /* Sort the entries. We can skip looking for the binary itself which is
- at the front of the search list for the main namespace. */
- _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
- used + (nsid == LM_ID_BASE), true);
+ /* Sort the entries. */
+ _dl_sort_maps (maps, nloaded, true);
/* Call all termination functions at once. */
#ifdef SHARED
@@ -261,7 +258,7 @@ _dl_close_worker (struct link_map *map, bool force)
/* All elements must be in the same namespace. */
assert (imap->l_ns == nsid);
- if (!used[i])
+ if (!MAP_USED (imap))
{
assert (imap->l_type == lt_loaded
&& (imap->l_flags_1 & DF_1_NODELETE) == 0);
@@ -323,7 +320,7 @@ _dl_close_worker (struct link_map *map, bool force)
if (i < first_loaded)
first_loaded = i;
}
- /* Else used[i]. */
+ /* Else MAP_USED (imap). */
else if (imap->l_type == lt_loaded)
{
struct r_scope_elem *new_list = NULL;
@@ -544,7 +541,7 @@ _dl_close_worker (struct link_map *map, bool force)
for (unsigned int i = first_loaded; i < nloaded; ++i)
{
struct link_map *imap = maps[i];
- if (!used[i])
+ if (!MAP_USED (imap))
{
assert (imap->l_type == lt_loaded);
@@ -798,6 +795,10 @@ _dl_close_worker (struct link_map *map, bool force)
if (dl_close_state == rerun)
goto retry;
+ /* Reset l_reserved bits to zero. */
+ for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
+ l->l_reserved = 0;
+
dl_close_state = not_pending;
}
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
index e12c353158..5698552f05 100644
--- a/elf/dl-deps.c
+++ b/elf/dl-deps.c
@@ -589,9 +589,7 @@ Filters not supported with LD_TRACE_PRELINKING"));
itself will always be initialized last. */
memcpy (l_initfini, map->l_searchlist.r_list,
nlist * sizeof (struct link_map *));
- /* We can skip looking for the binary itself which is at the front of
- the search list. */
- _dl_sort_maps (&l_initfini[1], nlist - 1, NULL, false);
+ _dl_sort_maps (l_initfini, nlist, false);
/* Terminate the list of dependencies. */
l_initfini[nlist] = NULL;
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 1e55d39814..91ee57a68a 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -88,11 +88,8 @@ _dl_fini (void)
assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
unsigned int nmaps = i;
- /* Now we have to do the sorting. We can skip looking for the
- binary itself which is at the front of the search list for
- the main namespace. */
- _dl_sort_maps (maps + (ns == LM_ID_BASE), nmaps - (ns == LM_ID_BASE),
- NULL, true);
+ /* Now we have to do the sorting. */
+ _dl_sort_maps (maps, nmaps, true);
/* We do not rely on the linked list of loaded objects anymore
from this point on. We have our own list here (maps). The
diff --git a/elf/dl-open.c b/elf/dl-open.c
index e18ee398cb..0a448832ac 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -301,8 +301,9 @@ dl_open_worker (void *a)
if (GLRO(dl_lazy))
reloc_mode |= mode & RTLD_LAZY;
- /* Sort the objects by dependency for the relocation process. This
- allows IFUNC relocations to work and it also means copy
+ /* Start the relocation process for the newly loaded objects; they were
+ already sorted by dependency in _dl_map_object_deps(), with the result
+ placed in new->l_initfini. This allows IFUNC relocations to work and
+ it also means copy
relocations of dependencies are overwritten if necessary. */
unsigned int nmaps = 0;
struct link_map *l = new;
@@ -314,16 +315,14 @@ dl_open_worker (void *a)
}
while (l != NULL);
struct link_map *maps[nmaps];
- nmaps = 0;
- l = new;
- do
+ int i = 0;
+ for (struct link_map **ptr = new->l_initfini; *ptr; ptr++)
{
+ l = *ptr;
if (! l->l_real->l_relocated)
- maps[nmaps++] = l;
- l = l->l_next;
+ maps[i++] = l;
}
- while (l != NULL);
- _dl_sort_maps (maps, nmaps, NULL, false);
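+ /* Sanity check: nmaps was computed above by walking the l_next chain,
+ while i counts the unrelocated entries in new->l_initfini; the two
+ must agree. */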
+ assert (i == nmaps);
int relocation_in_progress = 0;
diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
index 26b3fd93a3..0c359d3da9 100644
--- a/elf/dl-sort-maps.c
+++ b/elf/dl-sort-maps.c
@@ -16,107 +16,131 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
+#include <assert.h>
#include <ldsodefs.h>
+/* We use a recursive function for its clarity and ease of
+ implementation, as well as its faster execution speed. We already use
+ alloca() for list allocation during the breadth-first search of
+ dependencies in _dl_map_object_deps(), and the recursion depth here
+ should be of the same order as that worst-case stack usage. */
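+/* For example (a hypothetical worst case): a single dependency chain of
+ N loaded objects recurses N frames deep here, comparable to the
+ N-element lists alloca()'d in _dl_map_object_deps(). */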
-/* Sort array MAPS according to dependencies of the contained objects.
- Array USED, if non-NULL, is permutated along MAPS. If FOR_FINI this is
- called for finishing an object. */
-void
-_dl_sort_maps (struct link_map **maps, unsigned int nmaps, char *used,
- bool for_fini)
+static void
+dfs_traversal (struct link_map ***rpo, struct link_map *map,
+ bool *do_reldeps)
{
- /* A list of one element need not be sorted. */
- if (nmaps <= 1)
+ if (map->l_visited)
return;
- unsigned int i = 0;
- uint16_t seen[nmaps];
- memset (seen, 0, nmaps * sizeof (seen[0]));
- while (1)
- {
- /* Keep track of which object we looked at this round. */
- ++seen[i];
- struct link_map *thisp = maps[i];
+ map->l_visited = 1;
- if (__glibc_unlikely (for_fini))
+ if (map->l_initfini)
+ {
+ for (int i = 0; map->l_initfini[i] != NULL; i++)
{
- /* Do not handle ld.so in secondary namespaces and objects which
- are not removed. */
- if (thisp != thisp->l_real || thisp->l_idx == -1)
- goto skip;
+ struct link_map *dep = map->l_initfini[i];
+ if (dep->l_visited == 0)
+ dfs_traversal (rpo, dep, do_reldeps);
}
+ }
+
+ if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
+ {
+ /* Indicate that we encountered relocation dependencies during
+ traversal. */
+ *do_reldeps = true;
- /* Find the last object in the list for which the current one is
- a dependency and move the current object behind the object
- with the dependency. */
- unsigned int k = nmaps - 1;
- while (k > i)
+ for (int m = map->l_reldeps->act - 1; m >= 0; m--)
{
- struct link_map **runp = maps[k]->l_initfini;
- if (runp != NULL)
- /* Look through the dependencies of the object. */
- while (*runp != NULL)
- if (__glibc_unlikely (*runp++ == thisp))
- {
- move:
- /* Move the current object to the back past the last
- object with it as the dependency. */
- memmove (&maps[i], &maps[i + 1],
- (k - i) * sizeof (maps[0]));
- maps[k] = thisp;
-
- if (used != NULL)
- {
- char here_used = used[i];
- memmove (&used[i], &used[i + 1],
- (k - i) * sizeof (used[0]));
- used[k] = here_used;
- }
-
- if (seen[i + 1] > nmaps - i)
- {
- ++i;
- goto next_clear;
- }
-
- uint16_t this_seen = seen[i];
- memmove (&seen[i], &seen[i + 1], (k - i) * sizeof (seen[0]));
- seen[k] = this_seen;
-
- goto next;
- }
-
- if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
- {
- unsigned int m = maps[k]->l_reldeps->act;
- struct link_map **relmaps = &maps[k]->l_reldeps->list[0];
-
- /* Look through the relocation dependencies of the object. */
- while (m-- > 0)
- if (__glibc_unlikely (relmaps[m] == thisp))
- {
- /* If a cycle exists with a link time dependency,
- preserve the latter. */
- struct link_map **runp = thisp->l_initfini;
- if (runp != NULL)
- while (*runp != NULL)
- if (__glibc_unlikely (*runp++ == maps[k]))
- goto ignore;
- goto move;
- }
- ignore:;
- }
-
- --k;
+ struct link_map *dep = map->l_reldeps->list[m];
+ if (dep->l_visited == 0)
+ dfs_traversal (rpo, dep, do_reldeps);
}
+ }
+
+ *rpo -= 1;
+ **rpo = map;
+}
- skip:
- if (++i == nmaps)
- break;
- next_clear:
- memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
+/* Topologically sort array MAPS according to dependencies of the contained
+ objects. */
- next:;
+void
+_dl_sort_maps (struct link_map **maps, unsigned int nmaps, bool for_fini)
+{
+ for (int i = 0; i < nmaps; i++)
+ maps[i]->l_visited = 0;
+
+ /* We apply DFS traversal for each of maps[i] until the whole total order
+ is found and we're at the start of the Reverse-Postorder (RPO) sequence,
+ which is a topological sort.
+
+ We go from maps[nmaps - 1] backwards towards maps[0] at this level.
+ Due to the breadth-first search (BFS) ordering we receive, going
+ backwards usually results in a shallower depth-first recursion,
+ which adds a margin of stack-usage safety. Also, combined with the
+ natural processing order of l_initfini[] at each node during DFS, this
+ keeps the sorting results closer to the original link ordering in most
+ simple cases.
+
+ Another reason we order the top level backwards is that maps[0] is
+ usually exactly the main object that we are in the midst of processing
+ in _dl_map_object_deps(), so maps[0]->l_initfini[] is still blank. If
+ we started the traversal from maps[0], then with no dependencies yet
+ filled in, maps[0] would always be immediately and incorrectly placed
+ last in the order (first in reverse). Ordering the top level so that
+ maps[0] is traversed last naturally avoids this problem.
+
+ Further, the old "optimization" of skipping the main object at maps[0]
+ from the call-site (i.e. _dl_sort_maps(maps+1,nmaps-1)) is in general
+ no longer valid, since traversing along object dependency-links
+ may "find" the main object even when it is not included in the initial
+ order (e.g. a dlopen()'ed shared object can have circular dependencies
+ linked back to itself). In such a case, traversing N-1 objects would
+ create an N-object result, and raise problems.
+
+ To summarize, just passing in the full list, and iterating from back
+ to front, makes things much more straightforward. */
+
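+ /* A small worked example (with hypothetical objects A, B, C): given
+ maps = {A, B, C}, where A depends on B and B depends on C, the
+ backwards loop below first runs dfs_traversal() on C, placing it at
+ rpo[2]; B then lands at rpo[1] and A at rpo[0], yielding the reverse
+ postorder {A, B, C} with dependencies sorted towards the back. */
+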
+ struct link_map *rpo[nmaps];
+ struct link_map **rpo_head = &rpo[nmaps];
+
+ bool do_reldeps = false;
+ bool *do_reldeps_ref = (for_fini ? &do_reldeps : NULL);
+
+ for (int i = nmaps - 1; i >= 0; i--)
+ {
+ dfs_traversal (&rpo_head, maps[i], do_reldeps_ref);
+
+ /* We can break early if all objects are already placed. */
+ if (rpo_head == rpo)
+ goto end;
+ }
+ assert (rpo_head == rpo);
+
+ end:
+ /* This second pass is skipped if !FOR_FINI or if we didn't find any
+ reldeps in the first DFS traversal. */
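+ /* Note (as understood from the old implementation's behavior): this
+ second DFS pass follows only l_initfini links, but starts from the
+ reldeps-aware RPO order, so link-time dependencies take precedence
+ where they form cycles with relocation dependencies. */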
+ if (do_reldeps)
+ {
+ for (int i = 0; i < nmaps; i++)
+ rpo[i]->l_visited = 0;
+
+ struct link_map **maps_head = &maps[nmaps];
+ for (int i = nmaps - 1; i >= 0; i--)
+ {
+ dfs_traversal (&maps_head, rpo[i], NULL);
+
+ /* We can break early if all objects are already placed.
+ The memcpy below is not needed in this do_reldeps case,
+ since we wrote back to maps[] during DFS traversal. */
+ if (maps_head == maps)
+ return;
+ }
+ assert (maps_head == maps);
+ return;
}
+
+ memcpy (maps, rpo, sizeof (struct link_map *) * nmaps);
}
diff --git a/include/link.h b/include/link.h
index 736e1d72ae..67ff00c4bc 100644
--- a/include/link.h
+++ b/include/link.h
@@ -178,6 +178,8 @@ struct link_map
unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
+ unsigned int l_visited:1; /* Used internally for map dependency
+ graph traversal. */
unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
to by `l_phdr' is allocated. */
unsigned int l_soname_added:1; /* Nonzero if the SONAME is for sure in
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index b1fc5c31f9..9b6dcc8d5f 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -964,8 +964,8 @@ extern void _dl_init (struct link_map *main_map, int argc, char **argv,
extern void _dl_fini (void) attribute_hidden;
/* Sort array MAPS according to dependencies of the contained objects. */
-extern void _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
- char *used, bool for_fini) attribute_hidden;
+extern void _dl_sort_maps (struct link_map **, unsigned int, bool)
+ attribute_hidden;
/* The dynamic linker calls this function before and after changing
any shared object mappings. The `r_state' member of `struct r_debug'