/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>


/* Type of the destructor functions called via DT_FINI and
   DT_FINI_ARRAY.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

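/* TLS bookkeeping helper.  The dtv slotinfo list is a linked list of
   arrays mapping TLS module indexes to their link maps and to the
   generation in which each slot last changed.  */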
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}


void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage
     collection again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Clear DF_1_NODELETE to force object deletion.  We don't need to touch
     l_tls_dtor_count because forced object deletion only happens when an
     error occurs during object load.  Destructor registration for TLS
     non-POD objects should not have happened till then for this
     object.  */
  if (force)
    map->l_flags_1 &= ~DF_1_NODELETE;

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

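  /* From here on, l_idx is each map's position in MAPS, so the USED
     and DONE bitmaps can be indexed directly from a link map.  */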
  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

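  /* Mark phase: any object that is still referenced, and everything
     reachable from it via regular (l_initfini) and relocation
     (l_reldeps) dependencies, is marked as used.  Whenever marking
     reaches back before the current position, DONE_INDEX is rewound
     so those maps are scanned again.  */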
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
                 used + (nsid == LM_ID_BASE), true);
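  /* MAPS is now in dependency order, so the destructors of an object
     to be unloaded run before the destructors of the objects it
     depends on.  */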

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    {
                      struct auditstate *state
                        = link_map_audit_state (imap, cnt);
                      /* Return value is ignored.  */
                      (void) afct->objclose (&state->cookie);
                    }

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;
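              /* Switch to the new scope array.  Other threads may still
                 be walking the old one without holding any lock, so it
                 can only be freed once they are all known to have left
                 it; _dl_scope_free takes care of that.  */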
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_DELETE);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
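                  /* With TLS_TCB_AT_TP the static TLS area lies below the
                     thread pointer and l_tls_offset counts downward from
                     it; with TLS_DTV_AT_TP it lies above and offsets count
                     upward.  The two branches below mirror each other for
                     this reason.  */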
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif

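  /* If the namespaces with the highest IDs are now empty, shrink the
     namespace count accordingly.  */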
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck whether we need to retry; a recursive dlclose may have
     requested another garbage-collection pass.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}