sourceware.org Git - glibc.git/blob - elf/dl-close.c
Introduce link_map_audit_state accessor function
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}


void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
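  /* Bookkeeping for the mark-and-sweep pass below: USED marks maps
     that must stay loaded, DONE marks maps whose dependencies have
     already been walked, and MAPS records the namespace's link maps
     indexed by l_idx.  */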
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Clear DF_1_NODELETE to force object deletion.  We don't need to touch
     l_tls_dtor_count because forced object deletion only happens when an
     error occurs during object load.  Destructor registration for TLS
     non-POD objects should not have happened till then for this
     object.  */
  if (force)
    map->l_flags_1 &= ~DF_1_NODELETE;

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;

    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
                 used + (nsid == LM_ID_BASE), true);
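  /* MAPS is now in destructor order: as far as dependencies allow, an
     object appears before the objects it depends on, so the fini calls
     below run dependers before their dependencies.  */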

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    {
                      struct auditstate *state
                        = link_map_audit_state (imap, cnt);
                      /* Return value is ignored.  */
                      (void) afct->objclose (&state->cookie);
                    }

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_DELETE);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

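  /* Other threads may still be looking up symbols through the old
     scope arrays.  Wait for every thread to leave the global scope
     critical section before the queued old scopes are freed below.  */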
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

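  /* Candidate range of static TLS space to reclaim; NO_TLS_OFFSET means
     no reclaimable chunk has been found yet.  */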
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name always is allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif

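  /* If the highest-numbered namespace is now empty, shrink GL(dl_nns)
     so trailing empty namespaces are no longer counted.  */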
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}