elf/dl-close.c
1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2007, 2009, 2010 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20 #include <assert.h>
21 #include <dlfcn.h>
22 #include <errno.h>
23 #include <libintl.h>
24 #include <stddef.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <bits/libc-lock.h>
30 #include <ldsodefs.h>
31 #include <sys/types.h>
32 #include <sys/mman.h>
33 #include <sysdep-cancel.h>
34 #include <tls.h>
35
36
37 /* Type of the destructor (fini) functions. */
38 typedef void (*fini_t) (void);
39
40
41 /* Special l_idx value used to indicate which objects remain loaded. */
42 #define IDX_STILL_USED -1
43
44
45 /* Returns true if a non-empty entry was found. */
46 static bool
47 remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
48 bool should_be_there)
49 {
50 if (idx - disp >= listp->len)
51 {
52 if (listp->next == NULL)
53 {
54 /* The index is not actually valid in the slotinfo list,
55 because this object was closed before it was fully set
56 up due to some error. */
57 assert (! should_be_there);
58 }
59 else
60 {
61 if (remove_slotinfo (idx, listp->next, disp + listp->len,
62 should_be_there))
63 return true;
64
65 /* No non-empty entry. Search from the end of this element's
66 slotinfo array. */
67 idx = disp + listp->len;
68 }
69 }
70 else
71 {
72 struct link_map *old_map = listp->slotinfo[idx - disp].map;
73
74 /* The entry might still be in its unused state if we are closing an
75 object that wasn't fully set up. */
76 if (__builtin_expect (old_map != NULL, 1))
77 {
78 assert (old_map->l_tls_modid == idx);
79
80 /* Mark the entry as unused. */
81 listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
82 listp->slotinfo[idx - disp].map = NULL;
83 }
84
85 /* If this is not the last currently used entry, no need to look
86 further. */
87 if (idx != GL(dl_tls_max_dtv_idx))
88 return true;
89 }
90
91 while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
92 {
93 --idx;
94
95 if (listp->slotinfo[idx - disp].map != NULL)
96 {
97 /* Found a new last used index. */
98 GL(dl_tls_max_dtv_idx) = idx;
99 return true;
100 }
101 }
102
103 /* No non-empty entry in this list element. */
104 return false;
105 }
106
107
108 void
109 _dl_close_worker (struct link_map *map)
110 {
111 /* One less direct use. */
112 --map->l_direct_opencount;
113
114 /* If _dl_close is called recursively (some destructor calls dlclose),
115 just record that the parent _dl_close will need to do garbage collection
116 again and return. */
117 static enum { not_pending, pending, rerun } dl_close_state;
118
119 if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
120 || dl_close_state != not_pending)
121 {
122 if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
123 dl_close_state = rerun;
124
125 /* There are still references to this object. Do nothing more. */
126 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
127 _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
128 map->l_name, map->l_direct_opencount);
129
130 return;
131 }
132
133 Lmid_t nsid = map->l_ns;
134 struct link_namespaces *ns = &GL(dl_ns)[nsid];
135
136 retry:
137 dl_close_state = pending;
138
139 bool any_tls = false;
140 const unsigned int nloaded = ns->_ns_nloaded;
141 char used[nloaded];
142 char done[nloaded];
143 struct link_map *maps[nloaded];
144
145 /* Run over the list and assign indexes to the link maps and enter
146 them into the MAPS array. */
147 int idx = 0;
148 for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
149 {
150 l->l_idx = idx;
151 maps[idx] = l;
152 ++idx;
153 }
154 assert (idx == nloaded);
155
156 /* Prepare the bitmaps. */
157 memset (used, '\0', sizeof (used));
158 memset (done, '\0', sizeof (done));
159
160 /* Keep track of the lowest index link map we have covered already. */
161 int done_index = -1;
162 while (++done_index < nloaded)
163 {
164 struct link_map *l = maps[done_index];
165
166 if (done[done_index])
167 /* Already handled. */
168 continue;
169
170 /* Check whether this object is still used. */
171 if (l->l_type == lt_loaded
172 && l->l_direct_opencount == 0
173 && (l->l_flags_1 & DF_1_NODELETE) == 0
174 && !used[done_index])
175 continue;
176
177 /* We need this object and we handle it now. */
178 done[done_index] = 1;
179 used[done_index] = 1;
180 /* Signal the object is still needed. */
181 l->l_idx = IDX_STILL_USED;
182
183 /* Mark all dependencies as used. */
184 if (l->l_initfini != NULL)
185 {
186 struct link_map **lp = &l->l_initfini[1];
187 while (*lp != NULL)
188 {
189 if ((*lp)->l_idx != IDX_STILL_USED)
190 {
191 assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
192
193 if (!used[(*lp)->l_idx])
194 {
195 used[(*lp)->l_idx] = 1;
196 if ((*lp)->l_idx - 1 < done_index)
197 done_index = (*lp)->l_idx - 1;
198 }
199 }
200
201 ++lp;
202 }
203 }
204 /* And the same for relocation dependencies. */
205 if (l->l_reldeps != NULL)
206 for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
207 {
208 struct link_map *jmap = l->l_reldeps->list[j];
209
210 if (jmap->l_idx != IDX_STILL_USED)
211 {
212 assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
213
214 if (!used[jmap->l_idx])
215 {
216 used[jmap->l_idx] = 1;
217 if (jmap->l_idx - 1 < done_index)
218 done_index = jmap->l_idx - 1;
219 }
220 }
221 }
222 }
223
224 /* Sort the entries. */
225 _dl_sort_fini (ns->_ns_loaded, maps, nloaded, used, nsid);
226
227 /* Call all termination functions at once. */
228 #ifdef SHARED
229 bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
230 #endif
231 bool unload_any = false;
232 bool scope_mem_left = false;
233 unsigned int unload_global = 0;
234 unsigned int first_loaded = ~0;
235 for (unsigned int i = 0; i < nloaded; ++i)
236 {
237 struct link_map *imap = maps[i];
238
239 /* All elements must be in the same namespace. */
240 assert (imap->l_ns == nsid);
241
242 if (!used[i])
243 {
244 assert (imap->l_type == lt_loaded
245 && (imap->l_flags_1 & DF_1_NODELETE) == 0);
246
247 /* Call its termination function. Do not do it for
248 half-cooked objects. */
249 if (imap->l_init_called)
250 {
251 /* When debugging print a message first. */
252 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
253 0))
254 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
255 imap->l_name, nsid);
256
257 if (imap->l_info[DT_FINI_ARRAY] != NULL)
258 {
259 ElfW(Addr) *array =
260 (ElfW(Addr) *) (imap->l_addr
261 + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
262 unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
263 / sizeof (ElfW(Addr)));
264
265 while (sz-- > 0)
266 ((fini_t) array[sz]) ();
267 }
268
269 /* Next try the old-style destructor. */
270 if (imap->l_info[DT_FINI] != NULL)
271 (*(void (*) (void)) DL_DT_FINI_ADDRESS
272 (imap, ((void *) imap->l_addr
273 + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
274 }
275
276 #ifdef SHARED
277 /* Auditing checkpoint: we remove an object. */
278 if (__builtin_expect (do_audit, 0))
279 {
280 struct audit_ifaces *afct = GLRO(dl_audit);
281 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
282 {
283 if (afct->objclose != NULL)
284 /* Return value is ignored. */
285 (void) afct->objclose (&imap->l_audit[cnt].cookie);
286
287 afct = afct->next;
288 }
289 }
290 #endif
291
292 /* This object must not be used anymore. */
293 imap->l_removed = 1;
294
295 /* We indeed have an object to remove. */
296 unload_any = true;
297
298 if (imap->l_global)
299 ++unload_global;
300
301 /* Remember where the first dynamically loaded object is. */
302 if (i < first_loaded)
303 first_loaded = i;
304 }
305 /* Else used[i]. */
306 else if (imap->l_type == lt_loaded)
307 {
308 struct r_scope_elem *new_list = NULL;
309
310 if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
311 {
312 /* The object is still used. But one of the objects we are
313 unloading right now is responsible for loading it. If
314 the current object does not have its own scope yet, we
315 have to create one. This has to be done before running
316 the finalizers.
317
318 To do this, count the number of dependencies. */
319 unsigned int cnt;
320 for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
321 ;
322
323 /* We simply reuse the l_initfini list. */
324 imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
325 imap->l_searchlist.r_nlist = cnt;
326
327 new_list = &imap->l_searchlist;
328 }
329
330 /* Count the number of scopes which remain after the unload.
331 If we add the local search list, count it as well. Always add
332 one for the terminating NULL pointer. */
333 size_t remain = (new_list != NULL) + 1;
334 bool removed_any = false;
335 for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
336 /* This relies on l_scope[] entries being always set either
337 to its own l_symbolic_searchlist address, or some map's
338 l_searchlist address. */
339 if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
340 {
341 struct link_map *tmap = (struct link_map *)
342 ((char *) imap->l_scope[cnt]
343 - offsetof (struct link_map, l_searchlist));
344 assert (tmap->l_ns == nsid);
345 if (tmap->l_idx == IDX_STILL_USED)
346 ++remain;
347 else
348 removed_any = true;
349 }
350 else
351 ++remain;
352
353 if (removed_any)
354 {
355 /* Always allocate a new array for the scope. This is
356 necessary since we must be able to determine the last
357 user of the current array. If possible use the link map's
358 memory. */
359 size_t new_size;
360 struct r_scope_elem **newp;
361
362 #define SCOPE_ELEMS(imap) \
363 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
364
365 if (imap->l_scope != imap->l_scope_mem
366 && remain < SCOPE_ELEMS (imap))
367 {
368 new_size = SCOPE_ELEMS (imap);
369 newp = imap->l_scope_mem;
370 }
371 else
372 {
373 new_size = imap->l_scope_max;
374 newp = (struct r_scope_elem **)
375 malloc (new_size * sizeof (struct r_scope_elem *));
376 if (newp == NULL)
377 _dl_signal_error (ENOMEM, "dlclose", NULL,
378 N_("cannot create scope list"));
379 }
380
381 /* Copy over the remaining scope elements. */
382 remain = 0;
383 for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
384 {
385 if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
386 {
387 struct link_map *tmap = (struct link_map *)
388 ((char *) imap->l_scope[cnt]
389 - offsetof (struct link_map, l_searchlist));
390 if (tmap->l_idx != IDX_STILL_USED)
391 {
392 /* Remove the scope. Or replace with own map's
393 scope. */
394 if (new_list != NULL)
395 {
396 newp[remain++] = new_list;
397 new_list = NULL;
398 }
399 continue;
400 }
401 }
402
403 newp[remain++] = imap->l_scope[cnt];
404 }
405 newp[remain] = NULL;
406
407 struct r_scope_elem **old = imap->l_scope;
408
409 imap->l_scope = newp;
410
411 /* No user anymore, we can free it now. */
412 if (old != imap->l_scope_mem)
413 {
414 if (_dl_scope_free (old))
415 /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
416 no need to repeat it. */
417 scope_mem_left = false;
418 }
419 else
420 scope_mem_left = true;
421
422 imap->l_scope_max = new_size;
423 }
424
425 /* The loader is gone, so mark the object as not having one.
426 Note: l_idx != IDX_STILL_USED -> object will be removed. */
427 if (imap->l_loader != NULL
428 && imap->l_loader->l_idx != IDX_STILL_USED)
429 imap->l_loader = NULL;
430
431 /* Remember where the first dynamically loaded object is. */
432 if (i < first_loaded)
433 first_loaded = i;
434 }
435 }
436
437 /* If there are no objects to unload, do nothing further. */
438 if (!unload_any)
439 goto out;
440
441 #ifdef SHARED
442 /* Auditing checkpoint: we will start deleting objects. */
443 if (__builtin_expect (do_audit, 0))
444 {
445 struct link_map *head = ns->_ns_loaded;
446 struct audit_ifaces *afct = GLRO(dl_audit);
447 /* Do not call the functions for any auditing object. */
448 if (head->l_auditing == 0)
449 {
450 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
451 {
452 if (afct->activity != NULL)
453 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);
454
455 afct = afct->next;
456 }
457 }
458 }
459 #endif
460
461 /* Notify the debugger we are about to remove some loaded objects. */
462 struct r_debug *r = _dl_debug_initialize (0, nsid);
463 r->r_state = RT_DELETE;
464 _dl_debug_state ();
465
466 if (unload_global)
467 {
468 /* Some objects are in the global scope list. Remove them. */
469 struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
470 unsigned int i;
471 unsigned int j = 0;
472 unsigned int cnt = ns_msl->r_nlist;
473
474 while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
475 --cnt;
476
477 if (cnt + unload_global == ns_msl->r_nlist)
478 /* Speed up removing most recently added objects. */
479 j = cnt;
480 else
481 for (i = 0; i < cnt; i++)
482 if (ns_msl->r_list[i]->l_removed == 0)
483 {
484 if (i != j)
485 ns_msl->r_list[j] = ns_msl->r_list[i];
486 j++;
487 }
488 ns_msl->r_nlist = j;
489 }
490
491 if (!RTLD_SINGLE_THREAD_P
492 && (unload_global
493 || scope_mem_left
494 || (GL(dl_scope_free_list) != NULL
495 && GL(dl_scope_free_list)->count)))
496 {
497 THREAD_GSCOPE_WAIT ();
498
499 /* Now we can free any queued old scopes. */
500 struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
501 if (fsl != NULL)
502 while (fsl->count > 0)
503 free (fsl->list[--fsl->count]);
504 }
505
506 size_t tls_free_start;
507 size_t tls_free_end;
508 tls_free_start = tls_free_end = NO_TLS_OFFSET;
509
510 /* We modify the list of loaded objects. */
511 __rtld_lock_lock_recursive (GL(dl_load_write_lock));
512
513 /* Check each element of the search list to see if all references to
514 it are gone. */
515 for (unsigned int i = first_loaded; i < nloaded; ++i)
516 {
517 struct link_map *imap = maps[i];
518 if (!used[i])
519 {
520 assert (imap->l_type == lt_loaded);
521
522 /* That was the last reference, and this was a dlopen-loaded
523 object. We can unmap it. */
524
525 /* Remove the object from the dtv slotinfo array if it uses TLS. */
526 if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
527 {
528 any_tls = true;
529
530 if (GL(dl_tls_dtv_slotinfo_list) != NULL
531 && ! remove_slotinfo (imap->l_tls_modid,
532 GL(dl_tls_dtv_slotinfo_list), 0,
533 imap->l_init_called))
534 /* All dynamically loaded modules with TLS are unloaded. */
535 GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);
536
537 if (imap->l_tls_offset != NO_TLS_OFFSET
538 && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
539 {
540 /* Collect a contiguous chunk built from the objects in
541 this search list, going in either direction. When the
542 whole chunk is at the end of the used area then we can
543 reclaim it. */
544 #if TLS_TCB_AT_TP
545 if (tls_free_start == NO_TLS_OFFSET
546 || (size_t) imap->l_tls_offset == tls_free_start)
547 {
548 /* Extend the contiguous chunk being reclaimed. */
549 tls_free_start
550 = imap->l_tls_offset - imap->l_tls_blocksize;
551
552 if (tls_free_end == NO_TLS_OFFSET)
553 tls_free_end = imap->l_tls_offset;
554 }
555 else if (imap->l_tls_offset - imap->l_tls_blocksize
556 == tls_free_end)
557 /* Extend the chunk backwards. */
558 tls_free_end = imap->l_tls_offset;
559 else
560 {
561 /* This isn't contiguous with the last chunk freed.
562 One of them will be leaked unless we can free
563 one block right away. */
564 if (tls_free_end == GL(dl_tls_static_used))
565 {
566 GL(dl_tls_static_used) = tls_free_start;
567 tls_free_end = imap->l_tls_offset;
568 tls_free_start
569 = tls_free_end - imap->l_tls_blocksize;
570 }
571 else if ((size_t) imap->l_tls_offset
572 == GL(dl_tls_static_used))
573 GL(dl_tls_static_used)
574 = imap->l_tls_offset - imap->l_tls_blocksize;
575 else if (tls_free_end < (size_t) imap->l_tls_offset)
576 {
577 /* We pick the later block. It has a chance to
578 be freed. */
579 tls_free_end = imap->l_tls_offset;
580 tls_free_start
581 = tls_free_end - imap->l_tls_blocksize;
582 }
583 }
584 #elif TLS_DTV_AT_TP
585 if ((size_t) imap->l_tls_offset == tls_free_end)
586 /* Extend the contiguous chunk being reclaimed. */
587 tls_free_end -= imap->l_tls_blocksize;
588 else if (imap->l_tls_offset + imap->l_tls_blocksize
589 == tls_free_start)
590 /* Extend the chunk backwards. */
591 tls_free_start = imap->l_tls_offset;
592 else
593 {
594 /* This isn't contiguous with the last chunk freed.
595 One of them will be leaked. */
596 if (tls_free_end == GL(dl_tls_static_used))
597 GL(dl_tls_static_used) = tls_free_start;
598 tls_free_start = imap->l_tls_offset;
599 tls_free_end = tls_free_start + imap->l_tls_blocksize;
600 }
601 #else
602 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
603 #endif
604 }
605 }
606
607 /* We can unmap all the maps at once. We determined the
608 start address and length when we loaded the object and
609 the `munmap' call does the rest. */
610 DL_UNMAP (imap);
611
612 /* Finally, unlink the data structure and free it. */
613 if (imap->l_prev != NULL)
614 imap->l_prev->l_next = imap->l_next;
615 else
616 {
617 #ifdef SHARED
618 assert (nsid != LM_ID_BASE);
619 #endif
620 ns->_ns_loaded = imap->l_next;
621 }
622
623 --ns->_ns_nloaded;
624 if (imap->l_next != NULL)
625 imap->l_next->l_prev = imap->l_prev;
626
627 free (imap->l_versions);
628 if (imap->l_origin != (char *) -1)
629 free ((char *) imap->l_origin);
630
631 free (imap->l_reldeps);
632
633 /* Print debugging message. */
634 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
635 _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
636 imap->l_name, imap->l_ns);
637
638 /* This name is always allocated. */
639 free (imap->l_name);
640 /* Remove the list with all the names of the shared object. */
641
642 struct libname_list *lnp = imap->l_libname;
643 do
644 {
645 struct libname_list *this = lnp;
646 lnp = lnp->next;
647 if (!this->dont_free)
648 free (this);
649 }
650 while (lnp != NULL);
651
652 /* Remove the searchlists. */
653 free (imap->l_initfini);
654
655 /* Remove the scope array if we allocated it. */
656 if (imap->l_scope != imap->l_scope_mem)
657 free (imap->l_scope);
658
659 if (imap->l_phdr_allocated)
660 free ((void *) imap->l_phdr);
661
662 if (imap->l_rpath_dirs.dirs != (void *) -1)
663 free (imap->l_rpath_dirs.dirs);
664 if (imap->l_runpath_dirs.dirs != (void *) -1)
665 free (imap->l_runpath_dirs.dirs);
666
667 free (imap);
668 }
669 }
670
671 __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
672
673 /* If we removed any object which uses TLS, bump the generation counter. */
674 if (any_tls)
675 {
676 if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
677 _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");
678
679 if (tls_free_end == GL(dl_tls_static_used))
680 GL(dl_tls_static_used) = tls_free_start;
681 }
682
683 #ifdef SHARED
684 /* Auditing checkpoint: we have deleted all objects. */
685 if (__builtin_expect (do_audit, 0))
686 {
687 struct link_map *head = ns->_ns_loaded;
688 /* Do not call the functions for any auditing object. */
689 if (head->l_auditing == 0)
690 {
691 struct audit_ifaces *afct = GLRO(dl_audit);
692 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
693 {
694 if (afct->activity != NULL)
695 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
696
697 afct = afct->next;
698 }
699 }
700 }
701 #endif
702
703 if (__builtin_expect (ns->_ns_loaded == NULL, 0)
704 && nsid == GL(dl_nns) - 1)
705 do
706 {
707 --GL(dl_nns);
708 #ifndef SHARED
709 if (GL(dl_nns) == 0)
710 break;
711 #endif
712 }
713 while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
714
715 /* Notify the debugger those objects are finalized and gone. */
716 r->r_state = RT_CONSISTENT;
717 _dl_debug_state ();
718
719 /* Recheck if we need to retry, release the lock. */
720 out:
721 if (dl_close_state == rerun)
722 goto retry;
723
724 dl_close_state = not_pending;
725 }
726
727
728 void
729 _dl_close (void *_map)
730 {
731 struct link_map *map = _map;
732
733 /* First see whether we can remove the object at all. */
734 if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
735 {
736 assert (map->l_init_called);
737 /* Nope. Do nothing. */
738 return;
739 }
740
741 if (__builtin_expect (map->l_direct_opencount, 1) == 0)
742 GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));
743
744 /* Acquire the lock. */
745 __rtld_lock_lock_recursive (GL(dl_load_lock));
746
747 _dl_close_worker (map);
748
749 __rtld_lock_unlock_recursive (GL(dl_load_lock));
750 }
751
752
753 static bool __libc_freeres_fn_section
754 free_slotinfo (struct dtv_slotinfo_list **elemp)
755 {
756 size_t cnt;
757
758 if (*elemp == NULL)
759 /* Nothing here, all is removed (or there never was anything). */
760 return true;
761
762 if (!free_slotinfo (&(*elemp)->next))
763 /* We cannot free the entry. */
764 return false;
765
766 /* That cleared our next pointer for us. */
767
768 for (cnt = 0; cnt < (*elemp)->len; ++cnt)
769 if ((*elemp)->slotinfo[cnt].map != NULL)
770 /* Still used. */
771 return false;
772
773 /* We can remove the list element. */
774 free (*elemp);
775 *elemp = NULL;
776
777 return true;
778 }
779
780
781 libc_freeres_fn (free_mem)
782 {
783 for (Lmid_t nsid = 0; nsid < GL(dl_nns); ++nsid)
784 if (__builtin_expect (GL(dl_ns)[nsid]._ns_global_scope_alloc, 0) != 0
785 && (GL(dl_ns)[nsid]._ns_main_searchlist->r_nlist
786 // XXX Check whether we need NS-specific initial_searchlist
787 == GLRO(dl_initial_searchlist).r_nlist))
788 {
789 /* All objects dynamically loaded by the program are unloaded. Free
790 the memory allocated for the global scope variable. */
791 struct link_map **old = GL(dl_ns)[nsid]._ns_main_searchlist->r_list;
792
793 /* Put the initial search list back in. */
794 GL(dl_ns)[nsid]._ns_main_searchlist->r_list
795 // XXX Check whether we need NS-specific initial_searchlist
796 = GLRO(dl_initial_searchlist).r_list;
797 /* Signal that the original map is used. */
798 GL(dl_ns)[nsid]._ns_global_scope_alloc = 0;
799
800 /* Now free the old map. */
801 free (old);
802 }
803
804 if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
805 {
806 /* Free the memory allocated for the dtv slotinfo array. We can do
807 this only if all modules which used this memory are unloaded. */
808 #ifdef SHARED
809 if (GL(dl_initial_dtv) == NULL)
810 /* There was no initial TLS setup; it was set up later using
811 the normal malloc. */
812 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
813 else
814 #endif
815 /* The first element of the list does not have to be deallocated.
816 It was allocated in the dynamic linker (i.e., with a different
817 malloc), and in the static library it's in .bss space. */
818 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
819 }
820
821 void *scope_free_list = GL(dl_scope_free_list);
822 GL(dl_scope_free_list) = NULL;
823 free (scope_free_list);
824 }