/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
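
/* During the garbage collection pass below l_idx does double duty: it
   first holds an object's position in the MAPS array built there, and
   is then overwritten with IDX_STILL_USED once the object is known to
   survive this close.  */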


#ifdef USE_TLS
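/* The dtv slotinfo information is kept in a chain of array elements
   (struct dtv_slotinfo_list).  DISP is the number of entries in the
   elements preceding LISTP, so IDX - DISP is the index into LISTP's
   own slotinfo array.  */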
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry, no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }
  /* No non-empty entry in this list element.  */
  return false;
}
#endif


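/* A minimal sketch of how this function is reached (illustrative only;
   the caller and library name below are not part of this file):

     void *h = dlopen ("libfoo.so", RTLD_NOW);  // l_direct_opencount == 1
     ...
     dlclose (h);   // dlclose forwards the handle here, to _dl_close

   Only when the direct open count drops to zero does the garbage
   collection pass below actually run.  */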
void
_dl_close (void *_map)
{
  struct link_map *map = _map;
  Lmid_t ns = map->l_ns;
  unsigned int i;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (i.e., some destructor calls
     dlclose), just record that the parent _dl_close will need to do
     garbage collection again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

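  /* The garbage collection pass may have to be repeated: destructors
     run below can themselves call dlclose, which the recursion check
     above records as a `rerun' request handled by the goto at the end
     of this function.  */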
 retry:
  dl_close_state = pending;

#ifdef USE_TLS
  bool any_tls = false;
#endif
  const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

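  /* What follows is a simple mark phase: `used' flags objects that must
     survive, `done' flags objects whose dependencies have already been
     scanned.  Whenever marking reaches a map with a smaller index,
     DONE_INDEX is moved back so that map's dependencies are scanned
     again.  */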
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

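  /* An object's destructors must run before the destructors of the
     objects it depends on, so bring MAPS into dependency order before
     calling any termination functions.  */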
  /* Sort the entries.  */
  _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nloaded, used, ns);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !GL(dl_ns)[ns]._ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  unsigned int first_loaded = ~0;
  for (i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == ns);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, ns);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this, count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list, count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
            /* This relies on the scope entries always being set either
               to the map's own l_symbolic_searchlist address, or to some
               map's l_searchlist address.  */
            if (imap->l_scoperec->scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scoperec->scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == ns);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link
                 map's memory.  */
              size_t new_size;
              struct r_scoperec *newp;
              if (imap->l_scoperec != &imap->l_scoperec_mem
                  && remain < NINIT_SCOPE_ELEMS (imap)
                  && imap->l_scoperec_mem.nusers == 0)
                {
                  new_size = NINIT_SCOPE_ELEMS (imap);
                  newp = &imap->l_scoperec_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scoperec *)
                    malloc (sizeof (struct r_scoperec)
                            + new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              newp->nusers = 0;
              newp->remove_after_use = false;
              newp->notify = false;

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scoperec->scope[cnt]
                      != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scoperec->scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp->scope[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp->scope[remain++] = imap->l_scoperec->scope[cnt];
                }
              newp->scope[remain] = NULL;

              struct r_scoperec *old = imap->l_scoperec;

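              /* Install the new scope record.  Other threads may still
                 be walking the old one during lazy symbol resolution;
                 it can only be freed once its user count has dropped to
                 zero.  */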
              if (SINGLE_THREAD_P)
                imap->l_scoperec = newp;
              else
                {
                  __rtld_mrlock_change (imap->l_scoperec_lock);
                  imap->l_scoperec = newp;
                  __rtld_mrlock_done (imap->l_scoperec_lock);

                  if (atomic_increment_val (&old->nusers) != 1)
                    {
                      old->remove_after_use = true;
                      old->notify = true;
                      if (atomic_decrement_val (&old->nusers) != 0)
                        __rtld_waitzero (old->nusers);
                    }
                }

              /* No users anymore; we can free it now.  */
              if (old != &imap->l_scoperec_mem)
                free (old);

              imap->l_scope_max = new_size;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, ns);
  r->r_state = RT_DELETE;
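  /* Debuggers are expected to have a breakpoint in _dl_debug_state so
     they can observe the state change.  */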
  _dl_debug_state ();

#ifdef USE_TLS
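  /* [tls_free_start, tls_free_end) will describe a contiguous chunk of
     static TLS space which can be handed back if it ends up at the end
     of the area currently in use.  */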
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt = GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;

              do
                --cnt;
              while (GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt] != imap);

              /* The object was already correctly registered.  */
              while (++cnt
                     < GL(dl_ns)[ns]._ns_main_searchlist->r_nlist)
                GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt - 1]
                  = GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt];

              --GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;
            }

#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses
             TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (ns != LM_ID_BASE);
#endif
              GL(dl_ns)[ns]._ns_loaded = imap->l_next;
            }

          --GL(dl_ns)[ns]._ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scoperec != &imap->l_scoperec_mem)
            free (imap->l_scoperec);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

#ifdef USE_TLS
  /* If we removed any object which uses TLS, bump the generation
     counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie,
                                LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck whether we need to retry, then release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}


#ifdef USE_TLS
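/* Recursively free the slotinfo list elements, deepest first.  An
   element can only be freed once everything after it is gone and none
   of its own slots still refers to a loaded module.  */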
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif


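/* This function is run from __libc_freeres (used e.g. by memory-
   debugging tools) to release memory the dynamic linker allocated for
   bookkeeping.  */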
libc_freeres_fn (free_mem)
{
  for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
    if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[ns]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[ns]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup; it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}