/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
/* Type of the destructor functions.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
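
/* _dl_close drops one direct reference to MAP.  When the count reaches
   zero it computes which objects in the namespace become unreachable,
   runs their destructors, fixes up the remaining objects' scopes and
   the TLS slotinfo bookkeeping, and finally unmaps and frees the dead
   link maps.  All of this happens under GL(dl_load_lock).  */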
void
_dl_close (void *_map)
{
  struct link_map *map = _map;
  Lmid_t ns = map->l_ns;
  unsigned int i;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL,
                           N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

 retry:
  dl_close_state = pending;
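
  /* Everything below runs with GL(dl_load_lock) held and dl_close_state
     set to `pending'.  A recursive _dl_close call from a destructor only
     flips the state to `rerun'; the `retry' label above then lets this
     outermost call repeat the garbage collection.  */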
  bool any_tls = false;
  const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));
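
  /* What follows is a transitive-closure computation over the "still
     used" relation: an object survives if it is still directly opened,
     is marked DF_1_NODELETE, or is reachable from a survivor through
     its dependency (l_initfini) or relocation-dependency (l_reldeps)
     lists.  Everything left unmarked in USED[] can be unloaded.  */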
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a dependency with a lower index we
                         have to process it again, so rewind.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
  /* Sort the entries.  */
  _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nloaded, used, ns);
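
  /* MAPS is now ordered so that each object comes before the objects it
     depends on; running the destructors in array order therefore
     respects dependencies.  */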
  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !GL(dl_ns)[ns]._ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  unsigned int first_loaded = ~0;
  for (i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == ns);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);
          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, ns);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }
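
          /* Note that the DT_FINI_ARRAY functions above run in reverse
             order, as the ELF gABI requires, followed by the single
             old-style DT_FINI function.  */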
#ifdef SHARED
          /* Auditing checkpoint: we are about to close this object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;
          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }
          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scoperec->scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scoperec->scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == ns);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;
          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 own memory.  */
              size_t new_size;
              struct r_scoperec *newp;
              if (imap->l_scoperec != &imap->l_scoperec_mem
                  && remain < NINIT_SCOPE_ELEMS (imap)
                  && imap->l_scoperec_mem.nusers == 0)
                {
                  new_size = NINIT_SCOPE_ELEMS (imap);
                  newp = &imap->l_scoperec_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scoperec *)
                    malloc (sizeof (struct r_scoperec)
                            + new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              newp->nusers = 0;
              newp->remove_after_use = false;
              newp->notify = false;
              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scoperec->scope[cnt]
                      != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scoperec->scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp->scope[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp->scope[remain++] = imap->l_scoperec->scope[cnt];
                }
              newp->scope[remain] = NULL;
              struct r_scoperec *old = imap->l_scoperec;

              if (SINGLE_THREAD_P)
                imap->l_scoperec = newp;
              else
                {
                  __rtld_mrlock_change (imap->l_scoperec_lock);
                  imap->l_scoperec = newp;
                  __rtld_mrlock_done (imap->l_scoperec_lock);

                  if (atomic_increment_val (&old->nusers) != 1)
                    {
                      old->remove_after_use = true;
                      old->notify = true;
                      if (atomic_decrement_val (&old->nusers) != 0)
                        __rtld_waitzero (old->nusers);
                    }
                }

              /* No user anymore, we can free it now.  */
              if (old != &imap->l_scoperec_mem)
                free (old);

              imap->l_scope_max = new_size;
            }
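
          /* The scope swap above is effectively an RCU-style handoff:
             the new scope record is published first, and the old array
             is freed only once its nusers count shows that no thread is
             still reading it.  */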
          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif
  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, ns);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif
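
  /* TLS_FREE_START/TLS_FREE_END delimit one contiguous chunk of static
     TLS space being vacated.  It can only truly be reclaimed when it
     abuts GL(dl_tls_static_used), the high-water mark of the static TLS
     area; otherwise the space is leaked.  */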
  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt
                = GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;

              do
                --cnt;
              while (GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt] != imap);

              /* The object was already correctly registered.  */
              while (++cnt
                     < GL(dl_ns)[ns]._ns_main_searchlist->r_nlist)
                GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt - 1]
                  = GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt];

              --GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;
            }
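
          /* The loops above locate IMAP in the global search list and
             compact the list in place: every later entry moves down one
             slot before r_nlist shrinks.  */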
#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);
              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (ns != LM_ID_BASE);
#endif
              GL(dl_ns)[ns]._ns_loaded = imap->l_next;
            }

          --GL(dl_ns)[ns]._ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scoperec != &imap->l_scoperec_mem)
            free (imap->l_scoperec);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }
#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif
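
  /* The generation bump makes every thread's DTV stale: the next
     __tls_get_addr call in each thread compares generations and drops
     its DTV entries for the unloaded modules.  */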
#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie,
                                LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
#ifdef USE_TLS
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif
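
/* free_mem is registered through libc_freeres_fn and runs from
   __libc_freeres (e.g., under mtrace or valgrind) to release loader
   allocations that otherwise live for the whole process.  */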
libc_freeres_fn (free_mem)
{
  for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
    if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[ns]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[ns]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }
#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup, it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}