/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

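  /* Scan backwards through this list element for a new highest index
     that is still in use.  In the first element (disp == 0), stop above
     the slots of the initially loaded modules, whose static TLS is not
     freed.  */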
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

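/* Core of dlclose: drop one direct reference to MAP and, once the object
   and everything only it keeps alive have become unused, run their
   finalizers, remove them from the namespace and from search scopes,
   release their TLS slots and unmap them.  FORCE additionally clears
   STB_GNU_UNIQUE symbol table entries that point into the unloaded
   objects.  The caller is expected to hold GL(dl_load_lock).  */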
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage
     collection again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

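  /* Restart point: a recursive _dl_close invoked from a destructor below
     sets dl_close_state to rerun, and we come back here for another
     garbage-collection pass.  */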
 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);

  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            _dl_catch_exception (NULL, _dl_call_fini, imap);

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_update (nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

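  /* Make sure no thread is still reading the scope arrays we have just
     replaced (or the shrunken global scope) before they are freed.  */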
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

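  /* Contiguous range of static TLS space that can potentially be handed
     back if it ends up at the end of the used area.  */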
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Protects global and module-specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_relaxed (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif

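  /* If the highest-numbered namespaces are now empty, shrink the count of
     namespaces in use.  */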
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}

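/* Implementation of dlclose.  Takes the loader lock, refuses to remove
   NODELETE objects, reports an error for objects that are not open, and
   hands the actual unloading off to _dl_close_worker.  */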
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
