1/* Load a shared object at runtime, relocate it, and run its initializer.
2 Copyright (C) 1996-2024 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
18
19#include <assert.h>
20#include <dlfcn.h>
21#include <errno.h>
22#include <libintl.h>
23#include <stdio.h>
24#include <stdlib.h>
25#include <string.h>
26#include <unistd.h>
27#include <sys/mman.h> /* Check whether MAP_COPY is defined. */
28#include <sys/param.h>
29#include <libc-lock.h>
30#include <ldsodefs.h>
31#include <sysdep-cancel.h>
32#include <tls.h>
33#include <stap-probe.h>
34#include <atomic.h>
35#include <libc-internal.h>
36#include <array_length.h>
37#include <libc-early-init.h>
38#include <gnu/lib-names.h>
39#include <dl-find_object.h>
40
41#include <dl-dst.h>
42#include <dl-prop.h>
43
44
45/* We must be careful not to leave us in an inconsistent state. Thus we
46 catch any error and re-raise it after cleaning up. */
47
/* Argument bundle passed through _dl_catch_exception to dl_open_worker
   and dl_open_worker_begin.  Also carries state back to _dl_open for
   error rollback.  */
struct dl_open_args
{
  /* File name passed to dlopen.  */
  const char *file;
  /* RTLD_* flags passed to dlopen.  */
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* On success, the link map of the directly opened object.  */
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.
     Forwarded to the ELF constructors via _dl_init.  */
  int argc;
  char **argv;
  char **env;
};
78
79/* Called in case the global scope cannot be extended. */
80static void __attribute__ ((noreturn))
81add_to_global_resize_failure (struct link_map *new)
82{
83 _dl_signal_error (ENOMEM, object: new->l_libname->name, NULL,
84 N_ ("cannot extend global scope"));
85}
86
87/* Grow the global scope array for the namespace, so that all the new
88 global objects can be added later in add_to_global_update, without
89 risk of memory allocation failure. add_to_global_resize raises
90 exceptions for memory allocation errors. */
91static void
92add_to_global_resize (struct link_map *new)
93{
94 struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
95
96 /* Count the objects we have to put in the global scope. */
97 unsigned int to_add = 0;
98 for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
99 if (new->l_searchlist.r_list[cnt]->l_global == 0)
100 ++to_add;
101
102 /* The symbols of the new objects and its dependencies are to be
103 introduced into the global scope that will be used to resolve
104 references from other dynamically-loaded objects.
105
106 The global scope is the searchlist in the main link map. We
107 extend this list if necessary. There is one problem though:
108 since this structure was allocated very early (before the libc
109 is loaded) the memory it uses is allocated by the malloc()-stub
110 in the ld.so. When we come here these functions are not used
111 anymore. Instead the malloc() implementation of the libc is
112 used. But this means the block from the main map cannot be used
113 in an realloc() call. Therefore we allocate a completely new
114 array the first time we have to add something to the locale scope. */
115
116 if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
117 &ns->_ns_global_scope_pending_adds))
118 add_to_global_resize_failure (new);
119
120 unsigned int new_size = 0; /* 0 means no new allocation. */
121 void *old_global = NULL; /* Old allocation if free-able. */
122
123 /* Minimum required element count for resizing. Adjusted below for
124 an exponential resizing policy. */
125 size_t required_new_size;
126 if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
127 ns->_ns_global_scope_pending_adds,
128 &required_new_size))
129 add_to_global_resize_failure (new);
130
131 if (ns->_ns_global_scope_alloc == 0)
132 {
133 if (__builtin_add_overflow (required_new_size, 8, &new_size))
134 add_to_global_resize_failure (new);
135 }
136 else if (required_new_size > ns->_ns_global_scope_alloc)
137 {
138 if (__builtin_mul_overflow (required_new_size, 2, &new_size))
139 add_to_global_resize_failure (new);
140
141 /* The old array was allocated with our malloc, not the minimal
142 malloc. */
143 old_global = ns->_ns_main_searchlist->r_list;
144 }
145
146 if (new_size > 0)
147 {
148 size_t allocation_size;
149 if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
150 &allocation_size))
151 add_to_global_resize_failure (new);
152 struct link_map **new_global = malloc (size: allocation_size);
153 if (new_global == NULL)
154 add_to_global_resize_failure (new);
155
156 /* Copy over the old entries. */
157 memcpy (new_global, ns->_ns_main_searchlist->r_list,
158 ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));
159
160 ns->_ns_global_scope_alloc = new_size;
161 ns->_ns_main_searchlist->r_list = new_global;
162
163 if (!RTLD_SINGLE_THREAD_P)
164 THREAD_GSCOPE_WAIT ();
165
166 free (ptr: old_global);
167 }
168}
169
170/* Actually add the new global objects to the global scope. Must be
171 called after add_to_global_resize. This function cannot fail. */
172static void
173add_to_global_update (struct link_map *new)
174{
175 struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
176
177 /* Now add the new entries. */
178 unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
179 for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
180 {
181 struct link_map *map = new->l_searchlist.r_list[cnt];
182
183 if (map->l_global == 0)
184 {
185 map->l_global = 1;
186
187 /* The array has been resized by add_to_global_resize. */
188 assert (new_nlist < ns->_ns_global_scope_alloc);
189
190 ns->_ns_main_searchlist->r_list[new_nlist++] = map;
191
192 /* We modify the global scope. Report this. */
193 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
194 _dl_debug_printf (fmt: "\nadd %s [%lu] to global scope\n",
195 map->l_name, map->l_ns);
196 }
197 }
198
199 /* Some of the pending adds have been performed by the loop above.
200 Adjust the counter accordingly. */
201 unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
202 assert (added <= ns->_ns_global_scope_pending_adds);
203 ns->_ns_global_scope_pending_adds -= added;
204
205 atomic_write_barrier ();
206 ns->_ns_main_searchlist->r_nlist = new_nlist;
207}
208
209/* Search link maps in all namespaces for the DSO that contains the object at
210 address ADDR. Returns the pointer to the link map of the matching DSO, or
211 NULL if a match is not found. */
212struct link_map *
213_dl_find_dso_for_object (const ElfW(Addr) addr)
214{
215 struct link_map *l;
216
217 /* Find the highest-addressed object that ADDR is not below. */
218 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
219 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
220 if (addr >= l->l_map_start && addr < l->l_map_end
221 && (l->l_contiguous
222 || _dl_addr_inside_object (l, addr: (ElfW(Addr)) addr)))
223 {
224 assert (ns == l->l_ns);
225 return l;
226 }
227 return NULL;
228}
229rtld_hidden_def (_dl_find_dso_for_object);
230
231/* Return true if NEW is found in the scope for MAP. */
232static size_t
233scope_has_map (struct link_map *map, struct link_map *new)
234{
235 size_t cnt;
236 for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
237 if (map->l_scope[cnt] == &new->l_searchlist)
238 return true;
239 return false;
240}
241
242/* Return the length of the scope for MAP. */
243static size_t
244scope_size (struct link_map *map)
245{
246 size_t cnt;
247 for (cnt = 0; map->l_scope[cnt] != NULL; )
248 ++cnt;
249 return cnt;
250}
251
252/* Resize the scopes of depended-upon objects, so that the new object
253 can be added later without further allocation of memory. This
254 function can raise an exceptions due to malloc failure. */
255static void
256resize_scopes (struct link_map *new)
257{
258 /* If the file is not loaded now as a dependency, add the search
259 list of the newly loaded object to the scope. */
260 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
261 {
262 struct link_map *imap = new->l_searchlist.r_list[i];
263
264 /* If the initializer has been called already, the object has
265 not been loaded here and now. */
266 if (imap->l_init_called && imap->l_type == lt_loaded)
267 {
268 if (scope_has_map (map: imap, new))
269 /* Avoid duplicates. */
270 continue;
271
272 size_t cnt = scope_size (map: imap);
273 if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
274 {
275 /* The l_scope array is too small. Allocate a new one
276 dynamically. */
277 size_t new_size;
278 struct r_scope_elem **newp;
279
280 if (imap->l_scope != imap->l_scope_mem
281 && imap->l_scope_max < array_length (imap->l_scope_mem))
282 {
283 /* If the current l_scope memory is not pointing to
284 the static memory in the structure, but the
285 static memory in the structure is large enough to
286 use for cnt + 1 scope entries, then switch to
287 using the static memory. */
288 new_size = array_length (imap->l_scope_mem);
289 newp = imap->l_scope_mem;
290 }
291 else
292 {
293 new_size = imap->l_scope_max * 2;
294 newp = (struct r_scope_elem **)
295 malloc (size: new_size * sizeof (struct r_scope_elem *));
296 if (newp == NULL)
297 _dl_signal_error (ENOMEM, object: "dlopen", NULL,
298 N_("cannot create scope list"));
299 }
300
301 /* Copy the array and the terminating NULL. */
302 memcpy (newp, imap->l_scope,
303 (cnt + 1) * sizeof (imap->l_scope[0]));
304 struct r_scope_elem **old = imap->l_scope;
305
306 imap->l_scope = newp;
307
308 if (old != imap->l_scope_mem)
309 _dl_scope_free (old);
310
311 imap->l_scope_max = new_size;
312 }
313 }
314 }
315}
316
317/* Second stage of resize_scopes: Add NEW to the scopes. Also print
318 debugging information about scopes if requested.
319
320 This function cannot raise an exception because all required memory
321 has been allocated by a previous call to resize_scopes. */
322static void
323update_scopes (struct link_map *new)
324{
325 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
326 {
327 struct link_map *imap = new->l_searchlist.r_list[i];
328 int from_scope = 0;
329
330 if (imap->l_init_called && imap->l_type == lt_loaded)
331 {
332 if (scope_has_map (map: imap, new))
333 /* Avoid duplicates. */
334 continue;
335
336 size_t cnt = scope_size (map: imap);
337 /* Assert that resize_scopes has sufficiently enlarged the
338 array. */
339 assert (cnt + 1 < imap->l_scope_max);
340
341 /* First terminate the extended list. Otherwise a thread
342 might use the new last element and then use the garbage
343 at offset IDX+1. */
344 imap->l_scope[cnt + 1] = NULL;
345 atomic_write_barrier ();
346 imap->l_scope[cnt] = &new->l_searchlist;
347
348 from_scope = cnt;
349 }
350
351 /* Print scope information. */
352 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
353 _dl_show_scope (new: imap, from: from_scope);
354 }
355}
356
357/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
358 space in GL (dl_tls_dtv_slotinfo_list). This can raise an
359 exception. The return value is true if any of the new objects use
360 TLS. */
361static bool
362resize_tls_slotinfo (struct link_map *new)
363{
364 bool any_tls = false;
365 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
366 if (_dl_add_to_slotinfo (l: new->l_searchlist.r_list[i], false))
367 any_tls = true;
368 return any_tls;
369}
370
371/* Second stage of TLS update, after resize_tls_slotinfo. This
372 function does not raise any exception. It should only be called if
373 resize_tls_slotinfo returned true. */
374static void
375update_tls_slotinfo (struct link_map *new)
376{
377 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
378 _dl_add_to_slotinfo (l: new->l_searchlist.r_list[i], true);
379
380 size_t newgen = GL(dl_tls_generation) + 1;
381 if (__glibc_unlikely (newgen == 0))
382 _dl_fatal_printf (N_("\
383TLS generation counter wrapped! Please report this."));
384 /* Can be read concurrently. */
385 atomic_store_release (&GL(dl_tls_generation), newgen);
386
387 /* We need a second pass for static tls data, because
388 _dl_update_slotinfo must not be run while calls to
389 _dl_add_to_slotinfo are still pending. */
390 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
391 {
392 struct link_map *imap = new->l_searchlist.r_list[i];
393
394 if (imap->l_need_tls_init && imap->l_tls_blocksize > 0)
395 {
396 /* For static TLS we have to allocate the memory here and
397 now, but we can delay updating the DTV. */
398 imap->l_need_tls_init = 0;
399#ifdef SHARED
400 /* Update the slot information data for the current
401 generation. */
402
403 /* FIXME: This can terminate the process on memory
404 allocation failure. It is not possible to raise
405 exceptions from this context; to fix this bug,
406 _dl_update_slotinfo would have to be split into two
407 operations, similar to resize_scopes and update_scopes
408 above. This is related to bug 16134. */
409 _dl_update_slotinfo (imap->l_tls_modid, newgen);
410#endif
411
412 dl_init_static_tls (map: imap);
413 assert (imap->l_need_tls_init == 0);
414 }
415 }
416}
417
418/* Mark the objects as NODELETE if required. This is delayed until
419 after dlopen failure is not possible, so that _dl_close can clean
420 up objects if necessary. */
421static void
422activate_nodelete (struct link_map *new)
423{
424 /* It is necessary to traverse the entire namespace. References to
425 objects in the global scope and unique symbol bindings can force
426 NODELETE status for objects outside the local scope. */
427 for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
428 l = l->l_next)
429 if (l->l_nodelete_pending)
430 {
431 if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
432 _dl_debug_printf (fmt: "activating NODELETE for %s [%lu]\n",
433 l->l_name, l->l_ns);
434
435 /* The flag can already be true at this point, e.g. a signal
436 handler may have triggered lazy binding and set NODELETE
437 status immediately. */
438 l->l_nodelete_active = true;
439
440 /* This is just a debugging aid, to indicate that
441 activate_nodelete has run for this map. */
442 l->l_nodelete_pending = false;
443 }
444}
445
446/* Relocate the object L. *RELOCATION_IN_PROGRESS controls whether
447 the debugger is notified of the start of relocation processing. */
448static void
449_dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
450 struct link_map *l, int reloc_mode,
451 bool *relocation_in_progress)
452{
453 if (l->l_real->l_relocated)
454 return;
455
456 if (!*relocation_in_progress)
457 {
458 /* Notify the debugger that relocations are about to happen. */
459 LIBC_PROBE (reloc_start, 2, args->nsid, r);
460 *relocation_in_progress = true;
461 }
462
463#ifdef SHARED
464 if (__glibc_unlikely (GLRO(dl_profile) != NULL))
465 {
466 /* If this here is the shared object which we want to profile
467 make sure the profile is started. We can find out whether
468 this is necessary or not by observing the `_dl_profile_map'
469 variable. If it was NULL but is not NULL afterwards we must
470 start the profiling. */
471 struct link_map *old_profile_map = GL(dl_profile_map);
472
473 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
474
475 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
476 {
477 /* We must prepare the profiling. */
478 _dl_start_profile ();
479
480 /* Prevent unloading the object. */
481 GL(dl_profile_map)->l_nodelete_active = true;
482 }
483 }
484 else
485#endif
486 _dl_relocate_object (map: l, scope: l->l_scope, reloc_mode, consider_profiling: 0);
487}
488
489
490/* struct dl_init_args and call_dl_init are used to call _dl_init with
491 exception handling disabled. */
/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  /* The newly opened link map whose constructors should run.  */
  struct link_map *new;
  /* Program arguments and environment, forwarded to _dl_init.  */
  int argc;
  char **argv;
  char **env;
};
499
500static void
501call_dl_init (void *closure)
502{
503 struct dl_init_args *args = closure;
504 _dl_init (main_map: args->new, argc: args->argc, argv: args->argv, env: args->env);
505}
506
507static void
508dl_open_worker_begin (void *a)
509{
510 struct dl_open_args *args = a;
511 const char *file = args->file;
512 int mode = args->mode;
513 struct link_map *call_map = NULL;
514
515 /* Determine the caller's map if necessary. This is needed in case
516 we have a DST, when we don't know the namespace ID we have to put
517 the new object in, or when the file name has no path in which
518 case we need to look along the RUNPATH/RPATH of the caller. */
519 const char *dst = strchr (file, '$');
520 if (dst != NULL || args->nsid == __LM_ID_CALLER
521 || strchr (file, '/') == NULL)
522 {
523 const void *caller_dlopen = args->caller_dlopen;
524
525 /* We have to find out from which object the caller is calling.
526 By default we assume this is the main application. */
527 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
528
529 struct link_map *l = _dl_find_dso_for_object (addr: (ElfW(Addr)) caller_dlopen);
530
531 if (l)
532 call_map = l;
533
534 if (args->nsid == __LM_ID_CALLER)
535 args->nsid = call_map->l_ns;
536 }
537
538 /* The namespace ID is now known. Keep track of whether libc.so was
539 already loaded, to determine whether it is necessary to call the
540 early initialization routine (or clear libc_map on error). */
541 args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;
542
543 /* Retain the old value, so that it can be restored. */
544 args->original_global_scope_pending_adds
545 = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;
546
547 /* One might be tempted to assert that we are RT_CONSISTENT at this point, but that
548 may not be true if this is a recursive call to dlopen. */
549 _dl_debug_initialize (ldbase: 0, ns: args->nsid);
550
551 /* Load the named object. */
552 struct link_map *new;
553 args->map = new = _dl_map_object (loader: call_map, name: file, type: lt_loaded, trace_mode: 0,
554 mode: mode | __RTLD_CALLMAP, nsid: args->nsid);
555
556 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
557 set and the object is not already loaded. */
558 if (new == NULL)
559 {
560 assert (mode & RTLD_NOLOAD);
561 return;
562 }
563
564 if (__glibc_unlikely (mode & __RTLD_SPROF))
565 /* This happens only if we load a DSO for 'sprof'. */
566 return;
567
568 /* This object is directly loaded. */
569 ++new->l_direct_opencount;
570
571 /* It was already open. */
572 if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
573 {
574 /* Let the user know about the opencount. */
575 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
576 _dl_debug_printf (fmt: "opening file=%s [%lu]; direct_opencount=%u\n\n",
577 new->l_name, new->l_ns, new->l_direct_opencount);
578
579#ifdef SHARED
580 /* No relocation processing on this execution path. But
581 relocation has not been performed for static
582 position-dependent executables, so disable the assert for
583 static linking. */
584 assert (new->l_relocated);
585#endif
586
587 /* If the user requested the object to be in the global
588 namespace but it is not so far, prepare to add it now. This
589 can raise an exception to do a malloc failure. */
590 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
591 add_to_global_resize (new);
592
593 /* Mark the object as not deletable if the RTLD_NODELETE flags
594 was passed. */
595 if (__glibc_unlikely (mode & RTLD_NODELETE))
596 {
597 if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
598 && !new->l_nodelete_active)
599 _dl_debug_printf (fmt: "marking %s [%lu] as NODELETE\n",
600 new->l_name, new->l_ns);
601 new->l_nodelete_active = true;
602 }
603
604 /* Finalize the addition to the global scope. */
605 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
606 add_to_global_update (new);
607
608 /* It is not possible to run the ELF constructor for the new
609 link map if it has not executed yet: If this dlopen call came
610 from an ELF constructor that has not put that object into a
611 consistent state, completing initialization for the entire
612 scope will expose objects that have this partially
613 constructed object among its dependencies to this
614 inconsistent state. This could happen even with a benign
615 dlopen (NULL, RTLD_LAZY) call from a constructor of an
616 initially loaded shared object. */
617
618 return;
619 }
620
621 /* Schedule NODELETE marking for the directly loaded object if
622 requested. */
623 if (__glibc_unlikely (mode & RTLD_NODELETE))
624 new->l_nodelete_pending = true;
625
626 /* Load that object's dependencies. */
627 _dl_map_object_deps (map: new, NULL, npreloads: 0, trace_mode: 0,
628 open_mode: mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
629
630 /* So far, so good. Now check the versions. */
631 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
632 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
633 {
634 struct link_map *map = new->l_searchlist.r_list[i]->l_real;
635 _dl_check_map_versions (map, verbose: 0, trace_mode: 0);
636#ifndef SHARED
637 /* During static dlopen, check if ld.so has been loaded.
638 Perform partial initialization in this case. This must
639 come after the symbol versioning initialization in
640 _dl_check_map_versions. */
641 if (map->l_info[DT_SONAME] != NULL
642 && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
643 + map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
644 __rtld_static_init (map);
645#endif
646 }
647
648 _dl_open_check (m: new);
649
650 /* Print scope information. */
651 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
652 _dl_show_scope (new, from: 0);
653
654 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
655 int reloc_mode = mode & __RTLD_AUDIT;
656 if (GLRO(dl_lazy))
657 reloc_mode |= mode & RTLD_LAZY;
658
659 /* Objects must be sorted by dependency for the relocation process.
660 This allows IFUNC relocations to work and it also means copy
661 relocation of dependencies are if necessary overwritten.
662 __dl_map_object_deps has already sorted l_initfini for us. */
663 unsigned int first = UINT_MAX;
664 unsigned int last = 0;
665 unsigned int j = 0;
666 struct link_map *l = new->l_initfini[0];
667 do
668 {
669 if (! l->l_real->l_relocated)
670 {
671 if (first == UINT_MAX)
672 first = j;
673 last = j + 1;
674 }
675 l = new->l_initfini[++j];
676 }
677 while (l != NULL);
678
679 bool relocation_in_progress = false;
680
681 /* Perform relocation. This can trigger lazy binding in IFUNC
682 resolvers. For NODELETE mappings, these dependencies are not
683 recorded because the flag has not been applied to the newly
684 loaded objects. This means that upon dlopen failure, these
685 NODELETE objects can be unloaded despite existing references to
686 them. However, such relocation dependencies in IFUNC resolvers
687 are undefined anyway, so this is not a problem. */
688
689 /* Ensure that libc is relocated first. This helps with the
690 execution of IFUNC resolvers in libc, and matters only to newly
691 created dlmopen namespaces. Do not do this for static dlopen
692 because libc has relocations against ld.so, which may not have
693 been relocated at this point. */
694 struct r_debug *r = _dl_debug_update (ns: args->nsid);
695#ifdef SHARED
696 if (GL(dl_ns)[args->nsid].libc_map != NULL)
697 _dl_open_relocate_one_object (args, r, GL(dl_ns)[args->nsid].libc_map,
698 reloc_mode, &relocation_in_progress);
699#endif
700
701 for (unsigned int i = last; i-- > first; )
702 _dl_open_relocate_one_object (args, r, l: new->l_initfini[i], reloc_mode,
703 relocation_in_progress: &relocation_in_progress);
704
705 /* This only performs the memory allocations. The actual update of
706 the scopes happens below, after failure is impossible. */
707 resize_scopes (new);
708
709 /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
710 structure. */
711 bool any_tls = resize_tls_slotinfo (new);
712
713 /* Perform the necessary allocations for adding new global objects
714 to the global scope below. */
715 if (mode & RTLD_GLOBAL)
716 add_to_global_resize (new);
717
718 /* Demarcation point: After this, no recoverable errors are allowed.
719 All memory allocations for new objects must have happened
720 before. */
721
722 /* Finalize the NODELETE status first. This comes before
723 update_scopes, so that lazy binding will not see pending NODELETE
724 state for newly loaded objects. There is a compiler barrier in
725 update_scopes which ensures that the changes from
726 activate_nodelete are visible before new objects show up in the
727 local scope. */
728 activate_nodelete (new);
729
730 /* Second stage after resize_scopes: Actually perform the scope
731 update. After this, dlsym and lazy binding can bind to new
732 objects. */
733 update_scopes (new);
734
735 if (!_dl_find_object_update (new_map: new))
736 _dl_signal_error (ENOMEM, object: new->l_libname->name, NULL,
737 N_ ("cannot allocate address lookup data"));
738
739 /* FIXME: It is unclear whether the order here is correct.
740 Shouldn't new objects be made available for binding (and thus
741 execution) only after there TLS data has been set up fully?
742 Fixing bug 16134 will likely make this distinction less
743 important. */
744
745 /* Second stage after resize_tls_slotinfo: Update the slotinfo data
746 structures. */
747 if (any_tls)
748 /* FIXME: This calls _dl_update_slotinfo, which aborts the process
749 on memory allocation failure. See bug 16134. */
750 update_tls_slotinfo (new);
751
752 /* Notify the debugger all new objects have been relocated. */
753 if (relocation_in_progress)
754 LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);
755
756 /* If libc.so was not there before, attempt to call its early
757 initialization routine. Indicate to the initialization routine
758 whether the libc being initialized is the one in the base
759 namespace. */
760 if (!args->libc_already_loaded)
761 {
762 /* dlopen cannot be used to load an initial libc by design. */
763 struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
764 _dl_call_libc_early_init (libc_map, false);
765 }
766
767 args->worker_continue = true;
768}
769
770static void
771dl_open_worker (void *a)
772{
773 struct dl_open_args *args = a;
774
775 args->worker_continue = false;
776
777 {
778 /* Protects global and module specific TLS state. */
779 __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
780
781 struct dl_exception ex;
782 int err = _dl_catch_exception (exception: &ex, operate: dl_open_worker_begin, args);
783
784 __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
785
786 /* Auditing checkpoint and debugger signalling. Do this even on
787 error, so that dlopen exists with consistent state. */
788 if (args->nsid >= 0 || args->map != NULL)
789 {
790 Lmid_t nsid = args->map != NULL ? args->map->l_ns : args->nsid;
791 struct r_debug *r = _dl_debug_update (ns: nsid);
792#ifdef SHARED
793 bool was_not_consistent = r->r_state != RT_CONSISTENT;
794#endif
795 _dl_debug_change_state (r, state: RT_CONSISTENT);
796 LIBC_PROBE (map_complete, 3, nsid, r, args->map);
797
798#ifdef SHARED
799 if (was_not_consistent)
800 /* Avoid redudant/recursive signalling. */
801 _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
802#endif
803 }
804
805 if (__glibc_unlikely (ex.errstring != NULL))
806 /* Reraise the error. */
807 _dl_signal_exception (errcode: err, &ex, NULL);
808 }
809
810 if (!args->worker_continue)
811 return;
812
813 int mode = args->mode;
814 struct link_map *new = args->map;
815
816 /* Run the initializer functions of new objects. Temporarily
817 disable the exception handler, so that lazy binding failures are
818 fatal. */
819 {
820 struct dl_init_args init_args =
821 {
822 .new = new,
823 .argc = args->argc,
824 .argv = args->argv,
825 .env = args->env
826 };
827 _dl_catch_exception (NULL, operate: call_dl_init, args: &init_args);
828 }
829
830 /* Now we can make the new map available in the global scope. */
831 if (mode & RTLD_GLOBAL)
832 add_to_global_update (new);
833
834 /* Let the user know about the opencount. */
835 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
836 _dl_debug_printf (fmt: "opening file=%s [%lu]; direct_opencount=%u\n\n",
837 new->l_name, new->l_ns, new->l_direct_opencount);
838}
839
840void *
841_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
842 int argc, char *argv[], char *env[])
843{
844 if ((mode & RTLD_BINDING_MASK) == 0)
845 /* One of the flags must be set. */
846 _dl_signal_error (EINVAL, object: file, NULL, N_("invalid mode for dlopen()"));
847
848 /* Make sure we are alone. */
849 __rtld_lock_lock_recursive (GL(dl_load_lock));
850
851 if (__glibc_unlikely (nsid == LM_ID_NEWLM))
852 {
853 /* Find a new namespace. */
854 for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
855 if (GL(dl_ns)[nsid]._ns_loaded == NULL)
856 break;
857
858 if (__glibc_unlikely (nsid == DL_NNS))
859 {
860 /* No more namespace available. */
861 __rtld_lock_unlock_recursive (GL(dl_load_lock));
862
863 _dl_signal_error (EINVAL, object: file, NULL, N_("\
864no more namespaces available for dlmopen()"));
865 }
866 else if (nsid == GL(dl_nns))
867 {
868 __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
869 ++GL(dl_nns);
870 }
871
872 GL(dl_ns)[nsid].libc_map = NULL;
873 _dl_debug_change_state (r: _dl_debug_update (ns: nsid), state: RT_CONSISTENT);
874 }
875 /* Never allow loading a DSO in a namespace which is empty. Such
876 direct placements is only causing problems. Also don't allow
877 loading into a namespace used for auditing. */
878 else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
879 && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
880 /* This prevents the [NSID] index expressions from being
881 evaluated, so the compiler won't think that we are
882 accessing an invalid index here in the !SHARED case where
883 DL_NNS is 1 and so any NSID != 0 is invalid. */
884 || DL_NNS == 1
885 || GL(dl_ns)[nsid]._ns_nloaded == 0
886 || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
887 _dl_signal_error (EINVAL, object: file, NULL,
888 N_("invalid target namespace in dlmopen()"));
889
890 struct dl_open_args args;
891 args.file = file;
892 args.mode = mode;
893 args.caller_dlopen = caller_dlopen;
894 args.map = NULL;
895 args.nsid = nsid;
896 /* args.libc_already_loaded is always assigned by dl_open_worker
897 (before any explicit/non-local returns). */
898 args.argc = argc;
899 args.argv = argv;
900 args.env = env;
901
902 struct dl_exception exception;
903 int errcode = _dl_catch_exception (exception: &exception, operate: dl_open_worker, args: &args);
904
905#if defined USE_LDCONFIG && !defined MAP_COPY
906 /* We must unmap the cache file. */
907 _dl_unload_cache ();
908#endif
909
910 /* Do this for both the error and success cases. The old value has
911 only been determined if the namespace ID was assigned (i.e., it
912 is not __LM_ID_CALLER). In the success case, we actually may
913 have consumed more pending adds than planned (because the local
914 scopes overlap in case of a recursive dlopen, the inner dlopen
915 doing some of the globalization work of the outer dlopen), so the
916 old pending adds value is larger than absolutely necessary.
917 Since it is just a conservative upper bound, this is harmless.
918 The top-level dlopen call will restore the field to zero. */
919 if (args.nsid >= 0)
920 GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
921 = args.original_global_scope_pending_adds;
922
923 /* See if an error occurred during loading. */
924 if (__glibc_unlikely (exception.errstring != NULL))
925 {
926 /* Avoid keeping around a dangling reference to the libc.so link
927 map in case it has been cached in libc_map. */
928 if (!args.libc_already_loaded)
929 GL(dl_ns)[args.nsid].libc_map = NULL;
930
931 /* Remove the object from memory. It may be in an inconsistent
932 state if relocation failed, for example. */
933 if (args.map)
934 {
935 _dl_close_worker (map: args.map, true);
936
937 /* All l_nodelete_pending objects should have been deleted
938 at this point, which is why it is not necessary to reset
939 the flag here. */
940 }
941
942 /* Release the lock. */
943 __rtld_lock_unlock_recursive (GL(dl_load_lock));
944
945 /* Reraise the error. */
946 _dl_signal_exception (errcode, &exception, NULL);
947 }
948
949 const int r_state __attribute__ ((unused))
950 = _dl_debug_update (ns: args.nsid)->r_state;
951 assert (r_state == RT_CONSISTENT);
952
953 /* Release the lock. */
954 __rtld_lock_unlock_recursive (GL(dl_load_lock));
955
956 return args.map;
957}
958
959
960void
961_dl_show_scope (struct link_map *l, int from)
962{
963 _dl_debug_printf (fmt: "object=%s [%lu]\n",
964 DSO_FILENAME (l->l_name), l->l_ns);
965 if (l->l_scope != NULL)
966 for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
967 {
968 _dl_debug_printf (fmt: " scope %u:", scope_cnt);
969
970 for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
971 if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
972 _dl_debug_printf_c (fmt: " %s",
973 l->l_scope[scope_cnt]->r_list[cnt]->l_name);
974 else
975 _dl_debug_printf_c (fmt: " %s", RTLD_PROGNAME);
976
977 _dl_debug_printf_c (fmt: "\n");
978 }
979 else
980 _dl_debug_printf (fmt: " no scope\n");
981 _dl_debug_printf (fmt: "\n");
982}
983

/* Source: glibc elf/dl-open.c.  */