/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>  /* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>
#include <dl-find_object.h>

#include <dl-dst.h>
#include <dl-prop.h>


/* We must be careful not to leave ourselves in an inconsistent
   state.  Thus we catch any error and re-raise it after cleaning
   up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
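
/* Illustrative sketch (not part of the original file): how a
   dlopen-style entry point might fill in the arguments consumed by
   _dl_open below.  The wrapper name is hypothetical, and the use of
   __builtin_return_address together with the __libc_argc/__libc_argv
   globals is an assumption for illustration; the real public
   wrappers live in dlfcn/.  */
#if 0
void *
example_dlopen (const char *file, int mode)
{
  /* The return address identifies the caller's link map, which
     _dl_open needs for DST expansion, RUNPATH/RPATH searches, and
     __LM_ID_CALLER resolution.  */
  return _dl_open (file, mode, __builtin_return_address (0),
                   __LM_ID_CALLER, __libc_argc, __libc_argv, __environ);
}
#endif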

/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                    N_ ("cannot extend global scope"));
}

/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses is allocated by the malloc() stub in
     ld.so.  When we get here those functions are not used anymore.
     Instead the malloc() implementation of libc is used.  But this
     means the block from the main map cannot be used in a realloc()
     call.  Therefore we allocate a completely new array the first
     time we have to add something to the global scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
                              &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0;  /* 0 means no new allocation.  */
  void *old_global = NULL;    /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
                              ns->_ns_global_scope_pending_adds,
                              &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
        add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
        add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
         malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
                                  &allocation_size))
        add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
        add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
              ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
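
/* Illustrative sketch (not part of the original file): the resizing
   policy of add_to_global_resize in isolation.  The first allocation
   reserves eight spare slots; later growth doubles the required
   count, so repeated dlopen (RTLD_GLOBAL) calls append in amortized
   constant time.  The helper name is hypothetical and the overflow
   checks are omitted for brevity.  */
#if 0
static unsigned int
example_global_scope_new_size (unsigned int current_alloc,
                               unsigned int required)
{
  if (current_alloc == 0)
    return required + 8;   /* Initial allocation, with slack.  */
  if (required > current_alloc)
    return required * 2;   /* Exponential growth.  */
  return 0;                /* 0 means: no reallocation needed.  */
}
#endif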

/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;

          /* The array has been resized by add_to_global_resize.  */
          assert (new_nlist < ns->_ns_global_scope_alloc);

          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
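
/* Illustrative sketch (not part of the original file): the
   containment test used by _dl_find_dso_for_object, reduced to its
   core.  An address belongs to a map if it falls inside
   [l_map_start, l_map_end) and, for non-contiguous mappings, also
   inside one of the object's segments.  The helper name is
   hypothetical.  */
#if 0
static bool
example_addr_in_map (struct link_map *l, ElfW(Addr) addr)
{
  if (addr < l->l_map_start || addr >= l->l_map_end)
    return false;
  /* Non-contiguous objects have unmapped holes between segments,
     so they need the precise per-segment check.  */
  return l->l_contiguous || _dl_addr_inside_object (l, addr);
}
#endif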

/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}

/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further memory allocation.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The l_scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < array_length (imap->l_scope_mem))
                {
                  /* If the current l_scope memory is not pointing to
                     the static memory in the structure, but the
                     static memory in the structure is large enough to
                     use for cnt + 1 scope entries, then switch to
                     using the static memory.  */
                  new_size = array_length (imap->l_scope_mem);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy the array and the terminating NULL.  */
              memcpy (newp, imap->l_scope,
                      (cnt + 1) * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }
        }
    }
}

/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          /* Assert that resize_scopes has sufficiently enlarged the
             array.  */
          assert (cnt + 1 < imap->l_scope_max);

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          from_scope = cnt;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }
}
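
/* Illustrative sketch (not part of the original file): the lock-free
   append performed by update_scopes, in isolation.  Concurrent
   readers scan l_scope up to the first NULL without taking the
   loader lock, so the new terminator must become visible before the
   new element overwrites the old terminator.  The helper name is
   hypothetical.  */
#if 0
static void
example_scope_append (struct r_scope_elem **scope, size_t cnt,
                      struct r_scope_elem *elem)
{
  scope[cnt + 1] = NULL;    /* Write the new terminator first...  */
  atomic_write_barrier ();  /* ...order it before the element...  */
  scope[cnt] = elem;        /* ...then publish the element.  */
}
#endif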

/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (_dl_add_to_slotinfo (new->l_searchlist.r_list[i], false))
      any_tls = true;
  return any_tls;
}

/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    _dl_add_to_slotinfo (new->l_searchlist.r_list[i], true);

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_release (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static TLS data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for the current
             generation.  */

          /* FIXME: This can terminate the process on memory
             allocation failure.  It is not possible to raise
             exceptions from this context; to fix this bug,
             _dl_update_slotinfo would have to be split into two
             operations, similar to resize_scopes and update_scopes
             above.  This is related to bug 16134.  */
          _dl_update_slotinfo (imap->l_tls_modid, newgen);
#endif

          dl_init_static_tls (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }
}
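
/* Illustrative sketch (not part of the original file): the TLS
   generation bump performed by update_tls_slotinfo, in isolation.
   The release store pairs with acquire loads on the TLS access path,
   so a thread that observes the new generation also observes the
   slotinfo entries added before the bump.  The helper name is
   hypothetical.  */
#if 0
static void
example_bump_tls_generation (void)
{
  size_t newgen = GL(dl_tls_generation) + 1;
  if (newgen == 0)
    /* The counter never goes backwards, so wrapping is fatal.  */
    _dl_fatal_printf ("TLS generation counter wrapped!\n");
  atomic_store_release (&GL(dl_tls_generation), newgen);
}
#endif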

/* Mark the objects as NODELETE if required.  This is delayed until
   the point where dlopen failure is no longer possible, so that
   _dl_close can clean up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
        if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
          _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
                            l->l_name, l->l_ns);

        /* The flag can already be true at this point, e.g. a signal
           handler may have triggered lazy binding and set NODELETE
           status immediately.  */
        l->l_nodelete_active = true;

        /* This is just a debugging aid, to indicate that
           activate_nodelete has run for this map.  */
        l->l_nodelete_pending = false;
      }
}

/* Relocate the object L.  *RELOCATION_IN_PROGRESS controls whether
   the debugger is notified of the start of relocation processing.  */
static void
_dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
                              struct link_map *l, int reloc_mode,
                              bool *relocation_in_progress)
{
  if (l->l_real->l_relocated)
    return;

  if (!*relocation_in_progress)
    {
      /* Notify the debugger that relocations are about to happen.  */
      LIBC_PROBE (reloc_start, 2, args->nsid, r);
      *relocation_in_progress = true;
    }

#ifdef SHARED
  if (__glibc_unlikely (GLRO(dl_profile) != NULL))
    {
      /* If this is the shared object which we want to profile, make
         sure the profiling is started.  We can find out whether this
         is necessary or not by observing the `_dl_profile_map'
         variable.  If it was NULL but is not NULL afterwards we must
         start the profiling.  */
      struct link_map *old_profile_map = GL(dl_profile_map);

      _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

      if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
        {
          /* We must prepare the profiling.  */
          _dl_start_profile ();

          /* Prevent unloading the object.  */
          GL(dl_profile_map)->l_nodelete_active = true;
        }
    }
  else
#endif
    _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
}


/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  struct link_map *new;
  int argc;
  char **argv;
  char **env;
};

static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}
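
/* Illustrative sketch (not part of the original file): the two ways
   dl_open_worker below uses _dl_catch_exception.  With an exception
   slot, errors are captured for re-raising later; with a NULL slot,
   as in the call_dl_init call, any error raised inside the callback
   is fatal.  The function name is hypothetical.  */
#if 0
static void
example_run_catchable (void (*worker) (void *), void *closure)
{
  struct dl_exception ex;
  int err = _dl_catch_exception (&ex, worker, closure);
  if (ex.errstring != NULL)
    /* Re-raise the captured error in the caller's context.  */
    _dl_signal_exception (err, &ex, NULL);
}
#endif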

static void
dl_open_worker_begin (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l
        = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);
      /* If the user requested the object to be in the global
         namespace but it is not so far, prepare to add it now.  This
         can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
         was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
        {
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
              && !new->l_nodelete_active)
            _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
                              new->l_name, new->l_ns);
          new->l_nodelete_active = true;
        }

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_update (new);

      const int r_state __attribute__ ((unused))
        = _dl_debug_update (args->nsid)->r_state;
      assert (r_state == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
        struct link_map *map = new->l_searchlist.r_list[i]->l_real;
        _dl_check_map_versions (map, 0, 0);
#ifndef SHARED
        /* During static dlopen, check if ld.so has been loaded.
           Perform partial initialization in this case.  This must
           come after the symbol versioning initialization in
           _dl_check_map_versions.  */
        if (map->l_info[DT_SONAME] != NULL
            && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
                        + map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
          __rtld_static_init (map);
#endif
      }

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  _dl_audit_activity_nsid (new->l_ns, LA_ACT_CONSISTENT);
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_update (args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work, and it also means that
     copy relocations of dependencies are overwritten if necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
        {
          if (first == UINT_MAX)
            first = j;
          last = j + 1;
        }
      l = new->l_initfini[++j];
    }
  while (l != NULL);

  bool relocation_in_progress = false;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  /* Ensure that libc is relocated first.  This helps with the
     execution of IFUNC resolvers in libc, and matters only to newly
     created dlmopen namespaces.  Do not do this for static dlopen
     because libc has relocations against ld.so, which may not have
     been relocated at this point.  */
#ifdef SHARED
  if (GL(dl_ns)[args->nsid].libc_map != NULL)
    _dl_open_relocate_one_object (args, r, GL(dl_ns)[args->nsid].libc_map,
                                  reloc_mode, &relocation_in_progress);
#endif

  for (unsigned int i = last; i-- > first; )
    _dl_open_relocate_one_object (args, r, new->l_initfini[i], reloc_mode,
                                  &relocation_in_progress);

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  if (!_dl_find_object_update (new))
    _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                      N_ ("cannot allocate address lookup data"));

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  args->worker_continue = true;
}

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;

  args->worker_continue = false;

  {
    /* Protects global and module specific TLS state.  */
    __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

    struct dl_exception ex;
    int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);

    __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

    if (__glibc_unlikely (ex.errstring != NULL))
      /* Reraise the error.  */
      _dl_signal_exception (err, &ex, NULL);
  }

  if (!args->worker_continue)
    return;

  int mode = args->mode;
  struct link_map *new = args->map;

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
        .new = new,
        .argc = args->argc,
        .argv = args->argv,
        .env = args->env
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      GL(dl_ns)[nsid].libc_map = NULL;
      _dl_debug_update (nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
         map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
        GL(dl_ns)[args.nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          _dl_close_worker (args.map, true);

          /* All l_nodelete_pending objects should have been deleted
             at this point, which is why it is not necessary to reset
             the flag here.  */
        }

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  const int r_state __attribute__ ((unused))
    = _dl_debug_update (args.nsid)->r_state;
  assert (r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
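
/* Illustrative sketch (not part of the original file): exercising
   _dl_open through the public API.  A standalone program, assuming
   some DSO "libdemo.so" exists as a placeholder (dlmopen and
   LM_ID_NEWLM require _GNU_SOURCE; link with -ldl on older glibc).  */
#if 0
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  /* RTLD_NOW | RTLD_GLOBAL: relocate eagerly and add the object to
     the global scope (the add_to_global_resize/add_to_global_update
     path above).  */
  void *h = dlopen ("libdemo.so", RTLD_NOW | RTLD_GLOBAL);
  if (h == NULL)
    {
      fprintf (stderr, "dlopen: %s\n", dlerror ());
      return 1;
    }

  /* dlmopen with LM_ID_NEWLM loads into a fresh namespace, taking
     the LM_ID_NEWLM branch in _dl_open above.  */
  void *h2 = dlmopen (LM_ID_NEWLM, "libdemo.so", RTLD_NOW);
  if (h2 != NULL)
    dlclose (h2);

  dlclose (h);
  return 0;
}
#endif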


void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}
