/* Map in a shared object's segments.  Generic version.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.
   Copyright The GNU Toolchain Authors.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <dl-load.h>

/* Map a segment and align it properly.  */

static __always_inline ElfW(Addr)
_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
                 const size_t maplength, int fd)
{
  if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
                                MAP_COPY|MAP_FILE, fd, c->mapoff);

  /* If the segment alignment > the page size, allocate enough space to
     ensure that the segment can be properly aligned.  */
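  /* For illustration (hypothetical numbers): with a segment wanting
     2 MiB alignment and a maplength of 5 MiB, we reserve 5 + 2 MiB;
     any contiguous 7 MiB window necessarily contains a 2 MiB-aligned
     address with at least 5 MiB of room after it, because the first
     aligned address lies less than 2 MiB past the window's start.
     When maplength < mapalign, reserving 2 * mapalign suffices for
     the same reason.  */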
  ElfW(Addr) maplen = (maplength >= c->mapalign
                       ? (maplength + c->mapalign)
                       : (2 * c->mapalign));
  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
                                              PROT_NONE,
                                              MAP_ANONYMOUS|MAP_PRIVATE,
                                              -1, 0);
  if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
    return map_start;

  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
                                           maplength, c->prot,
                                           MAP_COPY|MAP_FILE|MAP_FIXED,
                                           fd, c->mapoff);
  if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
    __munmap ((void *) map_start, maplen);
  else
    {
      /* Unmap the unused regions.  */
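      /* The reservation now looks like this (a sketch, not to scale;
         map_end is computed below as the page-aligned end of the file
         mapping):

           map_start    map_start_aligned      map_end   map_start + maplen
               |-- unused --|--- file mapping ---|------- unused -------|

         Both unused pieces are handed back to the system here.  */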
      ElfW(Addr) delta = map_start_aligned - map_start;
      if (delta)
        __munmap ((void *) map_start, delta);
      ElfW(Addr) map_end = map_start_aligned + maplength;
      map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
      delta = map_start + maplen - map_end;
      if (delta)
        __munmap ((void *) map_end, delta);
    }

  return map_start_aligned;
}

/* This implementation assumes (as does the corresponding implementation
   of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
   are always laid out with all segments contiguous (or with gaps
   between them small enough that it's preferable to reserve all whole
   pages inside the gaps with PROT_NONE mappings rather than permitting
   other use of those parts of the address space).  */
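
/* For example (hypothetical layout, 4 KiB pages): a text segment
   ending at 0x5000 followed by a data segment starting at 0x6000
   leaves a one-page hole at 0x5000; that page stays reserved with
   PROT_NONE rather than being released, so the object still occupies
   one contiguous address range.  */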

static __always_inline const char *
_dl_map_segments (struct link_map *l, int fd,
                  const ElfW(Ehdr) *header, int type,
                  const struct loadcmd loadcmds[], size_t nloadcmds,
                  const size_t maplength, bool has_holes,
                  struct link_map *loader)
{
  const struct loadcmd *c = loadcmds;

  if (__glibc_likely (type == ET_DYN))
    {
      /* This is a position-independent shared object.  We can let the
         kernel map it anywhere it likes, but we must have space for all
         the segments in their specified positions relative to the first.
         So we map the first segment without MAP_FIXED, but with its
         extent increased to cover all the segments.  Then we remove
         access from the excess portion, and there is known to be enough
         space there to remap the later segments into.

         As a refinement, sometimes we have an address that we would
         prefer to map such objects at; but this is only a preference,
         the OS can do whatever it likes.  */
      ElfW(Addr) mappref
        = (ELF_PREFERRED_ADDRESS (loader, maplength,
                                  c->mapstart & GLRO(dl_use_load_bias))
           - MAP_BASE_ADDR (l));

      /* Remember which part of the address space this object uses.  */
      l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
      if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

      l->l_map_end = l->l_map_start + maplength;
      l->l_addr = l->l_map_start - c->mapstart;
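      /* l_addr is the load bias added to every p_vaddr in the file.
         E.g. (hypothetical numbers): if the first segment has p_vaddr
         0 and the kernel placed it at 0x7f0000000000, l_addr is
         0x7f0000000000, so a symbol with st_value 0x1234 ends up at
         0x7f0000001234.  */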

      if (has_holes)
        {
          /* Change protection on the excess portion to disallow all access;
             the portions we do not remap later will be inaccessible as if
             unallocated.  Then jump into the normal segment-mapping loop to
             handle the portion of the segment past the end of the file
             mapping.  */
          if (__glibc_unlikely (loadcmds[nloadcmds - 1].mapstart <
                                c->mapend))
            return N_("ELF load command address/offset not page-aligned");
          if (__glibc_unlikely
              (__mprotect ((caddr_t) (l->l_addr + c->mapend),
                           loadcmds[nloadcmds - 1].mapstart - c->mapend,
                           PROT_NONE) < 0))
            return DL_MAP_SEGMENTS_ERROR_MPROTECT;
        }

      l->l_contiguous = 1;

      goto postmap;
    }

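  /* Not ET_DYN: the object (e.g. an ET_EXEC executable) must live at
     its link-time addresses, so each segment is mapped below with
     MAP_FIXED at c->mapstart plus whatever bias l_addr already holds
     (zero for ET_EXEC).  */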
  /* Remember which part of the address space this object uses.  */
  l->l_map_start = c->mapstart + l->l_addr;
  l->l_map_end = l->l_map_start + maplength;
  l->l_contiguous = !has_holes;

  while (c < &loadcmds[nloadcmds])
    {
      if (c->mapend > c->mapstart
          /* Map the segment contents from the file.  */
          && (__mmap ((void *) (l->l_addr + c->mapstart),
                      c->mapend - c->mapstart, c->prot,
                      MAP_FIXED|MAP_COPY|MAP_FILE,
                      fd, c->mapoff)
              == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

    postmap:
      _dl_postprocess_loadcmd (l, header, c);

      if (c->allocend > c->dataend)
        {
          /* Extra zero pages should appear at the end of this segment,
             after the data mapped from the file.  */
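          /* (The file mapping above covers whole pages, so the tail of
             the last file-backed page holds whatever bytes follow the
             segment in the file; it has to be cleared by hand.  Whole
             pages beyond that can simply be mapped anonymously.)  */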
          ElfW(Addr) zero, zeroend, zeropage;

          zero = l->l_addr + c->dataend;
          zeroend = l->l_addr + c->allocend;
          zeropage = ((zero + GLRO(dl_pagesize) - 1)
                      & ~(GLRO(dl_pagesize) - 1));
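          /* I.e. zeropage is zero rounded up to the next page
             boundary, equivalent to ALIGN_UP (zero, GLRO(dl_pagesize)).  */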

          if (zeroend < zeropage)
            /* All the extra data is in the last page of the segment.
               We can just zero it.  */
            zeropage = zeroend;

          if (zeropage > zero)
            {
              /* Zero the final part of the last page of the segment.  */
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                {
                  /* Dag nab it.  */
                  if (__mprotect ((caddr_t) (zero
                                             & ~(GLRO(dl_pagesize) - 1)),
                                  GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
                    return DL_MAP_SEGMENTS_ERROR_MPROTECT;
                }
              memset ((void *) zero, '\0', zeropage - zero);
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
                            GLRO(dl_pagesize), c->prot);
            }

          if (zeroend > zeropage)
            {
              /* Map the remaining zero pages as anonymous memory; the
                 kernel supplies them already zero-filled.  */
              caddr_t mapat;
              mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                              c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                              -1, 0);
              if (__glibc_unlikely (mapat == MAP_FAILED))
                return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
            }
        }

      ++c;
    }

  /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
     fixed.  */
  ELF_FIXED_ADDRESS (loader, c->mapstart);

  return NULL;
}