//===- Relocations.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains platform-independent functions to process relocations.
// I'll describe the overview of this file here.
//
// Simple relocations are easy to handle for the linker. For example,
// for R_X86_64_PC64 relocs, the linker just has to fix up locations
// with the relative offsets to the target symbols. It would just be
// reading records from relocation sections and applying them to output.
//
// But not all relocations are that easy to handle. For example, for
// R_386_GOTOFF relocs, the linker has to create new GOT entries for
// symbols if they don't exist, and fix up locations with GOT entry
// offsets from the beginning of GOT section. So there is more than
// fixing addresses in relocation processing.
//
// ELF defines a large number of complex relocations.
//
// The functions in this file analyze relocations and do whatever needs
// to be done. It includes, but is not limited to, the following.
//
//  - create GOT/PLT entries
//  - create new relocations in .dynsym to let the dynamic linker resolve
//    them at runtime (since ELF supports dynamic linking, not all
//    relocations can be resolved at link-time)
//  - create COPY relocs and reserve space in .bss
//  - replace expensive relocs (in terms of runtime cost) with cheap ones
//  - error out infeasible combinations such as PIC and non-relative relocs
//
// Note that the functions in this file don't actually apply relocations
// because it doesn't know about the output file nor the output file buffer.
// It instead stores Relocation objects to InputSection's Relocations
// vector to let it apply later in InputSection::writeTo.
//
//===----------------------------------------------------------------------===//

42 | |
43 | #include "Relocations.h" |
44 | #include "Config.h" |
45 | #include "InputFiles.h" |
46 | #include "LinkerScript.h" |
47 | #include "OutputSections.h" |
48 | #include "SymbolTable.h" |
49 | #include "Symbols.h" |
50 | #include "SyntheticSections.h" |
51 | #include "Target.h" |
52 | #include "Thunks.h" |
53 | #include "lld/Common/ErrorHandler.h" |
54 | #include "lld/Common/Memory.h" |
55 | #include "llvm/ADT/SmallSet.h" |
56 | #include "llvm/BinaryFormat/ELF.h" |
57 | #include "llvm/Demangle/Demangle.h" |
58 | #include <algorithm> |
59 | |
60 | using namespace llvm; |
61 | using namespace llvm::ELF; |
62 | using namespace llvm::object; |
63 | using namespace llvm::support::endian; |
64 | using namespace lld; |
65 | using namespace lld::elf; |
66 | |
67 | static void printDefinedLocation(ELFSyncStream &s, const Symbol &sym) { |
68 | s << "\n>>> defined in "<< sym.file; |
69 | } |
70 | |
71 | // Construct a message in the following format. |
72 | // |
73 | // >>> defined in /home/alice/src/foo.o |
74 | // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12) |
75 | // >>> /home/alice/src/bar.o:(.text+0x1) |
76 | static void printLocation(ELFSyncStream &s, InputSectionBase &sec, |
77 | const Symbol &sym, uint64_t off) { |
78 | printDefinedLocation(s, sym); |
79 | s << "\n>>> referenced by "; |
80 | auto tell = s.tell(); |
81 | s << sec.getSrcMsg(sym, offset: off); |
82 | if (tell != s.tell()) |
83 | s << "\n>>> "; |
84 | s << sec.getObjMsg(offset: off); |
85 | } |
86 | |
87 | void elf::reportRangeError(Ctx &ctx, uint8_t *loc, const Relocation &rel, |
88 | const Twine &v, int64_t min, uint64_t max) { |
89 | ErrorPlace errPlace = getErrorPlace(ctx, loc); |
90 | auto diag = Err(ctx); |
91 | diag << errPlace.loc << "relocation "<< rel.type |
92 | << " out of range: "<< v.str() << " is not in ["<< min << ", "<< max |
93 | << ']'; |
94 | |
95 | if (rel.sym) { |
96 | if (!rel.sym->isSection()) |
97 | diag << "; references '"<< rel.sym << '\''; |
98 | else if (auto *d = dyn_cast<Defined>(Val: rel.sym)) |
99 | diag << "; references section '"<< d->section->name << "'"; |
100 | |
101 | if (ctx.arg.emachine == EM_X86_64 && rel.type == R_X86_64_PC32 && |
102 | rel.sym->getOutputSection() && |
103 | (rel.sym->getOutputSection()->flags & SHF_X86_64_LARGE)) { |
104 | diag << "; R_X86_64_PC32 should not reference a section marked " |
105 | "SHF_X86_64_LARGE"; |
106 | } |
107 | } |
108 | if (!errPlace.srcLoc.empty()) |
109 | diag << "\n>>> referenced by "<< errPlace.srcLoc; |
110 | if (rel.sym && !rel.sym->isSection()) |
111 | printDefinedLocation(s&: diag, sym: *rel.sym); |
112 | |
113 | if (errPlace.isec && errPlace.isec->name.starts_with(Prefix: ".debug")) |
114 | diag << "; consider recompiling with -fdebug-types-section to reduce size " |
115 | "of debug sections"; |
116 | } |
117 | |
118 | void elf::reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n, |
119 | const Symbol &sym, const Twine &msg) { |
120 | auto diag = Err(ctx); |
121 | diag << getErrorPlace(ctx, loc).loc << msg << " is out of range: "<< v |
122 | << " is not in ["<< llvm::minIntN(N: n) << ", "<< llvm::maxIntN(N: n) << "]"; |
123 | if (!sym.getName().empty()) { |
124 | diag << "; references '"<< &sym << '\''; |
125 | printDefinedLocation(s&: diag, sym); |
126 | } |
127 | } |
128 | |
// Build a bitmask with one bit set for each member of a 64-element subset of
// RelExpr. Base case: an empty set yields an empty mask.
static constexpr uint64_t buildMask() { return 0; }

// Fold each `head` into the mask. Values outside [0, 64) contribute nothing,
// which lets the caller reuse this helper for the [64, 128) half of the
// RelExpr range by simply subtracting 64 from every element.
template <typename... Tails>
static constexpr uint64_t buildMask(int head, Tails... tails) {
  return (0 <= head && head < 64 ? uint64_t(1) << head : 0) |
         buildMask(tails...);
}
137 | |
138 | // Return true if `Expr` is one of `Exprs`. |
139 | // There are more than 64 but less than 128 RelExprs, so we divide the set of |
140 | // exprs into [0, 64) and [64, 128) and represent each range as a constant |
141 | // 64-bit mask. Then we decide which mask to test depending on the value of |
142 | // expr and use a simple shift and bitwise-and to test for membership. |
143 | template <RelExpr... Exprs> static bool oneof(RelExpr expr) { |
144 | assert(0 <= expr && (int)expr < 128 && |
145 | "RelExpr is too large for 128-bit mask!"); |
146 | |
147 | if (expr >= 64) |
148 | return (uint64_t(1) << (expr - 64)) & buildMask((Exprs - 64)...); |
149 | return (uint64_t(1) << expr) & buildMask(Exprs...); |
150 | } |
151 | |
152 | static RelType getMipsPairType(RelType type, bool isLocal) { |
153 | switch (type) { |
154 | case R_MIPS_HI16: |
155 | return R_MIPS_LO16; |
156 | case R_MIPS_GOT16: |
157 | // In case of global symbol, the R_MIPS_GOT16 relocation does not |
158 | // have a pair. Each global symbol has a unique entry in the GOT |
159 | // and a corresponding instruction with help of the R_MIPS_GOT16 |
160 | // relocation loads an address of the symbol. In case of local |
161 | // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold |
162 | // the high 16 bits of the symbol's value. A paired R_MIPS_LO16 |
163 | // relocations handle low 16 bits of the address. That allows |
164 | // to allocate only one GOT entry for every 64 KiB of local data. |
165 | return isLocal ? R_MIPS_LO16 : R_MIPS_NONE; |
166 | case R_MICROMIPS_GOT16: |
167 | return isLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE; |
168 | case R_MIPS_PCHI16: |
169 | return R_MIPS_PCLO16; |
170 | case R_MICROMIPS_HI16: |
171 | return R_MICROMIPS_LO16; |
172 | default: |
173 | return R_MIPS_NONE; |
174 | } |
175 | } |
176 | |
177 | // True if non-preemptable symbol always has the same value regardless of where |
178 | // the DSO is loaded. |
179 | static bool isAbsolute(const Symbol &sym) { |
180 | if (sym.isUndefined()) |
181 | return true; |
182 | if (const auto *dr = dyn_cast<Defined>(Val: &sym)) |
183 | return dr->section == nullptr; // Absolute symbol. |
184 | return false; |
185 | } |
186 | |
187 | static bool isAbsoluteValue(const Symbol &sym) { |
188 | return isAbsolute(sym) || sym.isTls(); |
189 | } |
190 | |
191 | // Returns true if Expr refers a PLT entry. |
192 | static bool needsPlt(RelExpr expr) { |
193 | return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, |
194 | R_GOTPLT_PC, RE_LOONGARCH_PLT_PAGE_PC, RE_PPC32_PLTREL, |
195 | RE_PPC64_CALL_PLT>(expr); |
196 | } |
197 | |
198 | bool lld::elf::needsGot(RelExpr expr) { |
199 | return oneof<R_GOT, RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, R_GOT_OFF, |
200 | RE_MIPS_GOT_LOCAL_PAGE, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32, |
201 | RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC, |
202 | RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC, R_GOTPLT, |
203 | RE_AARCH64_GOT_PAGE, RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>( |
204 | expr); |
205 | } |
206 | |
207 | // True if this expression is of the form Sym - X, where X is a position in the |
208 | // file (PC, or GOT for example). |
209 | static bool isRelExpr(RelExpr expr) { |
210 | return oneof<R_PC, R_GOTREL, R_GOTPLTREL, RE_ARM_PCA, RE_MIPS_GOTREL, |
211 | RE_PPC64_CALL, RE_PPC64_RELAX_TOC, RE_AARCH64_PAGE_PC, |
212 | R_RELAX_GOT_PC, RE_RISCV_PC_INDIRECT, RE_PPC64_RELAX_GOT_PC, |
213 | RE_LOONGARCH_PAGE_PC>(expr); |
214 | } |
215 | |
216 | static RelExpr toPlt(RelExpr expr) { |
217 | switch (expr) { |
218 | case RE_LOONGARCH_PAGE_PC: |
219 | return RE_LOONGARCH_PLT_PAGE_PC; |
220 | case RE_PPC64_CALL: |
221 | return RE_PPC64_CALL_PLT; |
222 | case R_PC: |
223 | return R_PLT_PC; |
224 | case R_ABS: |
225 | return R_PLT; |
226 | case R_GOTREL: |
227 | return R_PLT_GOTREL; |
228 | default: |
229 | return expr; |
230 | } |
231 | } |
232 | |
233 | static RelExpr fromPlt(RelExpr expr) { |
234 | // We decided not to use a plt. Optimize a reference to the plt to a |
235 | // reference to the symbol itself. |
236 | switch (expr) { |
237 | case R_PLT_PC: |
238 | case RE_PPC32_PLTREL: |
239 | return R_PC; |
240 | case RE_LOONGARCH_PLT_PAGE_PC: |
241 | return RE_LOONGARCH_PAGE_PC; |
242 | case RE_PPC64_CALL_PLT: |
243 | return RE_PPC64_CALL; |
244 | case R_PLT: |
245 | return R_ABS; |
246 | case R_PLT_GOTPLT: |
247 | return R_GOTPLTREL; |
248 | case R_PLT_GOTREL: |
249 | return R_GOTREL; |
250 | default: |
251 | return expr; |
252 | } |
253 | } |
254 | |
255 | // Returns true if a given shared symbol is in a read-only segment in a DSO. |
256 | template <class ELFT> static bool isReadOnly(SharedSymbol &ss) { |
257 | using Elf_Phdr = typename ELFT::Phdr; |
258 | |
259 | // Determine if the symbol is read-only by scanning the DSO's program headers. |
260 | const auto &file = cast<SharedFile>(Val&: *ss.file); |
261 | for (const Elf_Phdr &phdr : |
262 | check(file.template getObj<ELFT>().program_headers())) |
263 | if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) && |
264 | !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr && |
265 | ss.value < phdr.p_vaddr + phdr.p_memsz) |
266 | return true; |
267 | return false; |
268 | } |
269 | |
270 | // Returns symbols at the same offset as a given symbol, including SS itself. |
271 | // |
272 | // If two or more symbols are at the same offset, and at least one of |
273 | // them are copied by a copy relocation, all of them need to be copied. |
274 | // Otherwise, they would refer to different places at runtime. |
275 | template <class ELFT> |
276 | static SmallSet<SharedSymbol *, 4> getSymbolsAt(Ctx &ctx, SharedSymbol &ss) { |
277 | using Elf_Sym = typename ELFT::Sym; |
278 | |
279 | const auto &file = cast<SharedFile>(Val&: *ss.file); |
280 | |
281 | SmallSet<SharedSymbol *, 4> ret; |
282 | for (const Elf_Sym &s : file.template getGlobalELFSyms<ELFT>()) { |
283 | if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS || |
284 | s.getType() == STT_TLS || s.st_value != ss.value) |
285 | continue; |
286 | StringRef name = check(s.getName(file.getStringTable())); |
287 | Symbol *sym = ctx.symtab->find(name); |
288 | if (auto *alias = dyn_cast_or_null<SharedSymbol>(Val: sym)) |
289 | ret.insert(Ptr: alias); |
290 | } |
291 | |
292 | // The loop does not check SHT_GNU_verneed, so ret does not contain |
293 | // non-default version symbols. If ss has a non-default version, ret won't |
294 | // contain ss. Just add ss unconditionally. If a non-default version alias is |
295 | // separately copy relocated, it and ss will have different addresses. |
296 | // Fortunately this case is impractical and fails with GNU ld as well. |
297 | ret.insert(Ptr: &ss); |
298 | return ret; |
299 | } |
300 | |
301 | // When a symbol is copy relocated or we create a canonical plt entry, it is |
302 | // effectively a defined symbol. In the case of copy relocation the symbol is |
303 | // in .bss and in the case of a canonical plt entry it is in .plt. This function |
304 | // replaces the existing symbol with a Defined pointing to the appropriate |
305 | // location. |
306 | static void replaceWithDefined(Ctx &ctx, Symbol &sym, SectionBase &sec, |
307 | uint64_t value, uint64_t size) { |
308 | Symbol old = sym; |
309 | Defined(ctx, sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value, |
310 | size, &sec) |
311 | .overwrite(sym); |
312 | |
313 | sym.versionId = old.versionId; |
314 | sym.isUsedInRegularObj = true; |
315 | // A copy relocated alias may need a GOT entry. |
316 | sym.flags.store(i: old.flags.load(m: std::memory_order_relaxed) & NEEDS_GOT, |
317 | m: std::memory_order_relaxed); |
318 | } |
319 | |
320 | // Reserve space in .bss or .bss.rel.ro for copy relocation. |
321 | // |
322 | // The copy relocation is pretty much a hack. If you use a copy relocation |
323 | // in your program, not only the symbol name but the symbol's size, RW/RO |
324 | // bit and alignment become part of the ABI. In addition to that, if the |
325 | // symbol has aliases, the aliases become part of the ABI. That's subtle, |
326 | // but if you violate that implicit ABI, that can cause very counter- |
327 | // intuitive consequences. |
328 | // |
329 | // So, what is the copy relocation? It's for linking non-position |
330 | // independent code to DSOs. In an ideal world, all references to data |
331 | // exported by DSOs should go indirectly through GOT. But if object files |
332 | // are compiled as non-PIC, all data references are direct. There is no |
333 | // way for the linker to transform the code to use GOT, as machine |
334 | // instructions are already set in stone in object files. This is where |
335 | // the copy relocation takes a role. |
336 | // |
337 | // A copy relocation instructs the dynamic linker to copy data from a DSO |
338 | // to a specified address (which is usually in .bss) at load-time. If the |
339 | // static linker (that's us) finds a direct data reference to a DSO |
340 | // symbol, it creates a copy relocation, so that the symbol can be |
341 | // resolved as if it were in .bss rather than in a DSO. |
342 | // |
343 | // As you can see in this function, we create a copy relocation for the |
344 | // dynamic linker, and the relocation contains not only symbol name but |
345 | // various other information about the symbol. So, such attributes become a |
346 | // part of the ABI. |
347 | // |
348 | // Note for application developers: I can give you a piece of advice if |
349 | // you are writing a shared library. You probably should export only |
350 | // functions from your library. You shouldn't export variables. |
351 | // |
352 | // As an example what can happen when you export variables without knowing |
353 | // the semantics of copy relocations, assume that you have an exported |
354 | // variable of type T. It is an ABI-breaking change to add new members at |
355 | // end of T even though doing that doesn't change the layout of the |
356 | // existing members. That's because the space for the new members are not |
357 | // reserved in .bss unless you recompile the main program. That means they |
358 | // are likely to overlap with other data that happens to be laid out next |
359 | // to the variable in .bss. This kind of issue is sometimes very hard to |
360 | // debug. What's a solution? Instead of exporting a variable V from a DSO, |
361 | // define an accessor getV(). |
362 | template <class ELFT> static void addCopyRelSymbol(Ctx &ctx, SharedSymbol &ss) { |
363 | // Copy relocation against zero-sized symbol doesn't make sense. |
364 | uint64_t symSize = ss.getSize(); |
365 | if (symSize == 0 || ss.alignment == 0) |
366 | Err(ctx) << "cannot create a copy relocation for symbol "<< &ss; |
367 | |
368 | // See if this symbol is in a read-only segment. If so, preserve the symbol's |
369 | // memory protection by reserving space in the .bss.rel.ro section. |
370 | bool isRO = isReadOnly<ELFT>(ss); |
371 | BssSection *sec = make<BssSection>(args&: ctx, args: isRO ? ".bss.rel.ro": ".bss", |
372 | args&: symSize, args&: ss.alignment); |
373 | OutputSection *osec = (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent(); |
374 | |
375 | // At this point, sectionBases has been migrated to sections. Append sec to |
376 | // sections. |
377 | if (osec->commands.empty() || |
378 | !isa<InputSectionDescription>(Val: osec->commands.back())) |
379 | osec->commands.push_back(Elt: make<InputSectionDescription>(args: "")); |
380 | auto *isd = cast<InputSectionDescription>(Val: osec->commands.back()); |
381 | isd->sections.push_back(Elt: sec); |
382 | osec->commitSection(isec: sec); |
383 | |
384 | // Look through the DSO's dynamic symbol table for aliases and create a |
385 | // dynamic symbol for each one. This causes the copy relocation to correctly |
386 | // interpose any aliases. |
387 | for (SharedSymbol *sym : getSymbolsAt<ELFT>(ctx, ss)) |
388 | replaceWithDefined(ctx, sym&: *sym, sec&: *sec, value: 0, size: sym->size); |
389 | |
390 | ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->copyRel, isec&: *sec, offsetInSec: 0, sym&: ss); |
391 | } |
392 | |
393 | // .eh_frame sections are mergeable input sections, so their input |
394 | // offsets are not linearly mapped to output section. For each input |
395 | // offset, we need to find a section piece containing the offset and |
396 | // add the piece's base address to the input offset to compute the |
397 | // output offset. That isn't cheap. |
398 | // |
399 | // This class is to speed up the offset computation. When we process |
400 | // relocations, we access offsets in the monotonically increasing |
401 | // order. So we can optimize for that access pattern. |
402 | // |
403 | // For sections other than .eh_frame, this class doesn't do anything. |
404 | namespace { |
405 | class OffsetGetter { |
406 | public: |
407 | OffsetGetter() = default; |
408 | explicit OffsetGetter(InputSectionBase &sec) { |
409 | if (auto *eh = dyn_cast<EhInputSection>(Val: &sec)) { |
410 | cies = eh->cies; |
411 | fdes = eh->fdes; |
412 | i = cies.begin(); |
413 | j = fdes.begin(); |
414 | } |
415 | } |
416 | |
417 | // Translates offsets in input sections to offsets in output sections. |
418 | // Given offset must increase monotonically. We assume that Piece is |
419 | // sorted by inputOff. |
420 | uint64_t get(Ctx &ctx, uint64_t off) { |
421 | if (cies.empty()) |
422 | return off; |
423 | |
424 | while (j != fdes.end() && j->inputOff <= off) |
425 | ++j; |
426 | auto it = j; |
427 | if (j == fdes.begin() || j[-1].inputOff + j[-1].size <= off) { |
428 | while (i != cies.end() && i->inputOff <= off) |
429 | ++i; |
430 | if (i == cies.begin() || i[-1].inputOff + i[-1].size <= off) { |
431 | Err(ctx) << ".eh_frame: relocation is not in any piece"; |
432 | return 0; |
433 | } |
434 | it = i; |
435 | } |
436 | |
437 | // Offset -1 means that the piece is dead (i.e. garbage collected). |
438 | if (it[-1].outputOff == -1) |
439 | return -1; |
440 | return it[-1].outputOff + (off - it[-1].inputOff); |
441 | } |
442 | |
443 | private: |
444 | ArrayRef<EhSectionPiece> cies, fdes; |
445 | ArrayRef<EhSectionPiece>::iterator i, j; |
446 | }; |
447 | |
448 | // This class encapsulates states needed to scan relocations for one |
449 | // InputSectionBase. |
450 | class RelocationScanner { |
451 | public: |
452 | RelocationScanner(Ctx &ctx) : ctx(ctx) {} |
453 | template <class ELFT> |
454 | void scanSection(InputSectionBase &s, bool isEH = false); |
455 | |
456 | private: |
457 | Ctx &ctx; |
458 | InputSectionBase *sec; |
459 | OffsetGetter getter; |
460 | |
461 | // End of relocations, used by Mips/PPC64. |
462 | const void *end = nullptr; |
463 | |
464 | template <class RelTy> RelType getMipsN32RelType(RelTy *&rel) const; |
465 | template <class ELFT, class RelTy> |
466 | int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const; |
467 | bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym, |
468 | uint64_t relOff) const; |
469 | void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym, |
470 | int64_t addend) const; |
471 | unsigned handleTlsRelocation(RelExpr expr, RelType type, uint64_t offset, |
472 | Symbol &sym, int64_t addend); |
473 | |
474 | template <class ELFT, class RelTy> |
475 | void scanOne(typename Relocs<RelTy>::const_iterator &i); |
476 | template <class ELFT, class RelTy> void scan(Relocs<RelTy> rels); |
477 | }; |
478 | } // namespace |
479 | |
480 | // MIPS has an odd notion of "paired" relocations to calculate addends. |
481 | // For example, if a relocation is of R_MIPS_HI16, there must be a |
482 | // R_MIPS_LO16 relocation after that, and an addend is calculated using |
483 | // the two relocations. |
484 | template <class ELFT, class RelTy> |
485 | int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr, |
486 | bool isLocal) const { |
487 | if (expr == RE_MIPS_GOTREL && isLocal) |
488 | return sec->getFile<ELFT>()->mipsGp0; |
489 | |
490 | // The ABI says that the paired relocation is used only for REL. |
491 | // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf |
492 | // This generalises to relocation types with implicit addends. |
493 | if (RelTy::HasAddend) |
494 | return 0; |
495 | |
496 | RelType type = rel.getType(ctx.arg.isMips64EL); |
497 | RelType pairTy = getMipsPairType(type, isLocal); |
498 | if (pairTy == R_MIPS_NONE) |
499 | return 0; |
500 | |
501 | const uint8_t *buf = sec->content().data(); |
502 | uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL); |
503 | |
504 | // To make things worse, paired relocations might not be contiguous in |
505 | // the relocation table, so we need to do linear search. *sigh* |
506 | for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri) |
507 | if (ri->getType(ctx.arg.isMips64EL) == pairTy && |
508 | ri->getSymbol(ctx.arg.isMips64EL) == symIndex) |
509 | return ctx.target->getImplicitAddend(buf: buf + ri->r_offset, type: pairTy); |
510 | |
511 | Warn(ctx) << "can't find matching "<< pairTy << " relocation for "<< type; |
512 | return 0; |
513 | } |
514 | |
515 | // Custom error message if Sym is defined in a discarded section. |
516 | template <class ELFT> |
517 | static void maybeReportDiscarded(Ctx &ctx, ELFSyncStream &msg, Undefined &sym) { |
518 | auto *file = dyn_cast<ObjFile<ELFT>>(sym.file); |
519 | if (!file || !sym.discardedSecIdx) |
520 | return; |
521 | ArrayRef<typename ELFT::Shdr> objSections = |
522 | file->template getELFShdrs<ELFT>(); |
523 | |
524 | if (sym.type == ELF::STT_SECTION) { |
525 | msg << "relocation refers to a discarded section: "; |
526 | msg << CHECK2( |
527 | file->getObj().getSectionName(objSections[sym.discardedSecIdx]), file); |
528 | } else { |
529 | msg << "relocation refers to a symbol in a discarded section: "<< &sym; |
530 | } |
531 | msg << "\n>>> defined in "<< file; |
532 | |
533 | Elf_Shdr_Impl<ELFT> elfSec = objSections[sym.discardedSecIdx - 1]; |
534 | if (elfSec.sh_type != SHT_GROUP) |
535 | return; |
536 | |
537 | // If the discarded section is a COMDAT. |
538 | StringRef signature = file->getShtGroupSignature(objSections, elfSec); |
539 | if (const InputFile *prevailing = |
540 | ctx.symtab->comdatGroups.lookup(Val: CachedHashStringRef(signature))) { |
541 | msg << "\n>>> section group signature: "<< signature |
542 | << "\n>>> prevailing definition is in "<< prevailing; |
543 | if (sym.nonPrevailing) { |
544 | msg << "\n>>> or the symbol in the prevailing group had STB_WEAK " |
545 | "binding and the symbol in a non-prevailing group had STB_GLOBAL " |
546 | "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding " |
547 | "signature is not supported"; |
548 | } |
549 | } |
550 | } |
551 | |
552 | // Check whether the definition name def is a mangled function name that matches |
553 | // the reference name ref. |
554 | static bool canSuggestExternCForCXX(StringRef ref, StringRef def) { |
555 | llvm::ItaniumPartialDemangler d; |
556 | std::string name = def.str(); |
557 | if (d.partialDemangle(MangledName: name.c_str())) |
558 | return false; |
559 | char *buf = d.getFunctionName(Buf: nullptr, N: nullptr); |
560 | if (!buf) |
561 | return false; |
562 | bool ret = ref == buf; |
563 | free(ptr: buf); |
564 | return ret; |
565 | } |
566 | |
567 | // Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns |
568 | // the suggested symbol, which is either in the symbol table, or in the same |
569 | // file of sym. |
570 | static const Symbol *getAlternativeSpelling(Ctx &ctx, const Undefined &sym, |
571 | std::string &pre_hint, |
572 | std::string &post_hint) { |
573 | DenseMap<StringRef, const Symbol *> map; |
574 | if (sym.file->kind() == InputFile::ObjKind) { |
575 | auto *file = cast<ELFFileBase>(Val: sym.file); |
576 | // If sym is a symbol defined in a discarded section, maybeReportDiscarded() |
577 | // will give an error. Don't suggest an alternative spelling. |
578 | if (sym.discardedSecIdx != 0 && |
579 | file->getSections()[sym.discardedSecIdx] == &InputSection::discarded) |
580 | return nullptr; |
581 | |
582 | // Build a map of local defined symbols. |
583 | for (const Symbol *s : sym.file->getSymbols()) |
584 | if (s->isLocal() && s->isDefined() && !s->getName().empty()) |
585 | map.try_emplace(Key: s->getName(), Args&: s); |
586 | } |
587 | |
588 | auto suggest = [&](StringRef newName) -> const Symbol * { |
589 | // If defined locally. |
590 | if (const Symbol *s = map.lookup(Val: newName)) |
591 | return s; |
592 | |
593 | // If in the symbol table and not undefined. |
594 | if (const Symbol *s = ctx.symtab->find(name: newName)) |
595 | if (!s->isUndefined()) |
596 | return s; |
597 | |
598 | return nullptr; |
599 | }; |
600 | |
601 | // This loop enumerates all strings of Levenshtein distance 1 as typo |
602 | // correction candidates and suggests the one that exists as a non-undefined |
603 | // symbol. |
604 | StringRef name = sym.getName(); |
605 | for (size_t i = 0, e = name.size(); i != e + 1; ++i) { |
606 | // Insert a character before name[i]. |
607 | std::string newName = (name.substr(Start: 0, N: i) + "0"+ name.substr(Start: i)).str(); |
608 | for (char c = '0'; c <= 'z'; ++c) { |
609 | newName[i] = c; |
610 | if (const Symbol *s = suggest(newName)) |
611 | return s; |
612 | } |
613 | if (i == e) |
614 | break; |
615 | |
616 | // Substitute name[i]. |
617 | newName = std::string(name); |
618 | for (char c = '0'; c <= 'z'; ++c) { |
619 | newName[i] = c; |
620 | if (const Symbol *s = suggest(newName)) |
621 | return s; |
622 | } |
623 | |
624 | // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is |
625 | // common. |
626 | if (i + 1 < e) { |
627 | newName[i] = name[i + 1]; |
628 | newName[i + 1] = name[i]; |
629 | if (const Symbol *s = suggest(newName)) |
630 | return s; |
631 | } |
632 | |
633 | // Delete name[i]. |
634 | newName = (name.substr(Start: 0, N: i) + name.substr(Start: i + 1)).str(); |
635 | if (const Symbol *s = suggest(newName)) |
636 | return s; |
637 | } |
638 | |
639 | // Case mismatch, e.g. Foo vs FOO. |
640 | for (auto &it : map) |
641 | if (name.equals_insensitive(RHS: it.first)) |
642 | return it.second; |
643 | for (Symbol *sym : ctx.symtab->getSymbols()) |
644 | if (!sym->isUndefined() && name.equals_insensitive(RHS: sym->getName())) |
645 | return sym; |
646 | |
647 | // The reference may be a mangled name while the definition is not. Suggest a |
648 | // missing extern "C". |
649 | if (name.starts_with(Prefix: "_Z")) { |
650 | std::string buf = name.str(); |
651 | llvm::ItaniumPartialDemangler d; |
652 | if (!d.partialDemangle(MangledName: buf.c_str())) |
653 | if (char *buf = d.getFunctionName(Buf: nullptr, N: nullptr)) { |
654 | const Symbol *s = suggest(buf); |
655 | free(ptr: buf); |
656 | if (s) { |
657 | pre_hint = ": extern \"C\" "; |
658 | return s; |
659 | } |
660 | } |
661 | } else { |
662 | const Symbol *s = nullptr; |
663 | for (auto &it : map) |
664 | if (canSuggestExternCForCXX(ref: name, def: it.first)) { |
665 | s = it.second; |
666 | break; |
667 | } |
668 | if (!s) |
669 | for (Symbol *sym : ctx.symtab->getSymbols()) |
670 | if (canSuggestExternCForCXX(ref: name, def: sym->getName())) { |
671 | s = sym; |
672 | break; |
673 | } |
674 | if (s) { |
675 | pre_hint = " to declare "; |
676 | post_hint = " as extern \"C\"?"; |
677 | return s; |
678 | } |
679 | } |
680 | |
681 | return nullptr; |
682 | } |
683 | |
684 | static void reportUndefinedSymbol(Ctx &ctx, const UndefinedDiag &undef, |
685 | bool correctSpelling) { |
686 | Undefined &sym = *undef.sym; |
687 | ELFSyncStream msg(ctx, DiagLevel::None); |
688 | |
689 | auto visibility = [&]() { |
690 | switch (sym.visibility()) { |
691 | case STV_INTERNAL: |
692 | return "internal "; |
693 | case STV_HIDDEN: |
694 | return "hidden "; |
695 | case STV_PROTECTED: |
696 | return "protected "; |
697 | default: |
698 | return ""; |
699 | } |
700 | }; |
701 | |
702 | switch (ctx.arg.ekind) { |
703 | case ELF32LEKind: |
704 | maybeReportDiscarded<ELF32LE>(ctx, msg, sym); |
705 | break; |
706 | case ELF32BEKind: |
707 | maybeReportDiscarded<ELF32BE>(ctx, msg, sym); |
708 | break; |
709 | case ELF64LEKind: |
710 | maybeReportDiscarded<ELF64LE>(ctx, msg, sym); |
711 | break; |
712 | case ELF64BEKind: |
713 | maybeReportDiscarded<ELF64BE>(ctx, msg, sym); |
714 | break; |
715 | default: |
716 | llvm_unreachable(""); |
717 | } |
718 | if (msg.str().empty()) |
719 | msg << "undefined "<< visibility() << "symbol: "<< &sym; |
720 | |
721 | const size_t maxUndefReferences = 3; |
722 | for (UndefinedDiag::Loc l : |
723 | ArrayRef(undef.locs).take_front(N: maxUndefReferences)) { |
724 | InputSectionBase &sec = *l.sec; |
725 | uint64_t offset = l.offset; |
726 | |
727 | msg << "\n>>> referenced by "; |
728 | // In the absence of line number information, utilize DW_TAG_variable (if |
729 | // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`). |
730 | Symbol *enclosing = sec.getEnclosingSymbol(offset); |
731 | |
732 | ELFSyncStream msg1(ctx, DiagLevel::None); |
733 | auto tell = msg.tell(); |
734 | msg << sec.getSrcMsg(sym: enclosing ? *enclosing : sym, offset); |
735 | if (tell != msg.tell()) |
736 | msg << "\n>>> "; |
737 | msg << sec.getObjMsg(offset); |
738 | } |
739 | |
740 | if (maxUndefReferences < undef.locs.size()) |
741 | msg << "\n>>> referenced "<< (undef.locs.size() - maxUndefReferences) |
742 | << " more times"; |
743 | |
744 | if (correctSpelling) { |
745 | std::string pre_hint = ": ", post_hint; |
746 | if (const Symbol *corrected = |
747 | getAlternativeSpelling(ctx, sym, pre_hint, post_hint)) { |
748 | msg << "\n>>> did you mean"<< pre_hint << corrected << post_hint |
749 | << "\n>>> defined in: "<< corrected->file; |
750 | } |
751 | } |
752 | |
753 | if (sym.getName().starts_with(Prefix: "_ZTV")) |
754 | msg << "\n>>> the vtable symbol may be undefined because the class is " |
755 | "missing its key function " |
756 | "(see https://lld.llvm.org/missingkeyfunction)"; |
757 | if (ctx.arg.gcSections && ctx.arg.zStartStopGC && |
758 | sym.getName().starts_with(Prefix: "__start_")) { |
759 | msg << "\n>>> the encapsulation symbol needs to be retained under " |
760 | "--gc-sections properly; consider -z nostart-stop-gc " |
761 | "(see https://lld.llvm.org/ELF/start-stop-gc)"; |
762 | } |
763 | |
764 | if (undef.isWarning) |
765 | Warn(ctx) << msg.str(); |
766 | else |
767 | ctx.e.error(msg: msg.str(), tag: ErrorTag::SymbolNotFound, args: {sym.getName()}); |
768 | } |
769 | |
770 | void elf::reportUndefinedSymbols(Ctx &ctx) { |
771 | // Find the first "undefined symbol" diagnostic for each diagnostic, and |
772 | // collect all "referenced from" lines at the first diagnostic. |
773 | DenseMap<Symbol *, UndefinedDiag *> firstRef; |
774 | for (UndefinedDiag &undef : ctx.undefErrs) { |
775 | assert(undef.locs.size() == 1); |
776 | if (UndefinedDiag *canon = firstRef.lookup(Val: undef.sym)) { |
777 | canon->locs.push_back(Elt: undef.locs[0]); |
778 | undef.locs.clear(); |
779 | } else |
780 | firstRef[undef.sym] = &undef; |
781 | } |
782 | |
783 | // Enable spell corrector for the first 2 diagnostics. |
784 | for (auto [i, undef] : llvm::enumerate(First&: ctx.undefErrs)) |
785 | if (!undef.locs.empty()) |
786 | reportUndefinedSymbol(ctx, undef, correctSpelling: i < 2); |
787 | } |
788 | |
789 | // Report an undefined symbol if necessary. |
790 | // Returns true if the undefined symbol will produce an error message. |
791 | static bool maybeReportUndefined(Ctx &ctx, Undefined &sym, |
792 | InputSectionBase &sec, uint64_t offset) { |
793 | std::lock_guard<std::mutex> lock(ctx.relocMutex); |
794 | // If versioned, issue an error (even if the symbol is weak) because we don't |
795 | // know the defining filename which is required to construct a Verneed entry. |
796 | if (sym.hasVersionSuffix) { |
797 | ctx.undefErrs.push_back(Elt: {.sym: &sym, .locs: {{.sec: &sec, .offset: offset}}, .isWarning: false}); |
798 | return true; |
799 | } |
800 | if (sym.isWeak()) |
801 | return false; |
802 | |
803 | bool canBeExternal = !sym.isLocal() && sym.visibility() == STV_DEFAULT; |
804 | if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal) |
805 | return false; |
806 | |
807 | // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc |
808 | // which references a switch table in a discarded .rodata/.text section. The |
809 | // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF |
810 | // spec says references from outside the group to a STB_LOCAL symbol are not |
811 | // allowed. Work around the bug. |
812 | // |
813 | // PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible |
814 | // because .LC0-.LTOC is not representable if the two labels are in different |
815 | // .got2 |
816 | if (sym.discardedSecIdx != 0 && (sec.name == ".got2"|| sec.name == ".toc")) |
817 | return false; |
818 | |
819 | bool isWarning = |
820 | (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) || |
821 | ctx.arg.noinhibitExec; |
822 | ctx.undefErrs.push_back(Elt: {.sym: &sym, .locs: {{.sec: &sec, .offset: offset}}, .isWarning: isWarning}); |
823 | return !isWarning; |
824 | } |
825 | |
826 | // MIPS N32 ABI treats series of successive relocations with the same offset |
827 | // as a single relocation. The similar approach used by N64 ABI, but this ABI |
828 | // packs all relocations into the single relocation record. Here we emulate |
829 | // this for the N32 ABI. Iterate over relocation with the same offset and put |
830 | // theirs types into the single bit-set. |
831 | template <class RelTy> |
832 | RelType RelocationScanner::getMipsN32RelType(RelTy *&rel) const { |
833 | uint32_t type = 0; |
834 | uint64_t offset = rel->r_offset; |
835 | |
836 | int n = 0; |
837 | while (rel != static_cast<const RelTy *>(end) && rel->r_offset == offset) |
838 | type |= (rel++)->getType(ctx.arg.isMips64EL) << (8 * n++); |
839 | return type; |
840 | } |
841 | |
842 | template <bool shard = false> |
843 | static void addRelativeReloc(Ctx &ctx, InputSectionBase &isec, |
844 | uint64_t offsetInSec, Symbol &sym, int64_t addend, |
845 | RelExpr expr, RelType type) { |
846 | Partition &part = isec.getPartition(ctx); |
847 | |
848 | if (sym.isTagged()) { |
849 | part.relaDyn->addRelativeReloc<shard>(ctx.target->relativeRel, isec, |
850 | offsetInSec, sym, addend, type, expr); |
851 | // With MTE globals, we always want to derive the address tag by `ldg`-ing |
852 | // the symbol. When we have a RELATIVE relocation though, we no longer have |
853 | // a reference to the symbol. Because of this, when we have an addend that |
854 | // puts the result of the RELATIVE relocation out-of-bounds of the symbol |
855 | // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI |
856 | // says we should store the offset to the start of the symbol in the target |
857 | // field. This is described in further detail in: |
858 | // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative |
859 | if (addend < 0 || static_cast<uint64_t>(addend) >= sym.getSize()) |
860 | isec.relocations.push_back(Elt: {.expr: expr, .type: type, .offset: offsetInSec, .addend: addend, .sym: &sym}); |
861 | return; |
862 | } |
863 | |
864 | // Add a relative relocation. If relrDyn section is enabled, and the |
865 | // relocation offset is guaranteed to be even, add the relocation to |
866 | // the relrDyn section, otherwise add it to the relaDyn section. |
867 | // relrDyn sections don't support odd offsets. Also, relrDyn sections |
868 | // don't store the addend values, so we must write it to the relocated |
869 | // address. |
870 | if (part.relrDyn && isec.addralign >= 2 && offsetInSec % 2 == 0) { |
871 | isec.addReloc(r: {.expr: expr, .type: type, .offset: offsetInSec, .addend: addend, .sym: &sym}); |
872 | if (shard) |
873 | part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back( |
874 | Elt: {.inputSec: &isec, .relocIdx: isec.relocs().size() - 1}); |
875 | else |
876 | part.relrDyn->relocs.push_back(Elt: {.inputSec: &isec, .relocIdx: isec.relocs().size() - 1}); |
877 | return; |
878 | } |
879 | part.relaDyn->addRelativeReloc<shard>(ctx.target->relativeRel, isec, |
880 | offsetInSec, sym, addend, type, expr); |
881 | } |
882 | |
883 | template <class PltSection, class GotPltSection> |
884 | static void addPltEntry(Ctx &ctx, PltSection &plt, GotPltSection &gotPlt, |
885 | RelocationBaseSection &rel, RelType type, Symbol &sym) { |
886 | plt.addEntry(sym); |
887 | gotPlt.addEntry(sym); |
888 | rel.addReloc({type, &gotPlt, sym.getGotPltOffset(ctx), |
889 | sym.isPreemptible ? DynamicReloc::AgainstSymbol |
890 | : DynamicReloc::AddendOnlyWithTargetVA, |
891 | sym, 0, R_ABS}); |
892 | } |
893 | |
894 | void elf::addGotEntry(Ctx &ctx, Symbol &sym) { |
895 | ctx.in.got->addEntry(sym); |
896 | uint64_t off = sym.getGotOffset(ctx); |
897 | |
898 | // If preemptible, emit a GLOB_DAT relocation. |
899 | if (sym.isPreemptible) { |
900 | ctx.mainPart->relaDyn->addReloc(reloc: {ctx.target->gotRel, ctx.in.got.get(), off, |
901 | DynamicReloc::AgainstSymbol, sym, 0, |
902 | R_ABS}); |
903 | return; |
904 | } |
905 | |
906 | // Otherwise, the value is either a link-time constant or the load base |
907 | // plus a constant. |
908 | if (!ctx.arg.isPic || isAbsolute(sym)) |
909 | ctx.in.got->addConstant(r: {.expr: R_ABS, .type: ctx.target->symbolicRel, .offset: off, .addend: 0, .sym: &sym}); |
910 | else |
911 | addRelativeReloc(ctx, isec&: *ctx.in.got, offsetInSec: off, sym, addend: 0, expr: R_ABS, |
912 | type: ctx.target->symbolicRel); |
913 | } |
914 | |
915 | static void addGotAuthEntry(Ctx &ctx, Symbol &sym) { |
916 | ctx.in.got->addEntry(sym); |
917 | ctx.in.got->addAuthEntry(sym); |
918 | uint64_t off = sym.getGotOffset(ctx); |
919 | |
920 | // If preemptible, emit a GLOB_DAT relocation. |
921 | if (sym.isPreemptible) { |
922 | ctx.mainPart->relaDyn->addReloc(reloc: {R_AARCH64_AUTH_GLOB_DAT, ctx.in.got.get(), |
923 | off, DynamicReloc::AgainstSymbol, sym, 0, |
924 | R_ABS}); |
925 | return; |
926 | } |
927 | |
928 | // Signed GOT requires dynamic relocation. |
929 | ctx.in.got->getPartition(ctx).relaDyn->addReloc( |
930 | reloc: {R_AARCH64_AUTH_RELATIVE, ctx.in.got.get(), off, |
931 | DynamicReloc::AddendOnlyWithTargetVA, sym, 0, R_ABS}); |
932 | } |
933 | |
934 | static void addTpOffsetGotEntry(Ctx &ctx, Symbol &sym) { |
935 | ctx.in.got->addEntry(sym); |
936 | uint64_t off = sym.getGotOffset(ctx); |
937 | if (!sym.isPreemptible && !ctx.arg.shared) { |
938 | ctx.in.got->addConstant(r: {.expr: R_TPREL, .type: ctx.target->symbolicRel, .offset: off, .addend: 0, .sym: &sym}); |
939 | return; |
940 | } |
941 | ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible( |
942 | dynType: ctx.target->tlsGotRel, isec&: *ctx.in.got, offsetInSec: off, sym, addendRelType: ctx.target->symbolicRel); |
943 | } |
944 | |
945 | // Return true if we can define a symbol in the executable that |
946 | // contains the value/function of a symbol defined in a shared |
947 | // library. |
948 | static bool canDefineSymbolInExecutable(Ctx &ctx, Symbol &sym) { |
949 | // If the symbol has default visibility the symbol defined in the |
950 | // executable will preempt it. |
951 | // Note that we want the visibility of the shared symbol itself, not |
952 | // the visibility of the symbol in the output file we are producing. |
953 | if (!sym.dsoProtected) |
954 | return true; |
955 | |
956 | // If we are allowed to break address equality of functions, defining |
957 | // a plt entry will allow the program to call the function in the |
958 | // .so, but the .so and the executable will no agree on the address |
959 | // of the function. Similar logic for objects. |
960 | return ((sym.isFunc() && ctx.arg.ignoreFunctionAddressEquality) || |
961 | (sym.isObject() && ctx.arg.ignoreDataAddressEquality)); |
962 | } |
963 | |
// Returns true if a given relocation can be computed at link-time.
// This only handles relocation types expected in processAux.
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers a
// non-interposable function in the same executable. This function
// will return true for such relocation.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
                                                 const Symbol &sym,
                                                 uint64_t relOff) const {
  // These expressions always compute a constant: they are offsets relative to
  // the GOT/PLT or similar linker-created tables, not absolute addresses.
  if (oneof<
          R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, RE_MIPS_GOT_LOCAL_PAGE,
          RE_MIPS_GOTREL, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32, RE_MIPS_GOT_GP_PC,
          RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC,
          R_GOTONLY_PC, R_GOTPLTONLY_PC, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT,
          R_GOTPLT_GOTREL, R_GOTPLT_PC, RE_PPC32_PLTREL, RE_PPC64_CALL_PLT,
          RE_PPC64_RELAX_TOC, RE_RISCV_ADD, RE_AARCH64_GOT_PAGE,
          RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, RE_LOONGARCH_PLT_PAGE_PC,
          RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>(e))
    return true;

  // These never do, except if the entire file is position dependent or if
  // only the low bits are used.
  if (e == R_GOT || e == R_PLT)
    return ctx.target->usesOnlyLowPageBits(type) || !ctx.arg.isPic;

  // A preemptible symbol can be interposed at load time, so its address is
  // never a link-time constant. R_AARCH64_AUTH_ABS64 requires a dynamic
  // relocation.
  if (sym.isPreemptible || e == RE_AARCH64_AUTH)
    return false;
  // Non-PIC output: every non-preemptible symbol has a fixed address.
  if (!ctx.arg.isPic)
    return true;

  // Constant when referencing a non-preemptible symbol.
  if (e == R_SIZE || e == RE_RISCV_LEB128)
    return true;

  // For the target and the relocation, we want to know if they are
  // absolute or relative. An absolute value combined with a non-relative
  // relocation (or a relative value with a relative relocation) is constant.
  bool absVal = isAbsoluteValue(sym) && e != RE_PPC64_TOCBASE;
  bool relE = isRelExpr(e);
  if (absVal && !relE)
    return true;
  if (!absVal && relE)
    return true;
  if (!absVal && !relE)
    return ctx.target->usesOnlyLowPageBits(type);

  assert(absVal && relE);

  // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
  // in PIC mode. This is a little strange, but it allows us to link function
  // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
  // Normally such a call will be guarded with a comparison, which will load a
  // zero from the GOT.
  if (sym.isUndefined())
    return true;

  // We set the final symbols values for linker script defined symbols later.
  // They always can be computed as a link time constant.
  if (sym.scriptDefined)
    return true;

  // Remaining case: a PC-relative expression against an absolute symbol in
  // PIC output. Diagnose it, but still treat it as constant to avoid a
  // cascade of follow-on errors.
  auto diag = Err(ctx);
  diag << "relocation "<< type << " cannot refer to absolute symbol: "<< &sym;
  printLocation(diag, *sec, sym, relOff);
  return true;
}
1035 | |
// The reason we have to do this early scan is as follows
// * To mmap the output file, we need to know the size
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by outputting the file with write:
// * Write the allocated output sections, computing addresses.
// * Apply relocations, recording which ones require a dynamic reloc.
// * Write the dynamic relocations.
// * Write the rest of the file.
// This would have some drawbacks. For example, we would only know if .rela.dyn
// is needed after applying relocations. If it is, it will go after rw and rx
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
//
// Decide, for one scanned relocation (expr/type at sec+offset against sym),
// whether it resolves statically or needs GOT/PLT entries and/or a dynamic
// relocation, and record that decision. Errors are reported for infeasible
// combinations (e.g. non-relative relocations in read-only sections of PIC
// output against preemptible symbols).
void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
                                   Symbol &sym, int64_t addend) const {
  // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT
  // indirection.
  const bool isIfunc = sym.isGnuIFunc();
  if (!sym.isPreemptible && (!isIfunc || ctx.arg.zIfuncNoplt)) {
    if (expr != R_GOT_PC) {
      // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call
      // stub type. It should be ignored if optimized to R_PC.
      if (ctx.arg.emachine == EM_PPC && expr == RE_PPC32_PLTREL)
        addend &= ~0x8000;
      // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into
      // call __tls_get_addr even if the symbol is non-preemptible.
      if (!(ctx.arg.emachine == EM_HEXAGON &&
            (type == R_HEX_GD_PLT_B22_PCREL ||
             type == R_HEX_GD_PLT_B22_PCREL_X ||
             type == R_HEX_GD_PLT_B32_PCREL_X)))
        expr = fromPlt(expr);
    } else if (!isAbsoluteValue(sym) ||
               (type == R_PPC64_PCREL_OPT && ctx.arg.emachine == EM_PPC64)) {
      expr = ctx.target->adjustGotPcExpr(type, addend,
                                         sec->content().data() + offset);
      // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
      // needing the GOT if we can't relax everything.
      if (expr == R_RELAX_GOT_PC)
        ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
    }
  }

  // We were asked not to generate PLT entries for ifuncs. Instead, pass the
  // direct relocation on through.
  if (LLVM_UNLIKELY(isIfunc) && ctx.arg.zIfuncNoplt) {
    std::lock_guard<std::mutex> lock(ctx.relocMutex);
    sym.isExported = true;
    ctx.mainPart->relaDyn->addSymbolReloc(type, *sec, offset, sym, addend,
                                          type);
    return;
  }

  if (needsGot(expr)) {
    if (ctx.arg.emachine == EM_MIPS) {
      // MIPS ABI has special rules to process GOT entries and doesn't
      // require relocation entries for them. A special case is TLS
      // relocations. In that case dynamic loader applies dynamic
      // relocations to initialize TLS GOT entries.
      // See "Global Offset Table" in Chapter 5 in the following document
      // for detailed description:
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
      ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
    } else if (!sym.isTls() || ctx.arg.emachine != EM_LOONGARCH) {
      // Many LoongArch TLS relocs reuse the RE_LOONGARCH_GOT type, in which
      // case the NEEDS_GOT flag shouldn't get set.
      if (expr == RE_AARCH64_AUTH_GOT || expr == RE_AARCH64_AUTH_GOT_PAGE_PC ||
          expr == RE_AARCH64_AUTH_GOT_PC)
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_AUTH);
      else
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_NONAUTH);
    }
  } else if (needsPlt(expr)) {
    sym.setFlags(NEEDS_PLT);
  } else if (LLVM_UNLIKELY(isIfunc)) {
    sym.setFlags(HAS_DIRECT_RELOC);
  }

  // If the relocation is known to be a link-time constant, we know no dynamic
  // relocation will be created, pass the control to relocateAlloc() or
  // relocateNonAlloc() to resolve it.
  //
  // The behavior of an undefined weak reference is implementation defined. For
  // non-link-time constants, we resolve relocations statically (let
  // relocate{,Non}Alloc() resolve them) for -no-pie and try producing dynamic
  // relocations for -pie and -shared.
  //
  // The general expectation of -no-pie static linking is that there is no
  // dynamic relocation (except IRELATIVE). Emitting dynamic relocations for
  // -shared matches the spirit of its -z undefs default. -pie has freedom on
  // choices, and we choose dynamic relocations to be consistent with the
  // handling of GOT-generating relocations.
  if (isStaticLinkTimeConstant(expr, type, sym, offset) ||
      (!ctx.arg.isPic && sym.isUndefWeak())) {
    sec->addReloc({expr, type, offset, addend, &sym});
    return;
  }

  // Use a simple -z notext rule that treats all sections except .eh_frame as
  // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
  // SectionBase::getOffset would incorrectly adjust the offset).
  //
  // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
  // conversion. We still emit a dynamic relocation.
  bool canWrite = (sec->flags & SHF_WRITE) ||
                  !(ctx.arg.zText ||
                    (isa<EhInputSection>(sec) && ctx.arg.emachine != EM_MIPS));
  if (canWrite) {
    RelType rel = ctx.target->getDynRel(type);
    if (oneof<R_GOT, RE_LOONGARCH_GOT>(expr) ||
        (rel == ctx.target->symbolicRel && !sym.isPreemptible)) {
      addRelativeReloc<true>(ctx, *sec, offset, sym, addend, expr, type);
      return;
    }
    if (rel != 0) {
      if (ctx.arg.emachine == EM_MIPS && rel == ctx.target->symbolicRel)
        rel = ctx.target->relativeRel;
      std::lock_guard<std::mutex> lock(ctx.relocMutex);
      Partition &part = sec->getPartition(ctx);
      if (ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64) {
        // For a preemptible symbol, we can't use a relative relocation. For an
        // undefined symbol, we can't compute offset at link-time and use a
        // relative relocation. Use a symbolic relocation instead.
        if (sym.isPreemptible) {
          part.relaDyn->addSymbolReloc(type, *sec, offset, sym, addend, type);
        } else if (part.relrAuthDyn && sec->addralign >= 2 && offset % 2 == 0) {
          // When symbol values are determined in
          // finalizeAddressDependentContent, some .relr.auth.dyn relocations
          // may be moved to .rela.dyn.
          sec->addReloc({expr, type, offset, addend, &sym});
          part.relrAuthDyn->relocs.push_back({sec, sec->relocs().size() - 1});
        } else {
          part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, sec, offset,
                                  DynamicReloc::AddendOnlyWithTargetVA, sym,
                                  addend, R_ABS});
        }
        return;
      }
      part.relaDyn->addSymbolReloc(rel, *sec, offset, sym, addend, type);

      // MIPS ABI turns using of GOT and dynamic relocations inside out.
      // While regular ABI uses dynamic relocations to fill up GOT entries
      // MIPS ABI requires dynamic linker to fill up GOT entries using
      // specially sorted dynamic symbol table. This affects even dynamic
      // relocations against symbols which do not require GOT entries
      // creation explicitly, i.e. do not have any GOT-relocations. So if
      // a preemptible symbol has a dynamic relocation we anyway have
      // to create a GOT entry for it.
      // If a non-preemptible symbol has a dynamic relocation against it,
      // dynamic linker takes its st_value, adds offset and writes down
      // result of the dynamic relocation. In case of preemptible symbol
      // dynamic linker performs symbol resolution, writes the symbol value
      // to the GOT entry and reads the GOT entry when it needs to perform
      // a dynamic relocation.
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
      if (ctx.arg.emachine == EM_MIPS)
        ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
      return;
    }
  }

  // When producing an executable, we can perform copy relocations (for
  // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO.
  // Copy relocations/canonical PLT entries are unsupported for
  // R_AARCH64_AUTH_ABS64.
  if (!ctx.arg.shared && sym.isShared() &&
      !(ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64)) {
    if (!canDefineSymbolInExecutable(ctx, sym)) {
      auto diag = Err(ctx);
      diag << "cannot preempt symbol: "<< &sym;
      printLocation(diag, *sec, sym, offset);
      return;
    }

    if (sym.isObject()) {
      // Produce a copy relocation.
      if (auto *ss = dyn_cast<SharedSymbol>(&sym)) {
        if (!ctx.arg.zCopyreloc) {
          auto diag = Err(ctx);
          diag << "unresolvable relocation "<< type << " against symbol '"
               << ss << "'; recompile with -fPIC or remove '-z nocopyreloc'";
          printLocation(diag, *sec, sym, offset);
        }
        sym.setFlags(NEEDS_COPY);
      }
      sec->addReloc({expr, type, offset, addend, &sym});
      return;
    }

    // This handles a non PIC program call to function in a shared library. In
    // an ideal world, we could just report an error saying the relocation can
    // overflow at runtime. In the real world with glibc, crt1.o has a
    // R_X86_64_PC32 pointing to libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry and
    // use that as the function value.
    //
    // For the static linking part, we just return a plt expr and everything
    // else will use the PLT entry as the address.
    //
    // The remaining problem is making sure pointer equality still works. We
    // need the help of the dynamic linker for that. We let it know that we have
    // a direct reference to a so symbol by creating an undefined symbol with a
    // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
    // the value of the symbol we created. This is true even for got entries, so
    // pointer equality is maintained. To avoid an infinite loop, the only entry
    // that points to the real function is a dedicated got entry used by the
    // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).

    // For position independent executable on i386, the plt entry requires ebx
    // to be set. This causes two problems:
    // * If some code has a direct reference to a function, it was probably
    //   compiled without -fPIE/-fPIC and doesn't maintain ebx.
    // * If a library definition gets preempted to the executable, it will have
    //   the wrong ebx value.
    if (sym.isFunc()) {
      if (ctx.arg.pie && ctx.arg.emachine == EM_386) {
        auto diag = Err(ctx);
        diag << "symbol '"<< &sym
             << "' cannot be preempted; recompile with -fPIE";
        printLocation(diag, *sec, sym, offset);
      }
      // Canonical PLT: the PLT entry becomes the symbol's address everywhere.
      sym.setFlags(NEEDS_COPY | NEEDS_PLT);
      sec->addReloc({expr, type, offset, addend, &sym});
      return;
    }
  }

  // No legal way to resolve this relocation remains: report it.
  auto diag = Err(ctx);
  diag << "relocation "<< type << " cannot be used against ";
  if (sym.getName().empty())
    diag << "local symbol";
  else
    diag << "symbol '"<< &sym << "'";
  diag << "; recompile with -fPIC";
  printLocation(diag, *sec, sym, offset);
}
1273 | |
1274 | // This function is similar to the `handleTlsRelocation`. MIPS does not |
1275 | // support any relaxations for TLS relocations so by factoring out MIPS |
1276 | // handling in to the separate function we can simplify the code and do not |
1277 | // pollute other `handleTlsRelocation` by MIPS `ifs` statements. |
1278 | // Mips has a custom MipsGotSection that handles the writing of GOT entries |
1279 | // without dynamic relocations. |
1280 | static unsigned handleMipsTlsRelocation(Ctx &ctx, RelType type, Symbol &sym, |
1281 | InputSectionBase &c, uint64_t offset, |
1282 | int64_t addend, RelExpr expr) { |
1283 | if (expr == RE_MIPS_TLSLD) { |
1284 | ctx.in.mipsGot->addTlsIndex(file&: *c.file); |
1285 | c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1286 | return 1; |
1287 | } |
1288 | if (expr == RE_MIPS_TLSGD) { |
1289 | ctx.in.mipsGot->addDynTlsEntry(file&: *c.file, sym); |
1290 | c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1291 | return 1; |
1292 | } |
1293 | return 0; |
1294 | } |
1295 | |
1296 | static unsigned handleAArch64PAuthTlsRelocation(InputSectionBase *sec, |
1297 | RelExpr expr, RelType type, |
1298 | uint64_t offset, Symbol &sym, |
1299 | int64_t addend) { |
1300 | // Do not optimize signed TLSDESC to LE/IE (as described in pauthabielf64). |
1301 | // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#general-restrictions |
1302 | // > PAUTHELF64 only supports the descriptor based TLS (TLSDESC). |
1303 | if (oneof<RE_AARCH64_AUTH_TLSDESC_PAGE, RE_AARCH64_AUTH_TLSDESC>(expr)) { |
1304 | sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_AUTH); |
1305 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1306 | return 1; |
1307 | } |
1308 | |
1309 | // TLSDESC_CALL hint relocation should not be emitted by compiler with signed |
1310 | // TLSDESC enabled. |
1311 | if (expr == R_TLSDESC_CALL) |
1312 | sym.setFlags(NEEDS_TLSDESC_NONAUTH); |
1313 | |
1314 | return 0; |
1315 | } |
1316 | |
1317 | // Notes about General Dynamic and Local Dynamic TLS models below. They may |
1318 | // require the generation of a pair of GOT entries that have associated dynamic |
1319 | // relocations. The pair of GOT entries created are of the form GOT[e0] Module |
1320 | // Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of |
1321 | // symbol in TLS block. |
1322 | // |
1323 | // Returns the number of relocations processed. |
1324 | unsigned RelocationScanner::handleTlsRelocation(RelExpr expr, RelType type, |
1325 | uint64_t offset, Symbol &sym, |
1326 | int64_t addend) { |
1327 | bool isAArch64 = ctx.arg.emachine == EM_AARCH64; |
1328 | |
1329 | if (isAArch64) |
1330 | if (unsigned processed = handleAArch64PAuthTlsRelocation( |
1331 | sec, expr, type, offset, sym, addend)) |
1332 | return processed; |
1333 | |
1334 | if (expr == R_TPREL || expr == R_TPREL_NEG) { |
1335 | if (ctx.arg.shared) { |
1336 | auto diag = Err(ctx); |
1337 | diag << "relocation "<< type << " against "<< &sym |
1338 | << " cannot be used with -shared"; |
1339 | printLocation(s&: diag, sec&: *sec, sym, off: offset); |
1340 | return 1; |
1341 | } |
1342 | return 0; |
1343 | } |
1344 | |
1345 | if (ctx.arg.emachine == EM_MIPS) |
1346 | return handleMipsTlsRelocation(ctx, type, sym, c&: *sec, offset, addend, expr); |
1347 | |
1348 | // LoongArch does not yet implement transition from TLSDESC to LE/IE, so |
1349 | // generate TLSDESC dynamic relocation for the dynamic linker to handle. |
1350 | if (ctx.arg.emachine == EM_LOONGARCH && |
1351 | oneof<RE_LOONGARCH_TLSDESC_PAGE_PC, R_TLSDESC, R_TLSDESC_PC, |
1352 | R_TLSDESC_CALL>(expr)) { |
1353 | if (expr != R_TLSDESC_CALL) { |
1354 | sym.setFlags(NEEDS_TLSDESC); |
1355 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1356 | } |
1357 | return 1; |
1358 | } |
1359 | |
1360 | bool isRISCV = ctx.arg.emachine == EM_RISCV; |
1361 | |
1362 | if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC, |
1363 | R_TLSDESC_GOTPLT>(expr) && |
1364 | ctx.arg.shared) { |
1365 | // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not |
1366 | // set NEEDS_TLSDESC on the label. |
1367 | if (expr != R_TLSDESC_CALL) { |
1368 | if (isAArch64) |
1369 | sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_NONAUTH); |
1370 | else if (!isRISCV || type == R_RISCV_TLSDESC_HI20) |
1371 | sym.setFlags(NEEDS_TLSDESC); |
1372 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1373 | } |
1374 | return 1; |
1375 | } |
1376 | |
1377 | // LoongArch supports IE to LE optimization in non-extreme code model. |
1378 | bool execOptimizeInLoongArch = |
1379 | ctx.arg.emachine == EM_LOONGARCH && |
1380 | (type == R_LARCH_TLS_IE_PC_HI20 || type == R_LARCH_TLS_IE_PC_LO12); |
1381 | |
1382 | // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE |
1383 | // optimizations. |
1384 | // RISC-V supports TLSDESC to IE/LE optimizations. |
1385 | // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable |
1386 | // optimization as well. |
1387 | bool execOptimize = |
1388 | !ctx.arg.shared && ctx.arg.emachine != EM_ARM && |
1389 | ctx.arg.emachine != EM_HEXAGON && |
1390 | (ctx.arg.emachine != EM_LOONGARCH || execOptimizeInLoongArch) && |
1391 | !(isRISCV && expr != R_TLSDESC_PC && expr != R_TLSDESC_CALL) && |
1392 | !sec->file->ppc64DisableTLSRelax; |
1393 | |
1394 | // If we are producing an executable and the symbol is non-preemptable, it |
1395 | // must be defined and the code sequence can be optimized to use |
// Local-Exec.
1397 | // |
1398 | // ARM and RISC-V do not support any relaxations for TLS relocations, however, |
1399 | // we can omit the DTPMOD dynamic relocations and resolve them at link time |
1400 | // because them are always 1. This may be necessary for static linking as |
1401 | // DTPMOD may not be expected at load time. |
1402 | bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared; |
1403 | |
1404 | // Local Dynamic is for access to module local TLS variables, while still |
1405 | // being suitable for being dynamically loaded via dlopen. GOT[e0] is the |
1406 | // module index, with a special value of 0 for the current module. GOT[e1] is |
1407 | // unused. There only needs to be one module index entry. |
1408 | if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) { |
    // Local-Dynamic relocs can be optimized to Local-Exec.
1410 | if (execOptimize) { |
1411 | sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE), |
1412 | .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1413 | return ctx.target->getTlsGdRelaxSkip(type); |
1414 | } |
1415 | if (expr == R_TLSLD_HINT) |
1416 | return 1; |
1417 | ctx.needsTlsLd.store(i: true, m: std::memory_order_relaxed); |
1418 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1419 | return 1; |
1420 | } |
1421 | |
  // Local-Dynamic relocs can be optimized to Local-Exec.
1423 | if (expr == R_DTPREL) { |
1424 | if (execOptimize) |
1425 | expr = ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE); |
1426 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1427 | return 1; |
1428 | } |
1429 | |
  // Local-Dynamic sequence where offset of tls variable relative to dynamic
  // thread pointer is stored in the got. This cannot be optimized to
  // Local-Exec.
1433 | if (expr == R_TLSLD_GOT_OFF) { |
1434 | sym.setFlags(NEEDS_GOT_DTPREL); |
1435 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1436 | return 1; |
1437 | } |
1438 | |
1439 | if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC, |
1440 | R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC, |
1441 | RE_LOONGARCH_TLSGD_PAGE_PC>(expr)) { |
1442 | if (!execOptimize) { |
1443 | sym.setFlags(NEEDS_TLSGD); |
1444 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1445 | return 1; |
1446 | } |
1447 | |
    // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec
    // depending on the symbol being locally defined or not.
    //
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible
    // label, so TLSDESC=>IE will be categorized as R_RELAX_TLS_GD_TO_LE. We
    // fix the categorization in RISCV::relocateAlloc.
1454 | if (sym.isPreemptible) { |
1455 | sym.setFlags(NEEDS_TLSGD_TO_IE); |
1456 | sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_IE), |
1457 | .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1458 | } else { |
1459 | sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_LE), |
1460 | .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1461 | } |
1462 | return ctx.target->getTlsGdRelaxSkip(type); |
1463 | } |
1464 | |
1465 | if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, RE_AARCH64_GOT_PAGE_PC, |
1466 | RE_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) { |
1467 | ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed); |
1468 | // Initial-Exec relocs can be optimized to Local-Exec if the symbol is |
1469 | // locally defined. This is not supported on SystemZ. |
1470 | if (execOptimize && isLocalInExecutable && ctx.arg.emachine != EM_S390) { |
1471 | sec->addReloc(r: {.expr: R_RELAX_TLS_IE_TO_LE, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1472 | } else if (expr != R_TLSIE_HINT) { |
1473 | sym.setFlags(NEEDS_TLSIE); |
1474 | // R_GOT needs a relative relocation for PIC on i386 and Hexagon. |
1475 | if (expr == R_GOT && ctx.arg.isPic && |
1476 | !ctx.target->usesOnlyLowPageBits(type)) |
1477 | addRelativeReloc<true>(ctx, isec&: *sec, offsetInSec: offset, sym, addend, expr, type); |
1478 | else |
1479 | sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1480 | } |
1481 | return 1; |
1482 | } |
1483 | |
  // LoongArch TLS GD/LD relocs reuse RE_LOONGARCH_GOT, for which NEEDS_TLSIE
  // shouldn't be set. So we check independently.
1486 | if (ctx.arg.emachine == EM_LOONGARCH && expr == RE_LOONGARCH_GOT && |
1487 | execOptimize && isLocalInExecutable) { |
1488 | ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed); |
1489 | sec->addReloc(r: {.expr: R_RELAX_TLS_IE_TO_LE, .type: type, .offset: offset, .addend: addend, .sym: &sym}); |
1490 | return 1; |
1491 | } |
1492 | |
1493 | return 0; |
1494 | } |
1495 | |
1496 | template <class ELFT, class RelTy> |
1497 | void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) { |
1498 | const RelTy &rel = *i; |
1499 | uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL); |
1500 | Symbol &sym = sec->getFile<ELFT>()->getSymbol(symIndex); |
1501 | RelType type; |
1502 | if constexpr (ELFT::Is64Bits || RelTy::IsCrel) { |
1503 | type = rel.getType(ctx.arg.isMips64EL); |
1504 | ++i; |
1505 | } else { |
1506 | // CREL is unsupported for MIPS N32. |
1507 | if (ctx.arg.mipsN32Abi) { |
1508 | type = getMipsN32RelType(i); |
1509 | } else { |
1510 | type = rel.getType(ctx.arg.isMips64EL); |
1511 | ++i; |
1512 | } |
1513 | } |
1514 | // Get an offset in an output section this relocation is applied to. |
1515 | uint64_t offset = getter.get(ctx, off: rel.r_offset); |
1516 | if (offset == uint64_t(-1)) |
1517 | return; |
1518 | |
1519 | RelExpr expr = |
1520 | ctx.target->getRelExpr(type, s: sym, loc: sec->content().data() + offset); |
1521 | int64_t addend = RelTy::HasAddend |
1522 | ? getAddend<ELFT>(rel) |
1523 | : ctx.target->getImplicitAddend( |
1524 | buf: sec->content().data() + rel.r_offset, type); |
1525 | if (LLVM_UNLIKELY(ctx.arg.emachine == EM_MIPS)) |
1526 | addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal()); |
1527 | else if (ctx.arg.emachine == EM_PPC64 && ctx.arg.isPic && type == R_PPC64_TOC) |
1528 | addend += getPPC64TocBase(ctx); |
1529 | |
1530 | // Ignore R_*_NONE and other marker relocations. |
1531 | if (expr == R_NONE) |
1532 | return; |
1533 | |
1534 | // Error if the target symbol is undefined. Symbol index 0 may be used by |
1535 | // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them. |
1536 | if (sym.isUndefined() && symIndex != 0 && |
1537 | maybeReportUndefined(ctx, sym&: cast<Undefined>(Val&: sym), sec&: *sec, offset)) |
1538 | return; |
1539 | |
1540 | if (ctx.arg.emachine == EM_PPC64) { |
1541 | // We can separate the small code model relocations into 2 categories: |
1542 | // 1) Those that access the compiler generated .toc sections. |
1543 | // 2) Those that access the linker allocated got entries. |
1544 | // lld allocates got entries to symbols on demand. Since we don't try to |
1545 | // sort the got entries in any way, we don't have to track which objects |
1546 | // have got-based small code model relocs. The .toc sections get placed |
1547 | // after the end of the linker allocated .got section and we do sort those |
1548 | // so sections addressed with small code model relocations come first. |
1549 | if (type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS) |
1550 | sec->file->ppc64SmallCodeModelTocRelocs = true; |
1551 | |
1552 | // Record the TOC entry (.toc + addend) as not relaxable. See the comment in |
1553 | // InputSectionBase::relocateAlloc(). |
1554 | if (type == R_PPC64_TOC16_LO && sym.isSection() && isa<Defined>(Val: sym) && |
1555 | cast<Defined>(Val&: sym).section->name == ".toc") |
1556 | ctx.ppc64noTocRelax.insert(V: {&sym, addend}); |
1557 | |
1558 | if ((type == R_PPC64_TLSGD && expr == R_TLSDESC_CALL) || |
1559 | (type == R_PPC64_TLSLD && expr == R_TLSLD_HINT)) { |
1560 | // Skip the error check for CREL, which does not set `end`. |
1561 | if constexpr (!RelTy::IsCrel) { |
1562 | if (i == end) { |
1563 | auto diag = Err(ctx); |
1564 | diag << "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last " |
1565 | "relocation"; |
1566 | printLocation(s&: diag, sec&: *sec, sym, off: offset); |
1567 | return; |
1568 | } |
1569 | } |
1570 | |
1571 | // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC |
1572 | // case, so we can discern it later from the toc-case. |
1573 | if (i->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC) |
1574 | ++offset; |
1575 | } |
1576 | } |
1577 | |
1578 | // If the relocation does not emit a GOT or GOTPLT entry but its computation |
1579 | // uses their addresses, we need GOT or GOTPLT to be created. |
1580 | // |
1581 | // The 5 types that relative GOTPLT are all x86 and x86-64 specific. |
1582 | if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT, |
1583 | R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) { |
1584 | ctx.in.gotPlt->hasGotPltOffRel.store(i: true, m: std::memory_order_relaxed); |
1585 | } else if (oneof<R_GOTONLY_PC, R_GOTREL, RE_PPC32_PLTREL, RE_PPC64_TOCBASE, |
1586 | RE_PPC64_RELAX_TOC>(expr)) { |
1587 | ctx.in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed); |
1588 | } |
1589 | |
1590 | // Process TLS relocations, including TLS optimizations. Note that |
1591 | // R_TPREL and R_TPREL_NEG relocations are resolved in processAux. |
1592 | // |
1593 | // Some RISCV TLSDESC relocations reference a local NOTYPE symbol, |
1594 | // but we need to process them in handleTlsRelocation. |
1595 | if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) { |
1596 | if (unsigned processed = |
1597 | handleTlsRelocation(expr, type, offset, sym, addend)) { |
1598 | i += processed - 1; |
1599 | return; |
1600 | } |
1601 | } |
1602 | |
1603 | processAux(expr, type, offset, sym, addend); |
1604 | } |
1605 | |
1606 | // R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for |
1607 | // General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is |
1608 | // found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the |
1609 | // instructions are generated by very old IBM XL compilers. Work around the |
1610 | // issue by disabling GD/LD to IE/LE relaxation. |
1611 | template <class RelTy> |
1612 | static void checkPPC64TLSRelax(InputSectionBase &sec, Relocs<RelTy> rels) { |
1613 | // Skip if sec is synthetic (sec.file is null) or if sec has been marked. |
1614 | if (!sec.file || sec.file->ppc64DisableTLSRelax) |
1615 | return; |
1616 | bool hasGDLD = false; |
1617 | for (const RelTy &rel : rels) { |
1618 | RelType type = rel.getType(false); |
1619 | switch (type) { |
1620 | case R_PPC64_TLSGD: |
1621 | case R_PPC64_TLSLD: |
1622 | return; // Found a marker |
1623 | case R_PPC64_GOT_TLSGD16: |
1624 | case R_PPC64_GOT_TLSGD16_HA: |
1625 | case R_PPC64_GOT_TLSGD16_HI: |
1626 | case R_PPC64_GOT_TLSGD16_LO: |
1627 | case R_PPC64_GOT_TLSLD16: |
1628 | case R_PPC64_GOT_TLSLD16_HA: |
1629 | case R_PPC64_GOT_TLSLD16_HI: |
1630 | case R_PPC64_GOT_TLSLD16_LO: |
1631 | hasGDLD = true; |
1632 | break; |
1633 | } |
1634 | } |
1635 | if (hasGDLD) { |
1636 | sec.file->ppc64DisableTLSRelax = true; |
1637 | Warn(ctx&: sec.file->ctx) |
1638 | << sec.file |
1639 | << ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations " |
1640 | "without " |
1641 | "R_PPC64_TLSGD/R_PPC64_TLSLD relocations"; |
1642 | } |
1643 | } |
1644 | |
1645 | template <class ELFT, class RelTy> |
1646 | void RelocationScanner::scan(Relocs<RelTy> rels) { |
1647 | // Not all relocations end up in Sec->Relocations, but a lot do. |
1648 | sec->relocations.reserve(N: rels.size()); |
1649 | |
1650 | if (ctx.arg.emachine == EM_PPC64) |
1651 | checkPPC64TLSRelax<RelTy>(*sec, rels); |
1652 | |
1653 | // For EhInputSection, OffsetGetter expects the relocations to be sorted by |
1654 | // r_offset. In rare cases (.eh_frame pieces are reordered by a linker |
1655 | // script), the relocations may be unordered. |
1656 | // On SystemZ, all sections need to be sorted by r_offset, to allow TLS |
1657 | // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip. |
1658 | SmallVector<RelTy, 0> storage; |
1659 | if (isa<EhInputSection>(Val: sec) || ctx.arg.emachine == EM_S390) |
1660 | rels = sortRels(rels, storage); |
1661 | |
1662 | if constexpr (RelTy::IsCrel) { |
1663 | for (auto i = rels.begin(); i != rels.end();) |
1664 | scanOne<ELFT, RelTy>(i); |
1665 | } else { |
1666 | // The non-CREL code path has additional check for PPC64 TLS. |
1667 | end = static_cast<const void *>(rels.end()); |
1668 | for (auto i = rels.begin(); i != end;) |
1669 | scanOne<ELFT, RelTy>(i); |
1670 | } |
1671 | |
1672 | // Sort relocations by offset for more efficient searching for |
1673 | // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64. |
1674 | if (ctx.arg.emachine == EM_RISCV || |
1675 | (ctx.arg.emachine == EM_PPC64 && sec->name == ".toc")) |
1676 | llvm::stable_sort(sec->relocs(), |
1677 | [](const Relocation &lhs, const Relocation &rhs) { |
1678 | return lhs.offset < rhs.offset; |
1679 | }); |
1680 | } |
1681 | |
// Bind the scanner to section `s` and dispatch to scan() based on the
// relocation encoding the section carries (CREL, REL, or RELA).
// `isEH` marks .eh_frame sections; it is negated when fetching the
// relocations (presumably CREL is not supported for .eh_frame -- TODO
// confirm against relsOrRelas).
template <class ELFT>
void RelocationScanner::scanSection(InputSectionBase &s, bool isEH) {
sec = &s;
getter = OffsetGetter(s);
const RelsOrRelas<ELFT> rels = s.template relsOrRelas<ELFT>(!isEH);
if (rels.areRelocsCrel())
scan<ELFT>(rels.crels);
else if (rels.areRelocsRel())
scan<ELFT>(rels.rels);
else
scan<ELFT>(rels.relas);
}
1694 | |
1695 | template <class ELFT> void elf::scanRelocations(Ctx &ctx) { |
1696 | // Scan all relocations. Each relocation goes through a series of tests to |
1697 | // determine if it needs special treatment, such as creating GOT, PLT, |
1698 | // copy relocations, etc. Note that relocations for non-alloc sections are |
1699 | // directly processed by InputSection::relocateNonAlloc. |
1700 | |
1701 | // Deterministic parallellism needs sorting relocations which is unsuitable |
1702 | // for -z nocombreloc. MIPS and PPC64 use global states which are not suitable |
1703 | // for parallelism. |
1704 | bool serial = !ctx.arg.zCombreloc || ctx.arg.emachine == EM_MIPS || |
1705 | ctx.arg.emachine == EM_PPC64; |
1706 | parallel::TaskGroup tg; |
1707 | auto outerFn = [&]() { |
1708 | for (ELFFileBase *f : ctx.objectFiles) { |
1709 | auto fn = [f, &ctx]() { |
1710 | RelocationScanner scanner(ctx); |
1711 | for (InputSectionBase *s : f->getSections()) { |
1712 | if (s && s->kind() == SectionBase::Regular && s->isLive() && |
1713 | (s->flags & SHF_ALLOC) && |
1714 | !(s->type == SHT_ARM_EXIDX && ctx.arg.emachine == EM_ARM)) |
1715 | scanner.template scanSection<ELFT>(*s); |
1716 | } |
1717 | }; |
1718 | if (serial) |
1719 | fn(); |
1720 | else |
1721 | tg.spawn(f: fn); |
1722 | } |
1723 | auto scanEH = [&] { |
1724 | RelocationScanner scanner(ctx); |
1725 | for (Partition &part : ctx.partitions) { |
1726 | for (EhInputSection *sec : part.ehFrame->sections) |
1727 | scanner.template scanSection<ELFT>(*sec, /*isEH=*/true); |
1728 | if (part.armExidx && part.armExidx->isLive()) |
1729 | for (InputSection *sec : part.armExidx->exidxSections) |
1730 | if (sec->isLive()) |
1731 | scanner.template scanSection<ELFT>(*sec); |
1732 | } |
1733 | }; |
1734 | if (serial) |
1735 | scanEH(); |
1736 | else |
1737 | tg.spawn(f: scanEH); |
1738 | }; |
1739 | // If `serial` is true, call `spawn` to ensure that `scanner` runs in a thread |
1740 | // with valid getThreadIndex(). |
1741 | if (serial) |
1742 | tg.spawn(f: outerFn); |
1743 | else |
1744 | outerFn(); |
1745 | } |
1746 | |
1747 | RelocationBaseSection &elf::getIRelativeSection(Ctx &ctx) { |
1748 | // Prior to Android V, there was a bug that caused RELR relocations to be |
1749 | // applied after packed relocations. This meant that resolvers referenced by |
1750 | // IRELATIVE relocations in the packed relocation section would read |
1751 | // unrelocated globals with RELR relocations when |
1752 | // --pack-relative-relocs=android+relr is enabled. Work around this by placing |
1753 | // IRELATIVE in .rela.plt. |
1754 | return ctx.arg.androidPackDynRelocs ? *ctx.in.relaPlt |
1755 | : *ctx.mainPart->relaDyn; |
1756 | } |
1757 | |
1758 | static bool handleNonPreemptibleIfunc(Ctx &ctx, Symbol &sym, uint16_t flags) { |
1759 | // Handle a reference to a non-preemptible ifunc. These are special in a |
1760 | // few ways: |
1761 | // |
1762 | // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have |
1763 | // a fixed value. But assuming that all references to the ifunc are |
1764 | // GOT-generating or PLT-generating, the handling of an ifunc is |
1765 | // relatively straightforward. We create a PLT entry in Iplt, which is |
1766 | // usually at the end of .plt, which makes an indirect call using a |
1767 | // matching GOT entry in igotPlt, which is usually at the end of .got.plt. |
1768 | // The GOT entry is relocated using an IRELATIVE relocation in relaDyn, |
1769 | // which is usually at the end of .rela.dyn. |
1770 | // |
1771 | // - Despite the fact that an ifunc does not have a fixed value, compilers |
1772 | // that are not passed -fPIC will assume that they do, and will emit |
1773 | // direct (non-GOT-generating, non-PLT-generating) relocations to the |
1774 | // symbol. This means that if a direct relocation to the symbol is |
1775 | // seen, the linker must set a value for the symbol, and this value must |
1776 | // be consistent no matter what type of reference is made to the symbol. |
1777 | // This can be done by creating a PLT entry for the symbol in the way |
1778 | // described above and making it canonical, that is, making all references |
1779 | // point to the PLT entry instead of the resolver. In lld we also store |
1780 | // the address of the PLT entry in the dynamic symbol table, which means |
1781 | // that the symbol will also have the same value in other modules. |
1782 | // Because the value loaded from the GOT needs to be consistent with |
1783 | // the value computed using a direct relocation, a non-preemptible ifunc |
1784 | // may end up with two GOT entries, one in .got.plt that points to the |
1785 | // address returned by the resolver and is used only by the PLT entry, |
1786 | // and another in .got that points to the PLT entry and is used by |
1787 | // GOT-generating relocations. |
1788 | // |
1789 | // - The fact that these symbols do not have a fixed value makes them an |
1790 | // exception to the general rule that a statically linked executable does |
1791 | // not require any form of dynamic relocation. To handle these relocations |
1792 | // correctly, the IRELATIVE relocations are stored in an array which a |
1793 | // statically linked executable's startup code must enumerate using the |
1794 | // linker-defined symbols __rela?_iplt_{start,end}. |
1795 | if (!sym.isGnuIFunc() || sym.isPreemptible || ctx.arg.zIfuncNoplt) |
1796 | return false; |
1797 | // Skip unreferenced non-preemptible ifunc. |
1798 | if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC))) |
1799 | return true; |
1800 | |
1801 | sym.isInIplt = true; |
1802 | |
1803 | // Create an Iplt and the associated IRELATIVE relocation pointing to the |
1804 | // original section/value pairs. For non-GOT non-PLT relocation case below, we |
1805 | // may alter section/value, so create a copy of the symbol to make |
1806 | // section/value fixed. |
1807 | auto *directSym = makeDefined(args&: cast<Defined>(Val&: sym)); |
1808 | directSym->allocateAux(ctx); |
1809 | auto &dyn = getIRelativeSection(ctx); |
1810 | addPltEntry(ctx, plt&: *ctx.in.iplt, gotPlt&: *ctx.in.igotPlt, rel&: dyn, type: ctx.target->iRelativeRel, |
1811 | sym&: *directSym); |
1812 | sym.allocateAux(ctx); |
1813 | ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx; |
1814 | |
1815 | if (flags & HAS_DIRECT_RELOC) { |
1816 | // Change the value to the IPLT and redirect all references to it. |
1817 | auto &d = cast<Defined>(Val&: sym); |
1818 | d.section = ctx.in.iplt.get(); |
1819 | d.value = d.getPltIdx(ctx) * ctx.target->ipltEntrySize; |
1820 | d.size = 0; |
1821 | // It's important to set the symbol type here so that dynamic loaders |
1822 | // don't try to call the PLT as if it were an ifunc resolver. |
1823 | d.type = STT_FUNC; |
1824 | |
1825 | if (flags & NEEDS_GOT) { |
1826 | assert(!(flags & NEEDS_GOT_AUTH) && |
1827 | "R_AARCH64_AUTH_IRELATIVE is not supported yet"); |
1828 | addGotEntry(ctx, sym); |
1829 | } |
1830 | } else if (flags & NEEDS_GOT) { |
1831 | // Redirect GOT accesses to point to the Igot. |
1832 | sym.gotInIgot = true; |
1833 | } |
1834 | return true; |
1835 | } |
1836 | |
1837 | void elf::postScanRelocations(Ctx &ctx) { |
1838 | auto fn = [&](Symbol &sym) { |
1839 | auto flags = sym.flags.load(m: std::memory_order_relaxed); |
1840 | if (handleNonPreemptibleIfunc(ctx, sym, flags)) |
1841 | return; |
1842 | |
1843 | if (sym.isTagged() && sym.isDefined()) |
1844 | ctx.mainPart->memtagGlobalDescriptors->addSymbol(sym); |
1845 | |
1846 | if (!sym.needsDynReloc()) |
1847 | return; |
1848 | sym.allocateAux(ctx); |
1849 | |
1850 | if (flags & NEEDS_GOT) { |
1851 | if ((flags & NEEDS_GOT_AUTH) && (flags & NEEDS_GOT_NONAUTH)) { |
1852 | auto diag = Err(ctx); |
1853 | diag << "both AUTH and non-AUTH GOT entries for '"<< sym.getName() |
1854 | << "' requested, but only one type of GOT entry per symbol is " |
1855 | "supported"; |
1856 | return; |
1857 | } |
1858 | if (flags & NEEDS_GOT_AUTH) |
1859 | addGotAuthEntry(ctx, sym); |
1860 | else |
1861 | addGotEntry(ctx, sym); |
1862 | } |
1863 | if (flags & NEEDS_PLT) |
1864 | addPltEntry(ctx, plt&: *ctx.in.plt, gotPlt&: *ctx.in.gotPlt, rel&: *ctx.in.relaPlt, |
1865 | type: ctx.target->pltRel, sym); |
1866 | if (flags & NEEDS_COPY) { |
1867 | if (sym.isObject()) { |
1868 | invokeELFT(addCopyRelSymbol, ctx, cast<SharedSymbol>(sym)); |
1869 | // NEEDS_COPY is cleared for sym and its aliases so that in |
1870 | // later iterations aliases won't cause redundant copies. |
1871 | assert(!sym.hasFlag(NEEDS_COPY)); |
1872 | } else { |
1873 | assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT)); |
1874 | if (!sym.isDefined()) { |
1875 | replaceWithDefined(ctx, sym, sec&: *ctx.in.plt, |
1876 | value: ctx.target->pltHeaderSize + |
1877 | ctx.target->pltEntrySize * sym.getPltIdx(ctx), |
1878 | size: 0); |
1879 | sym.setFlags(NEEDS_COPY); |
1880 | if (ctx.arg.emachine == EM_PPC) { |
1881 | // PPC32 canonical PLT entries are at the beginning of .glink |
1882 | cast<Defined>(Val&: sym).value = ctx.in.plt->headerSize; |
1883 | ctx.in.plt->headerSize += 16; |
1884 | cast<PPC32GlinkSection>(Val&: *ctx.in.plt).canonical_plts.push_back(Elt: &sym); |
1885 | } |
1886 | } |
1887 | } |
1888 | } |
1889 | |
1890 | if (!sym.isTls()) |
1891 | return; |
1892 | bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared; |
1893 | GotSection *got = ctx.in.got.get(); |
1894 | |
1895 | if (flags & NEEDS_TLSDESC) { |
1896 | if ((flags & NEEDS_TLSDESC_AUTH) && (flags & NEEDS_TLSDESC_NONAUTH)) { |
1897 | Err(ctx) |
1898 | << "both AUTH and non-AUTH TLSDESC entries for '"<< sym.getName() |
1899 | << "' requested, but only one type of TLSDESC entry per symbol is " |
1900 | "supported"; |
1901 | return; |
1902 | } |
1903 | got->addTlsDescEntry(sym); |
1904 | RelType tlsDescRel = ctx.target->tlsDescRel; |
1905 | if (flags & NEEDS_TLSDESC_AUTH) { |
1906 | got->addTlsDescAuthEntry(); |
1907 | tlsDescRel = ELF::R_AARCH64_AUTH_TLSDESC; |
1908 | } |
1909 | ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible( |
1910 | dynType: tlsDescRel, isec&: *got, offsetInSec: got->getTlsDescOffset(sym), sym, addendRelType: tlsDescRel); |
1911 | } |
1912 | if (flags & NEEDS_TLSGD) { |
1913 | got->addDynTlsEntry(sym); |
1914 | uint64_t off = got->getGlobalDynOffset(b: sym); |
1915 | if (isLocalInExecutable) |
1916 | // Write one to the GOT slot. |
1917 | got->addConstant(r: {.expr: R_ADDEND, .type: ctx.target->symbolicRel, .offset: off, .addend: 1, .sym: &sym}); |
1918 | else |
1919 | ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsModuleIndexRel, |
1920 | isec&: *got, offsetInSec: off, sym); |
1921 | |
1922 | // If the symbol is preemptible we need the dynamic linker to write |
1923 | // the offset too. |
1924 | uint64_t offsetOff = off + ctx.arg.wordsize; |
1925 | if (sym.isPreemptible) |
1926 | ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsOffsetRel, isec&: *got, |
1927 | offsetInSec: offsetOff, sym); |
1928 | else |
1929 | got->addConstant(r: {.expr: R_ABS, .type: ctx.target->tlsOffsetRel, .offset: offsetOff, .addend: 0, .sym: &sym}); |
1930 | } |
1931 | if (flags & NEEDS_TLSGD_TO_IE) { |
1932 | got->addEntry(sym); |
1933 | ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsGotRel, isec&: *got, |
1934 | offsetInSec: sym.getGotOffset(ctx), sym); |
1935 | } |
1936 | if (flags & NEEDS_GOT_DTPREL) { |
1937 | got->addEntry(sym); |
1938 | got->addConstant( |
1939 | r: {.expr: R_ABS, .type: ctx.target->tlsOffsetRel, .offset: sym.getGotOffset(ctx), .addend: 0, .sym: &sym}); |
1940 | } |
1941 | |
1942 | if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE)) |
1943 | addTpOffsetGotEntry(ctx, sym); |
1944 | }; |
1945 | |
1946 | GotSection *got = ctx.in.got.get(); |
1947 | if (ctx.needsTlsLd.load(m: std::memory_order_relaxed) && got->addTlsIndex()) { |
1948 | static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0); |
1949 | if (ctx.arg.shared) |
1950 | ctx.mainPart->relaDyn->addReloc( |
1951 | reloc: {ctx.target->tlsModuleIndexRel, got, got->getTlsIndexOff()}); |
1952 | else |
1953 | got->addConstant(r: {.expr: R_ADDEND, .type: ctx.target->symbolicRel, |
1954 | .offset: got->getTlsIndexOff(), .addend: 1, .sym: &dummy}); |
1955 | } |
1956 | |
1957 | assert(ctx.symAux.size() == 1); |
1958 | for (Symbol *sym : ctx.symtab->getSymbols()) |
1959 | fn(*sym); |
1960 | |
1961 | // Local symbols may need the aforementioned non-preemptible ifunc and GOT |
1962 | // handling. They don't need regular PLT. |
1963 | for (ELFFileBase *file : ctx.objectFiles) |
1964 | for (Symbol *sym : file->getLocalSymbols()) |
1965 | fn(*sym); |
1966 | } |
1967 | |
1968 | static bool mergeCmp(const InputSection *a, const InputSection *b) { |
1969 | // std::merge requires a strict weak ordering. |
1970 | if (a->outSecOff < b->outSecOff) |
1971 | return true; |
1972 | |
1973 | // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection. |
1974 | if (a->outSecOff == b->outSecOff && a != b) { |
1975 | auto *ta = dyn_cast<ThunkSection>(Val: a); |
1976 | auto *tb = dyn_cast<ThunkSection>(Val: b); |
1977 | |
1978 | // Check if Thunk is immediately before any specific Target |
1979 | // InputSection for example Mips LA25 Thunks. |
1980 | if (ta && ta->getTargetInputSection() == b) |
1981 | return true; |
1982 | |
1983 | // Place Thunk Sections without specific targets before |
1984 | // non-Thunk Sections. |
1985 | if (ta && !tb && !ta->getTargetInputSection()) |
1986 | return true; |
1987 | } |
1988 | |
1989 | return false; |
1990 | } |
1991 | |
1992 | // Call Fn on every executable InputSection accessed via the linker script |
1993 | // InputSectionDescription::Sections. |
1994 | static void forEachInputSectionDescription( |
1995 | ArrayRef<OutputSection *> outputSections, |
1996 | llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) { |
1997 | for (OutputSection *os : outputSections) { |
1998 | if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR)) |
1999 | continue; |
2000 | for (SectionCommand *bc : os->commands) |
2001 | if (auto *isd = dyn_cast<InputSectionDescription>(Val: bc)) |
2002 | fn(os, isd); |
2003 | } |
2004 | } |
2005 | |
// Bind the thunk creator to the link context it will use for all passes.
ThunkCreator::ThunkCreator(Ctx &ctx) : ctx(ctx) {}
2007 | |
// Out-of-line destructor; presumably kept here so member types may be
// incomplete in the header -- TODO confirm against the class declaration.
ThunkCreator::~ThunkCreator() {}
2009 | |
2010 | // Thunk Implementation |
2011 | // |
2012 | // Thunks (sometimes called stubs, veneers or branch islands) are small pieces |
// of code that the linker inserts in between a caller and a callee. The thunks
2014 | // are added at link time rather than compile time as the decision on whether |
2015 | // a thunk is needed, such as the caller and callee being out of range, can only |
2016 | // be made at link time. |
2017 | // |
2018 | // It is straightforward to tell given the current state of the program when a |
2019 | // thunk is needed for a particular call. The more difficult part is that |
2020 | // the thunk needs to be placed in the program such that the caller can reach |
2021 | // the thunk and the thunk can reach the callee; furthermore, adding thunks to |
2022 | // the program alters addresses, which can mean more thunks etc. |
2023 | // |
2024 | // In lld we have a synthetic ThunkSection that can hold many Thunks. |
2025 | // The decision to have a ThunkSection act as a container means that we can |
2026 | // more easily handle the most common case of a single block of contiguous |
2027 | // Thunks by inserting just a single ThunkSection. |
2028 | // |
2029 | // The implementation of Thunks in lld is split across these areas |
2030 | // Relocations.cpp : Framework for creating and placing thunks |
2031 | // Thunks.cpp : The code generated for each supported thunk |
2032 | // Target.cpp : Target specific hooks that the framework uses to decide when |
2033 | // a thunk is used |
2034 | // Synthetic.cpp : Implementation of ThunkSection |
2035 | // Writer.cpp : Iteratively call framework until no more Thunks added |
2036 | // |
2037 | // Thunk placement requirements: |
2038 | // Mips LA25 thunks. These must be placed immediately before the callee section |
2039 | // We can assume that the caller is in range of the Thunk. These are modelled |
2040 | // by Thunks that return the section they must precede with |
2041 | // getTargetInputSection(). |
2042 | // |
2043 | // ARM interworking and range extension thunks. These thunks must be placed |
2044 | // within range of the caller. All implemented ARM thunks can always reach the |
2045 | // callee as they use an indirect jump via a register that has no range |
2046 | // restrictions. |
2047 | // |
2048 | // Thunk placement algorithm: |
2049 | // For Mips LA25 ThunkSections; the placement is explicit, it has to be before |
2050 | // getTargetInputSection(). |
2051 | // |
2052 | // For thunks that must be placed within range of the caller there are many |
2053 | // possible choices given that the maximum range from the caller is usually |
2054 | // much larger than the average InputSection size. Desirable properties include: |
2055 | // - Maximize reuse of thunks by multiple callers |
2056 | // - Minimize number of ThunkSections to simplify insertion |
2057 | // - Handle impact of already added Thunks on addresses |
2058 | // - Simple to understand and implement |
2059 | // |
2060 | // In lld for the first pass, we pre-create one or more ThunkSections per |
2061 | // InputSectionDescription at Target specific intervals. A ThunkSection is |
2062 | // placed so that the estimated end of the ThunkSection is within range of the |
2063 | // start of the InputSectionDescription or the previous ThunkSection. For |
2064 | // example: |
2065 | // InputSectionDescription |
2066 | // Section 0 |
2067 | // ... |
2068 | // Section N |
2069 | // ThunkSection 0 |
2070 | // Section N + 1 |
2071 | // ... |
2072 | // Section N + K |
2073 | // Thunk Section 1 |
2074 | // |
2075 | // The intention is that we can add a Thunk to a ThunkSection that is well |
2076 | // spaced enough to service a number of callers without having to do a lot |
2077 | // of work. An important principle is that it is not an error if a Thunk cannot |
2078 | // be placed in a pre-created ThunkSection; when this happens we create a new |
2079 | // ThunkSection placed next to the caller. This allows us to handle the vast |
2080 | // majority of thunks simply, but also handle rare cases where the branch range |
2081 | // is smaller than the target specific spacing. |
2082 | // |
2083 | // The algorithm is expected to create all the thunks that are needed in a |
2084 | // single pass, with a small number of programs needing a second pass due to |
2085 | // the insertion of thunks in the first pass increasing the offset between |
2086 | // callers and callees that were only just in range. |
2087 | // |
2088 | // A consequence of allowing new ThunkSections to be created outside of the |
2089 | // pre-created ThunkSections is that in rare cases calls to Thunks that were in |
2090 | // range in pass K, are out of range in some pass > K due to the insertion of |
2091 | // more Thunks in between the caller and callee. When this happens we retarget |
2092 | // the relocation back to the original target and create another Thunk. |
2093 | |
2094 | // Remove ThunkSections that are empty, this should only be the initial set |
2095 | // precreated on pass 0. |
2096 | |
// Insert the Thunks for OutputSection OS into their designated place
// in the Sections vector, and recalculate the InputSection output section
// offsets.
// This may invalidate any output section offsets stored outside of
// InputSection, because inserting ThunkSections shifts everything after them.
void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) {
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->thunkSections.empty())
          return;

        // Remove any zero sized precreated Thunks. These should only be the
        // initial set pre-created on pass 0 that no caller ended up needing.
        llvm::erase_if(C&: isd->thunkSections,
                       P: [](const std::pair<ThunkSection *, uint32_t> &ts) {
                         return ts.first->getSize() == 0;
                       });

        // ISD->ThunkSections contains all created ThunkSections, including
        // those inserted in previous passes. Extract the Thunks created this
        // pass (second member of the pair records the creating pass) and
        // order them in ascending outSecOff.
        std::vector<ThunkSection *> newThunks;
        for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections)
          if (ts.second == pass)
            newThunks.push_back(x: ts.first);
        // stable_sort: ThunkSections at equal outSecOff keep creation order.
        llvm::stable_sort(Range&: newThunks,
                          C: [](const ThunkSection *a, const ThunkSection *b) {
                            return a->outSecOff < b->outSecOff;
                          });

        // Merge sorted vectors of Thunks and InputSections by outSecOff.
        // Both ranges must already be sorted by mergeCmp's ordering for
        // std::merge to produce a sorted result.
        SmallVector<InputSection *, 0> tmp;
        tmp.reserve(N: isd->sections.size() + newThunks.size());

        std::merge(first1: isd->sections.begin(), last1: isd->sections.end(),
                   first2: newThunks.begin(), last2: newThunks.end(), result: std::back_inserter(x&: tmp),
                   comp: mergeCmp);

        isd->sections = std::move(tmp);
      });
}
2136 | |
2137 | static int64_t getPCBias(Ctx &ctx, RelType type) { |
2138 | if (ctx.arg.emachine != EM_ARM) |
2139 | return 0; |
2140 | switch (type) { |
2141 | case R_ARM_THM_JUMP19: |
2142 | case R_ARM_THM_JUMP24: |
2143 | case R_ARM_THM_CALL: |
2144 | return 4; |
2145 | default: |
2146 | return 8; |
2147 | } |
2148 | } |
2149 | |
2150 | // Find or create a ThunkSection within the InputSectionDescription (ISD) that |
2151 | // is in range of Src. An ISD maps to a range of InputSections described by a |
2152 | // linker script section pattern such as { .text .text.* }. |
2153 | ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os, |
2154 | InputSection *isec, |
2155 | InputSectionDescription *isd, |
2156 | const Relocation &rel, |
2157 | uint64_t src) { |
2158 | // See the comment in getThunk for -pcBias below. |
2159 | const int64_t pcBias = getPCBias(ctx, type: rel.type); |
2160 | for (std::pair<ThunkSection *, uint32_t> tp : isd->thunkSections) { |
2161 | ThunkSection *ts = tp.first; |
2162 | uint64_t tsBase = os->addr + ts->outSecOff - pcBias; |
2163 | uint64_t tsLimit = tsBase + ts->getSize(); |
2164 | if (ctx.target->inBranchRange(type: rel.type, src, |
2165 | dst: (src > tsLimit) ? tsBase : tsLimit)) |
2166 | return ts; |
2167 | } |
2168 | |
2169 | // No suitable ThunkSection exists. This can happen when there is a branch |
2170 | // with lower range than the ThunkSection spacing or when there are too |
2171 | // many Thunks. Create a new ThunkSection as close to the InputSection as |
2172 | // possible. Error if InputSection is so large we cannot place ThunkSection |
2173 | // anywhere in Range. |
2174 | uint64_t thunkSecOff = isec->outSecOff; |
2175 | if (!ctx.target->inBranchRange(type: rel.type, src, |
2176 | dst: os->addr + thunkSecOff + rel.addend)) { |
2177 | thunkSecOff = isec->outSecOff + isec->getSize(); |
2178 | if (!ctx.target->inBranchRange(type: rel.type, src, |
2179 | dst: os->addr + thunkSecOff + rel.addend)) |
2180 | Fatal(ctx) << "InputSection too large for range extension thunk " |
2181 | << isec->getObjMsg(offset: src - (os->addr << isec->outSecOff)); |
2182 | } |
2183 | return addThunkSection(os, isd, off: thunkSecOff); |
2184 | } |
2185 | |
2186 | // Add a Thunk that needs to be placed in a ThunkSection that immediately |
2187 | // precedes its Target. |
2188 | ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) { |
2189 | ThunkSection *ts = thunkedSections.lookup(Val: isec); |
2190 | if (ts) |
2191 | return ts; |
2192 | |
2193 | // Find InputSectionRange within Target Output Section (TOS) that the |
2194 | // InputSection (IS) that we need to precede is in. |
2195 | OutputSection *tos = isec->getParent(); |
2196 | for (SectionCommand *bc : tos->commands) { |
2197 | auto *isd = dyn_cast<InputSectionDescription>(Val: bc); |
2198 | if (!isd || isd->sections.empty()) |
2199 | continue; |
2200 | |
2201 | InputSection *first = isd->sections.front(); |
2202 | InputSection *last = isd->sections.back(); |
2203 | |
2204 | if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff) |
2205 | continue; |
2206 | |
2207 | ts = addThunkSection(os: tos, isd, off: isec->outSecOff); |
2208 | thunkedSections[isec] = ts; |
2209 | return ts; |
2210 | } |
2211 | |
2212 | return nullptr; |
2213 | } |
2214 | |
// Create one or more ThunkSections per OS that can be used to place Thunks.
// We attempt to place the ThunkSections using the following desirable
// properties:
// - Within range of the maximum number of callers
// - Minimise the number of ThunkSections
//
// We follow a simple but conservative heuristic to place ThunkSections at
// offsets that are multiples of a Target specific branch range.
// For an InputSectionDescription that is smaller than the range, a single
// ThunkSection at the end of the range will do.
//
// For an InputSectionDescription that is more than twice the size of the range,
// we place the last ThunkSection at range bytes from the end of the
// InputSectionDescription in order to increase the likelihood that the
// distance from a thunk to its target will be sufficiently small to
// allow for the creation of a short thunk.
void ThunkCreator::createInitialThunkSections(
    ArrayRef<OutputSection *> outputSections) {
  uint32_t thunkSectionSpacing = ctx.target->getThunkSectionSpacing();
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->sections.empty())
          return;

        // [isdBegin, isdEnd) is the extent of this ISD within the OS.
        uint32_t isdBegin = isd->sections.front()->outSecOff;
        uint32_t isdEnd =
            isd->sections.back()->outSecOff + isd->sections.back()->getSize();
        // -1 wraps to UINT32_MAX: by default there is no early-stop bound for
        // the final ThunkSection.
        uint32_t lastThunkLowerBound = -1;
        if (isdEnd - isdBegin > thunkSectionSpacing * 2)
          lastThunkLowerBound = isdEnd - thunkSectionSpacing;

        // isecLimit is always assigned on the first loop iteration because
        // isd->sections is non-empty (checked above).
        uint32_t isecLimit;
        uint32_t prevIsecLimit = isdBegin;
        uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;

        for (const InputSection *isec : isd->sections) {
          isecLimit = isec->outSecOff + isec->getSize();
          // When this section would cross the spacing boundary, place a
          // ThunkSection at the end of the previous section so the estimated
          // ThunkSection stays within range of the sections before it.
          if (isecLimit > thunkUpperBound) {
            addThunkSection(os, isd, off: prevIsecLimit);
            thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
          }
          // Stop once within `thunkSectionSpacing` of the ISD end; the final
          // ThunkSection below covers the remainder.
          if (isecLimit > lastThunkLowerBound)
            break;
          prevIsecLimit = isecLimit;
        }
        addThunkSection(os, isd, off: isecLimit);
      });
}
2263 | |
// Create a ThunkSection at offset `off` within OutputSection `os`, register
// it (tagged with the current pass) on the InputSectionDescription, and
// return it.
ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
                                            InputSectionDescription *isd,
                                            uint64_t off) {
  auto *ts = make<ThunkSection>(args&: ctx, args&: os, args&: off);
  ts->partition = os->partition;
  if ((ctx.arg.fixCortexA53Errata843419 || ctx.arg.fixCortexA8) &&
      !isd->sections.empty()) {
    // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
    // thunks we disturb the base addresses of sections placed after the thunks
    // this makes patches we have generated redundant, and may cause us to
    // generate more patches as different instructions are now in sensitive
    // locations. When we generate more patches we may force more branches to
    // go out of range, causing more thunks to be generated. In pathological
    // cases this can cause the address dependent content pass not to converge.
    // We fix this by rounding up the size of the ThunkSection to 4KiB, this
    // limits the insertion of a ThunkSection on the addresses modulo 4 KiB,
    // which means that adding Thunks to the section does not invalidate
    // errata patches for following code.
    // Rounding up the size to 4KiB has consequences for code-size and can
    // trip up linker script defined assertions. For example the linux kernel
    // has an assertion that what LLD represents as an InputSectionDescription
    // does not exceed 4 KiB even if the overall OutputSection is > 128 MiB.
    // We use the heuristic of rounding up the size when both of the following
    // conditions are true:
    // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
    //     accounts for the case where no single InputSectionDescription is
    //     larger than the OutputSection size. This is conservative but simple.
    // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
    //     any assertion failures that an InputSectionDescription is < 4 KiB
    //     in size.
    uint64_t isdSize = isd->sections.back()->outSecOff +
                       isd->sections.back()->getSize() -
                       isd->sections.front()->outSecOff;
    if (os->size > ctx.target->getThunkSectionSpacing() && isdSize > 4096)
      ts->roundUpSizeForErrata = true;
  }
  // Record the pass that created this ThunkSection so mergeThunks can pick
  // out only this pass's sections.
  isd->thunkSections.push_back(Elt: {ts, pass});
  return ts;
}
2303 | |
2304 | static bool isThunkSectionCompatible(InputSection *source, |
2305 | SectionBase *target) { |
2306 | // We can't reuse thunks in different loadable partitions because they might |
2307 | // not be loaded. But partition 1 (the main partition) will always be loaded. |
2308 | if (source->partition != target->partition) |
2309 | return target->partition == 1; |
2310 | return true; |
2311 | } |
2312 | |
// Return a (thunk, isNew) pair for the branch described by (isec, rel, src):
// an existing reusable Thunk when one is in range and compatible, otherwise
// a freshly created one (isNew == true).
std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
                                                Relocation &rel, uint64_t src) {
  SmallVector<std::unique_ptr<Thunk>, 0> *thunkVec = nullptr;
  // Arm and Thumb have a PC Bias of 8 and 4 respectively, this is cancelled
  // out in the relocation addend. We compensate for the PC bias so that
  // an Arm and Thumb relocation to the same destination get the same keyAddend,
  // which is usually 0.
  const int64_t pcBias = getPCBias(ctx, type: rel.type);
  const int64_t keyAddend = rel.addend + pcBias;

  // We use a ((section, offset), addend) pair to find the thunk position if
  // possible so that we create only one thunk for aliased symbols or ICFed
  // sections. There may be multiple relocations sharing the same (section,
  // offset + addend) pair. We may revert the relocation back to its original
  // non-Thunk target, so we cannot fold offset + addend.
  if (auto *d = dyn_cast<Defined>(Val: rel.sym))
    if (!d->isInPlt(ctx) && d->section)
      thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
                                                    keyAddend}];
  // Non-Defined (or PLT-resolved) targets are keyed by symbol instead.
  if (!thunkVec)
    thunkVec = &thunkedSymbols[{rel.sym, keyAddend}];

  // Check existing Thunks for Sym to see if they can be reused: they must be
  // in a compatible partition, compatible with this relocation, and in branch
  // range of src.
  for (auto &t : *thunkVec)
    if (isThunkSectionCompatible(source: isec, target: t->getThunkTargetSym()->section) &&
        t->isCompatibleWith(*isec, rel) &&
        ctx.target->inBranchRange(type: rel.type, src,
                                  dst: t->getThunkTargetSym()->getVA(ctx, addend: -pcBias)))
      return std::make_pair(x: t.get(), y: false);

  // No existing compatible Thunk in range, create a new one
  thunkVec->push_back(Elt: addThunk(ctx, isec: *isec, rel));
  return std::make_pair(x: thunkVec->back().get(), y: true);
}
2347 | |
2348 | std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d, |
2349 | int64_t a) { |
2350 | auto [it, isNew] = landingPadsBySectionAndAddend.try_emplace( |
2351 | Key: {{d.section, d.value}, a}, Args: nullptr); |
2352 | if (isNew) |
2353 | it->second = addLandingPadThunk(ctx, s&: d, a); |
2354 | return {it->second.get(), isNew}; |
2355 | } |
2356 | |
2357 | // Return true if the relocation target is an in range Thunk. |
2358 | // Return false if the relocation is not to a Thunk. If the relocation target |
2359 | // was originally to a Thunk, but is no longer in range we revert the |
2360 | // relocation back to its original non-Thunk target. |
2361 | bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) { |
2362 | if (Thunk *t = thunks.lookup(Val: rel.sym)) { |
2363 | if (ctx.target->inBranchRange(type: rel.type, src, |
2364 | dst: rel.sym->getVA(ctx, addend: rel.addend))) |
2365 | return true; |
2366 | rel.sym = &t->destination; |
2367 | rel.addend = t->addend; |
2368 | if (rel.sym->isInPlt(ctx)) |
2369 | rel.expr = toPlt(expr: rel.expr); |
2370 | } |
2371 | return false; |
2372 | } |
2373 | |
2374 | // When indirect branches are restricted, such as AArch64 BTI Thunks may need |
2375 | // to target a linker generated landing pad instead of the target. This needs |
2376 | // to be done once per pass as the need for a BTI thunk is dependent whether |
2377 | // a thunk is short or long. We iterate over all the thunks to make sure we |
2378 | // catch thunks that have been created but are no longer live. Non-live thunks |
2379 | // are not reachable via normalizeExistingThunk() but are still written. |
2380 | bool ThunkCreator::addSyntheticLandingPads() { |
2381 | bool addressesChanged = false; |
2382 | for (Thunk *t : allThunks) { |
2383 | if (!t->needsSyntheticLandingPad()) |
2384 | continue; |
2385 | Thunk *lpt; |
2386 | bool isNew; |
2387 | auto &dr = cast<Defined>(Val&: t->destination); |
2388 | std::tie(args&: lpt, args&: isNew) = getSyntheticLandingPad(d&: dr, a: t->addend); |
2389 | if (isNew) { |
2390 | addressesChanged = true; |
2391 | getISThunkSec(isec: cast<InputSection>(Val: dr.section))->addThunk(t: lpt); |
2392 | } |
2393 | t->landingPad = lpt->getThunkTargetSym(); |
2394 | } |
2395 | return addressesChanged; |
2396 | } |
2397 | |
// Process all relocations from the InputSections that have been assigned
// to InputSectionDescriptions and redirect through Thunks if needed. The
// function should be called iteratively until it returns false.
//
// PreConditions:
// All InputSections that may need a Thunk are reachable from
// OutputSectionCommands.
//
// All OutputSections have an address and all InputSections have an offset
// within the OutputSection.
//
// The offsets between caller (relocation place) and callee
// (relocation target) will not be modified outside of createThunks().
//
// PostConditions:
// If return value is true then ThunkSections have been inserted into
// OutputSections. All relocations that needed a Thunk based on the information
// available to createThunks() on entry have been redirected to a Thunk. Note
// that adding Thunks changes offsets between caller and callee so more Thunks
// may be required.
//
// If return value is false then no more Thunks are needed, and createThunks has
// made no changes. If the target requires range extension thunks, currently
// ARM, then any future change in offset between caller and callee risks a
// relocation out of range error.
bool ThunkCreator::createThunks(uint32_t pass,
                                ArrayRef<OutputSection *> outputSections) {
  this->pass = pass;
  bool addressesChanged = false;

  // Pre-create spaced ThunkSections on the first pass only, and only for
  // targets that define a thunk section spacing.
  if (pass == 0 && ctx.target->getThunkSectionSpacing())
    createInitialThunkSections(outputSections);

  // BTI landing pads are AArch64-specific; see addSyntheticLandingPads().
  if (ctx.arg.emachine == EM_AARCH64)
    addressesChanged = addSyntheticLandingPads();

  // Create all the Thunks and insert them into synthetic ThunkSections. The
  // ThunkSections are later inserted back into InputSectionDescriptions.
  // We separate the creation of ThunkSections from the insertion of the
  // ThunkSections as ThunkSections are not always inserted into the same
  // InputSectionDescription as the caller.
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs()) {
            uint64_t src = isec->getVA(offset: rel.offset);

            // If we are a relocation to an existing Thunk, check if it is
            // still in range. If not then Rel will be altered to point to its
            // original target so another Thunk can be generated.
            if (pass > 0 && normalizeExistingThunk(rel, src))
              continue;

            if (!ctx.target->needsThunk(expr: rel.expr, relocType: rel.type, file: isec->file, branchAddr: src,
                                        s: *rel.sym, a: rel.addend))
              continue;

            Thunk *t;
            bool isNew;
            std::tie(args&: t, args&: isNew) = getThunk(isec, rel, src);

            if (isNew) {
              // Find or create a ThunkSection for the new Thunk. Thunks with
              // a specific target InputSection (e.g. Mips LA25) are placed
              // immediately before it; all others go in an ISD ThunkSection
              // in range of the caller.
              ThunkSection *ts;
              if (auto *tis = t->getTargetInputSection())
                ts = getISThunkSec(isec: tis);
              else
                ts = getISDThunkSec(os, isec, isd, rel, src);
              ts->addThunk(t);
              thunks[t->getThunkTargetSym()] = t;
              allThunks.push_back(x: t);
            }

            // Redirect relocation to Thunk, we never go via the PLT to a Thunk
            rel.sym = t->getThunkTargetSym();
            rel.expr = fromPlt(expr: rel.expr);

            // On AArch64 and PPC, a jump/call relocation may be encoded as
            // STT_SECTION + non-zero addend, clear the addend after
            // redirection.
            if (ctx.arg.emachine != EM_MIPS)
              rel.addend = -getPCBias(ctx, type: rel.type);
          }

        // Assign offsets within each ThunkSection; any size change means
        // addresses moved and another pass is needed.
        for (auto &p : isd->thunkSections)
          addressesChanged |= p.first->assignOffsets();
      });

  for (auto &p : thunkedSections)
    addressesChanged |= p.second->assignOffsets();

  // Merge all created synthetic ThunkSections back into OutputSection
  mergeThunks(outputSections);
  return addressesChanged;
}
2493 | |
2494 | // The following aid in the conversion of call x@GDPLT to call __tls_get_addr |
2495 | // hexagonNeedsTLSSymbol scans for relocations would require a call to |
2496 | // __tls_get_addr. |
2497 | // hexagonTLSSymbolUpdate rebinds the relocation to __tls_get_addr. |
2498 | bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) { |
2499 | bool needTlsSymbol = false; |
2500 | forEachInputSectionDescription( |
2501 | outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) { |
2502 | for (InputSection *isec : isd->sections) |
2503 | for (Relocation &rel : isec->relocs()) |
2504 | if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { |
2505 | needTlsSymbol = true; |
2506 | return; |
2507 | } |
2508 | }); |
2509 | return needTlsSymbol; |
2510 | } |
2511 | |
// Rebind every TLS relocation that goes via the PLT to __tls_get_addr, and
// create a single PLT entry for __tls_get_addr on first use.
void elf::hexagonTLSSymbolUpdate(Ctx &ctx) {
  Symbol *sym = ctx.symtab->find(name: "__tls_get_addr");
  if (!sym)
    return;
  // needEntry ensures the PLT entry is allocated exactly once, on the first
  // matching relocation.
  bool needEntry = true;
  forEachInputSectionDescription(
      outputSections: ctx.outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs())
            if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
              if (needEntry) {
                sym->allocateAux(ctx);
                addPltEntry(ctx, plt&: *ctx.in.plt, gotPlt&: *ctx.in.gotPlt, rel&: *ctx.in.relaPlt,
                            type: ctx.target->pltRel, sym&: *sym);
                needEntry = false;
              }
              // Redirect the relocation to __tls_get_addr.
              rel.sym = sym;
            }
      });
}
2532 | |
2533 | static bool matchesRefTo(const NoCrossRefCommand &cmd, StringRef osec) { |
2534 | if (cmd.toFirst) |
2535 | return cmd.outputSections[0] == osec; |
2536 | return llvm::is_contained(Range: cmd.outputSections, Element: osec); |
2537 | } |
2538 | |
// Scan `sec`'s relocations and report each reference that lands in an output
// section prohibited by the NOCROSSREFS/NOCROSSREFS_TO command `cmd`.
template <class ELFT, class Rels>
static void scanCrossRefs(Ctx &ctx, const NoCrossRefCommand &cmd,
                          OutputSection *osec, InputSection *sec, Rels rels) {
  for (const auto &r : rels) {
    Symbol &sym = sec->file->getSymbol(symbolIndex: r.getSymbol(ctx.arg.isMips64EL));
    // A legal cross-reference is when the destination output section is
    // nullptr, osec for a self-reference, or a section that is described by the
    // NOCROSSREFS/NOCROSSREFS_TO command.
    auto *dstOsec = sym.getOutputSection();
    if (!dstOsec || dstOsec == osec || !matchesRefTo(cmd, osec: dstOsec->name))
      continue;

    // Prefer the symbol's own name in the diagnostic; for STT_SECTION
    // symbols fall back to the name of the section they refer to.
    std::string toSymName;
    if (!sym.isSection())
      toSymName = toStr(ctx, sym);
    else if (auto *d = dyn_cast<Defined>(Val: &sym))
      toSymName = d->section->name;
    Err(ctx) << sec->getLocation(offset: r.r_offset)
             << ": prohibited cross reference from '"<< osec->name << "' to '"
             << toSymName << "' in '"<< dstOsec->name << "'";
  }
}
2561 | |
// For each output section described by at least one NOCROSSREFS(_TO) command,
// scan relocations from its input sections for prohibited cross references.
template <class ELFT> void elf::checkNoCrossRefs(Ctx &ctx) {
  for (OutputSection *osec : ctx.outputSections) {
    for (const NoCrossRefCommand &noxref : ctx.script->noCrossRefs) {
      // Only scan sections named in the command; for NOCROSSREFS_TO the
      // first listed section is the destination, so references *from* it are
      // allowed and it is skipped here.
      if (!llvm::is_contained(Range: noxref.outputSections, Element: osec->name) ||
          (noxref.toFirst && noxref.outputSections[0] == osec->name))
        continue;
      for (SectionCommand *cmd : osec->commands) {
        auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
        if (!isd)
          continue;
        // Sections are independent, so scan them in parallel; diagnostics go
        // through Err(ctx).
        parallelForEach(isd->sections, [&](InputSection *sec) {
          invokeOnRelocs(*sec, scanCrossRefs<ELFT>, ctx, noxref, osec, sec);
        });
      }
    }
  }
}
2581 | |
2582 | template void elf::scanRelocations<ELF32LE>(Ctx &); |
2583 | template void elf::scanRelocations<ELF32BE>(Ctx &); |
2584 | template void elf::scanRelocations<ELF64LE>(Ctx &); |
2585 | template void elf::scanRelocations<ELF64BE>(Ctx &); |
2586 | |
2587 | template void elf::checkNoCrossRefs<ELF32LE>(Ctx &); |
2588 | template void elf::checkNoCrossRefs<ELF32BE>(Ctx &); |
2589 | template void elf::checkNoCrossRefs<ELF64LE>(Ctx &); |
2590 | template void elf::checkNoCrossRefs<ELF64BE>(Ctx &); |
2591 |
Definitions
- printDefinedLocation
- printLocation
- reportRangeError
- reportRangeError
- buildMask
- buildMask
- oneof
- getMipsPairType
- isAbsolute
- isAbsoluteValue
- needsPlt
- needsGot
- isRelExpr
- toPlt
- fromPlt
- isReadOnly
- getSymbolsAt
- replaceWithDefined
- addCopyRelSymbol
- OffsetGetter
- OffsetGetter
- OffsetGetter
- get
- RelocationScanner
- RelocationScanner
- computeMipsAddend
- maybeReportDiscarded
- canSuggestExternCForCXX
- getAlternativeSpelling
- reportUndefinedSymbol
- reportUndefinedSymbols
- maybeReportUndefined
- getMipsN32RelType
- addRelativeReloc
- addPltEntry
- addGotEntry
- addGotAuthEntry
- addTpOffsetGotEntry
- canDefineSymbolInExecutable
- isStaticLinkTimeConstant
- processAux
- handleMipsTlsRelocation
- handleAArch64PAuthTlsRelocation
- handleTlsRelocation
- scanOne
- checkPPC64TLSRelax
- scan
- scanSection
- scanRelocations
- getIRelativeSection
- handleNonPreemptibleIfunc
- postScanRelocations
- mergeCmp
- forEachInputSectionDescription
- ThunkCreator
- ~ThunkCreator
- mergeThunks
- getPCBias
- getISDThunkSec
- getISThunkSec
- createInitialThunkSections
- addThunkSection
- isThunkSectionCompatible
- getThunk
- getSyntheticLandingPad
- normalizeExistingThunk
- addSyntheticLandingPads
- createThunks
- hexagonNeedsTLSSymbol
- hexagonTLSSymbolUpdate
- matchesRefTo
- scanCrossRefs
Improve your Profiling and Debugging skills
Find out more