//===- Target.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLD_ELF_TARGET_H
#define LLD_ELF_TARGET_H

#include "Config.h"
#include "InputSection.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <array>

namespace lld {
namespace elf {
class Defined;
class InputFile;
class Symbol;

std::string toStr(Ctx &, RelType type);

class TargetInfo {
public:
  TargetInfo(Ctx &ctx) : ctx(ctx) {}
  virtual uint32_t calcEFlags() const { return 0; }
  virtual RelExpr getRelExpr(RelType type, const Symbol &s,
                             const uint8_t *loc) const = 0;
  virtual RelType getDynRel(RelType type) const { return 0; }
  virtual void writeGotPltHeader(uint8_t *buf) const {}
  virtual void writeGotHeader(uint8_t *buf) const {}
  virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
  virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }

  // If lazy binding is supported, the first entry of the PLT has code
  // to call the dynamic linker to resolve PLT entries the first time
  // they are called. This function writes that code.
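  //
  // As an illustrative sketch (the conventional x86-64 lazy-binding scheme,
  // not a normative encoding), such a header pushes the link map pointer
  // stored in .got.plt[1] and jumps through .got.plt[2], which the dynamic
  // loader fills with the address of its lazy resolver:
  //
  //   pushq GOTPLT+8(%rip)    # push .got.plt[1] (the link map)
  //   jmp   *GOTPLT+16(%rip)  # jump to the resolver stored in .got.plt[2]
  //
  // (GOTPLT is a placeholder for the address of .got.plt.)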
  virtual void writePltHeader(uint8_t *buf) const {}

  virtual void writePlt(uint8_t *buf, const Symbol &sym,
                        uint64_t pltEntryAddr) const {}
  virtual void writeIplt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {
    // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
    writePlt(buf, sym, pltEntryAddr);
  }
  virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
  virtual void addPltHeaderSymbols(InputSection &isec) const {}
  virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}

  // Returns true if a relocation only uses the low bits of a value such that
  // all those bits are in the same page. For example, a relocation that uses
  // only the low 12 bits on a system with 4 KiB pages. If this is true, the
  // bits always have the same value at runtime and we don't have to emit
  // a dynamic relocation.
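  //
  // For example (illustrative), AArch64 ":lo12:" relocations such as
  // R_AARCH64_ADD_ABS_LO12_NC encode only bits [11:0] of the symbol address;
  // with 4 KiB pages that value is the offset within the page, which a
  // page-aligned load bias cannot change.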
  virtual bool usesOnlyLowPageBits(RelType type) const;

  // Decide whether a Thunk is needed for the relocation from File
  // targeting S.
  virtual bool needsThunk(RelExpr expr, RelType relocType,
                          const InputFile *file, uint64_t branchAddr,
                          const Symbol &s, int64_t a) const;

  // On systems with range extensions we place collections of Thunks at
  // regular spacings that enable the majority of branches to reach the Thunks.
  // A value of 0 means range extension thunks are not supported.
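  //
  // A sketch of the arithmetic (illustrative numbers): if a branch encoding
  // can reach +/-16 MiB, placing thunk sections roughly every 16 MiB minus a
  // safety margin guarantees that every branch can reach at least one thunk
  // section between itself and its target.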
  virtual uint32_t getThunkSectionSpacing() const { return 0; }

  // The function with a prologue starting at Loc was compiled with
  // -fsplit-stack and it calls a function compiled without. Adjust the prologue
  // to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
  // The symbol's st_other flags are needed on PowerPC64 for determining the
  // offset to the split-stack prologue.
  virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                                uint8_t stOther) const;

  // Return true if we can reach dst from src with RelType type.
  virtual bool inBranchRange(RelType type, uint64_t src,
                             uint64_t dst) const;

  virtual void relocate(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const = 0;
  void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
    relocate(loc, Relocation{R_NONE, type, 0, 0, nullptr}, val);
  }
  virtual void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const;

  // Do a linker relaxation pass and return true if we changed something.
  virtual bool relaxOnce(int pass) const { return false; }
  // Finalize relaxation after collecting relaxation info.
  virtual void finalizeRelax(int passes) const {}

  virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
                                 JumpModType val) const {}

  virtual ~TargetInfo();

  // This deletes a jump insn at the end of the section if it is a fall-through
  // to the next section. Further, if there is a conditional jump followed by a
  // direct jump, it tries to flip the conditional jump to convert the direct
  // jump into a fall-through and delete it. Returns true if a jump
  // instruction can be deleted.
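  //
  // For example (illustrative; this is the kind of rewrite the x86-64
  // implementation performs when jump optimization is enabled), with section
  // b laid out immediately after a:
  //
  //   a:  ...             a:  ...
  //       je  b        ->     jne c    # condition flipped; the jump to b
  //       jmp c                        # becomes the fall-through, jmp deleted
  //   b:                  b: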
  virtual bool deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                                     InputSection *nextIS) const {
    return false;
  }

  Ctx &ctx;
  unsigned defaultCommonPageSize = 4096;
  unsigned defaultMaxPageSize = 4096;

  uint64_t getImageBase() const;

  // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
  bool gotBaseSymInGotPlt = false;

  static constexpr RelType noneRel = 0;
  RelType copyRel = 0;
  RelType gotRel = 0;
  RelType pltRel = 0;
  RelType relativeRel = 0;
  RelType iRelativeRel = 0;
  RelType symbolicRel = 0;
  RelType tlsDescRel = 0;
  RelType tlsGotRel = 0;
  RelType tlsModuleIndexRel = 0;
  RelType tlsOffsetRel = 0;
  unsigned gotEntrySize = ctx.arg.wordsize;
  unsigned pltEntrySize = 0;
  unsigned pltHeaderSize = 0;
  unsigned ipltEntrySize = 0;

  // At least on x86_64, positions 1 and 2 are used by the first PLT entry
  // to support lazy loading.
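  //
  // A sketch of the conventional x86-64 layout (illustrative): .got.plt[0]
  // holds the address of _DYNAMIC, entries 1 and 2 are reserved for the
  // dynamic loader, and entries from index 3 onward correspond to individual
  // PLT entries.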
  unsigned gotPltHeaderEntriesNum = 3;

  // On the PPC64 ELFv2 ABI, the first entry in the .got is the .TOC.
  unsigned gotHeaderEntriesNum = 0;

  // On the PPC64 ELFv2 ABI, the dynamic section needs DT_PPC64_OPT
  // (DT_LOPROC + 3) to be set to 0x2 if there can be multiple TOCs. Although
  // we do not emit multiple TOCs, there can be a mix of TOC and NOTOC
  // addressing which is functionally equivalent.
  int ppc64DynamicSectionOpt = 0;

  bool needsThunks = false;

  // A 4-byte field corresponding to one or more trap instructions, used to pad
  // executable OutputSections.
  std::array<uint8_t, 4> trapInstr = {};

  // Stores the NOP instructions of different sizes for the target and is used
  // to pad sections that are relaxed.
  std::optional<std::vector<std::vector<uint8_t>>> nopInstrs;

  // True if the target needs to rewrite calls to __morestack to instead call
  // __morestack_non_split when a split-stack-enabled caller calls a
  // non-split-stack callee.
  bool needsMoreStackNonSplit = true;

  virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
  virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
                                  const uint8_t *loc) const;

protected:
  // On FreeBSD x86_64 the first page cannot be mmapped.
  // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
  // installs this is set to 65536, so the first 16 pages cannot be used.
  // Given that, the smallest value that can be used in here is 0x10000.
  uint64_t defaultImageBase = 0x10000;
};

void setAArch64TargetInfo(Ctx &);
void setAMDGPUTargetInfo(Ctx &);
void setARMTargetInfo(Ctx &);
void setAVRTargetInfo(Ctx &);
void setHexagonTargetInfo(Ctx &);
void setLoongArchTargetInfo(Ctx &);
void setMSP430TargetInfo(Ctx &);
void setMipsTargetInfo(Ctx &);
void setPPC64TargetInfo(Ctx &);
void setPPCTargetInfo(Ctx &);
void setRISCVTargetInfo(Ctx &);
void setSPARCV9TargetInfo(Ctx &);
void setSystemZTargetInfo(Ctx &);
void setX86TargetInfo(Ctx &);
void setX86_64TargetInfo(Ctx &);

struct ErrorPlace {
  InputSectionBase *isec;
  std::string loc;
  std::string srcLoc;
};

// Returns the input section and corresponding source string for the given
// location.
ErrorPlace getErrorPlace(Ctx &ctx, const uint8_t *loc);

static inline std::string getErrorLoc(Ctx &ctx, const uint8_t *loc) {
  return getErrorPlace(ctx, loc).loc;
}

void processArmCmseSymbols(Ctx &);

template <class ELFT> uint32_t calcMipsEFlags(Ctx &);
uint8_t getMipsFpAbiFlag(Ctx &, InputFile *file, uint8_t oldFlag,
                         uint8_t newFlag);
bool isMipsN32Abi(Ctx &, const InputFile &f);
bool isMicroMips(Ctx &);
bool isMipsR6(Ctx &);

void writePPC32GlinkSection(Ctx &, uint8_t *buf, size_t numEntries);

unsigned getPPCDFormOp(unsigned secondaryOp);
unsigned getPPCDSFormOp(unsigned secondaryOp);

// In the PowerPC64 ELFv2 ABI a function can have two entry points. The first
// is a global entry point (GEP), which typically is used to initialize the
// TOC pointer in general purpose register 2. The second is a local entry
// point (LEP), which bypasses the TOC pointer initialization code. The
// offset between the GEP and the LEP is encoded in a function's st_other
// flags. This function returns the offset (in bytes) from the global entry
// point to the local entry point.
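//
// For example (illustrative ELFv2 code sequence), a function that needs the
// TOC pointer typically begins
//
//   func:                              # global entry point (GEP)
//     addis 2, 12, (.TOC.-func)@ha     # compute the TOC base from r12
//     addi  2, 2,  (.TOC.-func)@l
//     .localentry func, 8              # LEP is 8 bytes past the GEP
//
// so callers that already have a valid TOC pointer can enter at GEP + 8.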
unsigned getPPC64GlobalEntryToLocalEntryOffset(Ctx &, uint8_t stOther);

// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
// instruction (regardless of endianness). Therefore, the prefix is always in
// lower memory than the instruction.
void writePrefixedInst(Ctx &, uint8_t *loc, uint64_t insn);

void addPPC64SaveRestore(Ctx &);
uint64_t getPPC64TocBase(Ctx &ctx);
uint64_t getAArch64Page(uint64_t expr);
bool isAArch64BTILandingPad(Ctx &, Symbol &s, int64_t a);
template <typename ELFT> void writeARMCmseImportLib(Ctx &);
uint64_t getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type);
void riscvFinalizeRelax(int passes);
void mergeRISCVAttributesSections(Ctx &);
void addArmInputSectionMappingSymbols(Ctx &);
void addArmSyntheticSectionMappingSymbol(Defined *);
void sortArmMappingSymbols(Ctx &);
void convertArmInstructionstoBE8(Ctx &, InputSection *sec, uint8_t *buf);
void createTaggedSymbols(Ctx &);
void initSymbolAnchors(Ctx &);

void setTarget(Ctx &);

template <class ELFT> bool isMipsPIC(const Defined *sym);

const ELFSyncStream &operator<<(const ELFSyncStream &, RelType);

void reportRangeError(Ctx &, uint8_t *loc, const Relocation &rel,
                      const Twine &v, int64_t min, uint64_t max);
void reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n,
                      const Symbol &sym, const Twine &msg);

// Make sure that V can be represented as an N bit signed integer.
inline void checkInt(Ctx &ctx, uint8_t *loc, int64_t v, int n,
                     const Relocation &rel) {
  if (v != llvm::SignExtend64(v, n))
    reportRangeError(ctx, loc, rel, Twine(v), llvm::minIntN(n),
                     llvm::maxIntN(n));
}
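
// Usage sketch (hypothetical values): a target writing a 16-bit signed field
// would call checkInt(ctx, loc, val, 16, rel) first, which reports a range
// error unless -32768 <= val <= 32767.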

// Make sure that V can be represented as an N bit unsigned integer.
inline void checkUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
                      const Relocation &rel) {
  if ((v >> n) != 0)
    reportRangeError(ctx, loc, rel, Twine(v), 0, llvm::maxUIntN(n));
}

// Make sure that V can be represented as an N bit signed or unsigned integer.
inline void checkIntUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
                         const Relocation &rel) {
  // For the error message we should cast V to a signed integer so that error
  // messages show a small negative value rather than an extremely large one.
  if (v != (uint64_t)llvm::SignExtend64(v, n) && (v >> n) != 0)
    reportRangeError(ctx, loc, rel, Twine((int64_t)v), llvm::minIntN(n),
                     llvm::maxUIntN(n));
}

inline void checkAlignment(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
                           const Relocation &rel) {
  if ((v & (n - 1)) != 0)
    Err(ctx) << getErrorLoc(ctx, loc) << "improper alignment for relocation "
             << rel.type << ": 0x" << llvm::utohexstr(v)
             << " is not aligned to " << n << " bytes";
}

// Endianness-aware read/write.
inline uint16_t read16(Ctx &ctx, const void *p) {
  return llvm::support::endian::read16(p, ctx.arg.endianness);
}

inline uint32_t read32(Ctx &ctx, const void *p) {
  return llvm::support::endian::read32(p, ctx.arg.endianness);
}

inline uint64_t read64(Ctx &ctx, const void *p) {
  return llvm::support::endian::read64(p, ctx.arg.endianness);
}

inline void write16(Ctx &ctx, void *p, uint16_t v) {
  llvm::support::endian::write16(p, v, ctx.arg.endianness);
}

inline void write32(Ctx &ctx, void *p, uint32_t v) {
  llvm::support::endian::write32(p, v, ctx.arg.endianness);
}

inline void write64(Ctx &ctx, void *p, uint64_t v) {
  llvm::support::endian::write64(p, v, ctx.arg.endianness);
}
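
// Usage sketch (illustrative): a typical relocation patch reads an
// instruction word, merges in the computed value, and writes it back with the
// target's endianness, e.g.
//   write32(ctx, loc, (read32(ctx, loc) & ~0xffffu) | (val & 0xffff));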

// Overwrite a ULEB128 value and keep the original length.
inline uint64_t overwriteULEB128(uint8_t *bufLoc, uint64_t val) {
  while (*bufLoc & 0x80) {
    *bufLoc++ = 0x80 | (val & 0x7f);
    val >>= 7;
  }
  *bufLoc = val;
  return val;
}
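
// Worked example (illustrative): if the buffer holds the two-byte encoding of
// 300 (0xAC 0x02) and val is 5, the bytes become 0x85 0x00 -- the field now
// decodes to 5, and the encoded length stays two bytes because the original
// continuation bits are preserved.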
} // namespace elf
} // namespace lld

#ifdef __clang__
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#endif
#define invokeELFT(f, ...) \
  switch (ctx.arg.ekind) { \
  case lld::elf::ELF32LEKind: \
    f<llvm::object::ELF32LE>(__VA_ARGS__); \
    break; \
  case lld::elf::ELF32BEKind: \
    f<llvm::object::ELF32BE>(__VA_ARGS__); \
    break; \
  case lld::elf::ELF64LEKind: \
    f<llvm::object::ELF64LE>(__VA_ARGS__); \
    break; \
  case lld::elf::ELF64BEKind: \
    f<llvm::object::ELF64BE>(__VA_ARGS__); \
    break; \
  default: \
    llvm_unreachable("unknown ctx.arg.ekind"); \
  }
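
// Usage sketch (hypothetical callee): given a function template
//   template <class ELFT> void doSomething(Ctx &ctx);
// code that has a Ctx named `ctx` in scope can dispatch on the input ELF kind
// with:
//   invokeELFT(doSomething, ctx);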

#endif