//===- LoongArch.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/LEB128.h"

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

namespace {
class LoongArch final : public TargetInfo {
public:
  LoongArch(Ctx &);
  uint32_t calcEFlags() const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  RelType getDynRel(RelType type) const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  bool usesOnlyLowPageBits(RelType type) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  bool relaxOnce(int pass) const override;
  void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
  void finalizeRelax(int passes) const override;
};
} // end anonymous namespace

namespace {
enum Op {
  SUB_W = 0x00110000,
  SUB_D = 0x00118000,
  BREAK = 0x002a0000,
  SRLI_W = 0x00448000,
  SRLI_D = 0x00450000,
  ADDI_W = 0x02800000,
  ADDI_D = 0x02c00000,
  ANDI = 0x03400000,
  ORI = 0x03800000,
  LU12I_W = 0x14000000,
  PCADDI = 0x18000000,
  PCADDU12I = 0x1c000000,
  LD_W = 0x28800000,
  LD_D = 0x28c00000,
  JIRL = 0x4c000000,
  B = 0x50000000,
  BL = 0x54000000,
};

enum Reg {
  R_ZERO = 0,
  R_RA = 1,
  R_TP = 2,
  R_T0 = 12,
  R_T1 = 13,
  R_T2 = 14,
  R_T3 = 15,
};
} // namespace

// Mask out the input's lowest 12 bits for use with `pcalau12i`, in sequences
// like `pcalau12i + addi.[wd]` or `pcalau12i + {ld,st}.*` where the `pcalau12i`
// produces a PC-relative intermediate value with the lowest 12 bits zeroed (the
// "page") for the next instruction to add in the "page offset". (`pcalau12i`
// stands for something like "PC ALigned Add Upper that starts from the 12th
// bit, Immediate".)
//
// Here a "page" is in fact just another way to refer to the 12-bit range
// allowed by the immediate field of the addi/ld/st instructions, and not
// related to the system or the kernel's actual page size. The semantics happen
// to match the AArch64 `adrp`, so the concept of "page" is borrowed here.
static uint64_t getLoongArchPage(uint64_t p) {
  return p & ~static_cast<uint64_t>(0xfff);
}
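// For example, getLoongArchPage(0x12345678) == 0x12345000; the dropped 0x678
// is the "page offset" that the paired addi/ld/st supplies.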

static uint32_t lo12(uint32_t val) { return val & 0xfff; }

// Calculate the adjusted page delta between dest and PC.
uint64_t elf::getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type) {
  // Note that if the sequence being relocated is `pcalau12i + addi.d + lu32i.d
  // + lu52i.d`, they must be adjacent so that we can infer the PC of
  // `pcalau12i` when calculating the page delta for the other two instructions
  // (lu32i.d and lu52i.d). Compensating for all the sign extensions is a bit
  // complicated; just use the algorithm recommended by the psABI.
  uint64_t pcalau12i_pc;
  switch (type) {
  case R_LARCH_PCALA64_LO20:
  case R_LARCH_GOT64_PC_LO20:
  case R_LARCH_TLS_IE64_PC_LO20:
  case R_LARCH_TLS_DESC64_PC_LO20:
    pcalau12i_pc = pc - 8;
    break;
  case R_LARCH_PCALA64_HI12:
  case R_LARCH_GOT64_PC_HI12:
  case R_LARCH_TLS_IE64_PC_HI12:
  case R_LARCH_TLS_DESC64_PC_HI12:
    pcalau12i_pc = pc - 12;
    break;
  default:
    pcalau12i_pc = pc;
    break;
  }
  uint64_t result = getLoongArchPage(dest) - getLoongArchPage(pcalau12i_pc);
  if (dest & 0x800)
    result += 0x1000 - 0x1'0000'0000;
  if (result & 0x8000'0000)
    result += 0x1'0000'0000;
  return result;
}
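// A worked example of the adjustment above (plain arithmetic, not psABI
// text): with pc = 0 and dest = 0x1800, the raw page delta is 0x1000, but
// lo12(dest) = 0x800 sign-extends to -2048 in the follow-up instruction, so
// the delta is bumped to the next page: pcalau12i materializes 0x2000, and
// 0x2000 - 2048 == 0x1800 as desired. The +/- 0x1'0000'0000 terms likewise
// pre-compensate the 32-bit sign extensions seen by the lu32i.d/lu52i.d parts
// of the extreme code model sequence.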

static uint32_t hi20(uint32_t val) { return (val + 0x800) >> 12; }
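// The + 0x800 rounds to the nearest page so that the page base plus a
// sign-extended lo12 reconstructs the exact value: hi20(0x1800) == 0x2,
// lo12(0x1800) == 0x800 (-2048 when sign-extended), and
// (0x2 << 12) - 2048 == 0x1800.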

static uint32_t insn(uint32_t op, uint32_t d, uint32_t j, uint32_t k) {
  return op | d | (j << 5) | (k << 10);
}
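// The d, j and k operands above land in bits [4:0], [9:5] and bit 10 upwards
// respectively (k being a register or an immediate depending on the opcode).
// For example, insn(JIRL, R_ZERO, R_T3, 0) encodes `jirl $zero, $t3, 0`,
// i.e. the `jr $t3` emitted at the end of the PLT header below.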

// Extract bits v[begin:end], where range is inclusive.
static uint32_t extractBits(uint64_t v, uint32_t begin, uint32_t end) {
  return begin == 63 ? v >> end : (v & ((1ULL << (begin + 1)) - 1)) >> end;
}
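// For example, extractBits(0xabcd, 15, 8) == 0xab. The begin == 63 special
// case avoids shifting 1ULL left by 64, which would be undefined behavior.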

static uint32_t getD5(uint64_t v) { return extractBits(v, 4, 0); }

static uint32_t getJ5(uint64_t v) { return extractBits(v, 9, 5); }

static uint32_t setD5k16(uint32_t insn, uint32_t imm) {
  uint32_t immLo = extractBits(imm, 15, 0);
  uint32_t immHi = extractBits(imm, 20, 16);
  return (insn & 0xfc0003e0) | (immLo << 10) | immHi;
}

static uint32_t setD10k16(uint32_t insn, uint32_t imm) {
  uint32_t immLo = extractBits(imm, 15, 0);
  uint32_t immHi = extractBits(imm, 25, 16);
  return (insn & 0xfc000000) | (immLo << 10) | immHi;
}

static uint32_t setJ20(uint32_t insn, uint32_t imm) {
  return (insn & 0xfe00001f) | (extractBits(imm, 19, 0) << 5);
}

static uint32_t setJ5(uint32_t insn, uint32_t imm) {
  return (insn & 0xfffffc1f) | (extractBits(imm, 4, 0) << 5);
}

static uint32_t setK12(uint32_t insn, uint32_t imm) {
  return (insn & 0xffc003ff) | (extractBits(imm, 11, 0) << 10);
}

static uint32_t setK16(uint32_t insn, uint32_t imm) {
  return (insn & 0xfc0003ff) | (extractBits(imm, 15, 0) << 10);
}

static bool isJirl(uint32_t insn) {
  return (insn & 0xfc000000) == JIRL;
}

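// ULEB128 stores a value in groups of 7 bits, least significant group first,
// with the high bit of each byte flagging a continuation; e.g. 624485 is
// encoded as the bytes 0xe5 0x8e 0x26.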
static void handleUleb128(Ctx &ctx, uint8_t *loc, uint64_t val) {
  const uint32_t maxcount = 1 + 64 / 7;
  uint32_t count;
  const char *error = nullptr;
  uint64_t orig = decodeULEB128(loc, &count, nullptr, &error);
  if (count > maxcount || (count == maxcount && error))
    Err(ctx) << getErrorLoc(ctx, loc) << "extra space for uleb128";
  uint64_t mask = count < maxcount ? (1ULL << 7 * count) - 1 : -1ULL;
  encodeULEB128((orig + val) & mask, loc, count);
}

LoongArch::LoongArch(Ctx &ctx) : TargetInfo(ctx) {
  // The LoongArch ISA itself does not have a limit on page sizes. According to
  // the ISA manual, the PS (page size) field in MTLB entries and CSR.STLBPS is
  // 6 bits wide, meaning the maximum page size is 2^63 which is equivalent to
  // "unlimited".
  // However, practically the maximum usable page size is constrained by the
  // kernel implementation, and 64KiB is the biggest non-huge page size
  // supported by Linux as of v6.4. The most widespread page size in use,
  // though, is 16KiB.
  defaultCommonPageSize = 16384;
  defaultMaxPageSize = 65536;
  write32le(trapInstr.data(), BREAK); // break 0

  copyRel = R_LARCH_COPY;
  pltRel = R_LARCH_JUMP_SLOT;
  relativeRel = R_LARCH_RELATIVE;
  iRelativeRel = R_LARCH_IRELATIVE;

  if (ctx.arg.is64) {
    symbolicRel = R_LARCH_64;
    tlsModuleIndexRel = R_LARCH_TLS_DTPMOD64;
    tlsOffsetRel = R_LARCH_TLS_DTPREL64;
    tlsGotRel = R_LARCH_TLS_TPREL64;
    tlsDescRel = R_LARCH_TLS_DESC64;
  } else {
    symbolicRel = R_LARCH_32;
    tlsModuleIndexRel = R_LARCH_TLS_DTPMOD32;
    tlsOffsetRel = R_LARCH_TLS_DTPREL32;
    tlsGotRel = R_LARCH_TLS_TPREL32;
    tlsDescRel = R_LARCH_TLS_DESC32;
  }

  gotRel = symbolicRel;

  // .got.plt[0] = _dl_runtime_resolve, .got.plt[1] = link_map
  gotPltHeaderEntriesNum = 2;

  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
}

static uint32_t getEFlags(Ctx &ctx, const InputFile *f) {
  if (ctx.arg.is64)
    return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
  return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
}

static bool inputFileHasCode(const InputFile *f) {
  for (const auto *sec : f->getSections())
    if (sec && sec->flags & SHF_EXECINSTR)
      return true;

  return false;
}

uint32_t LoongArch::calcEFlags() const {
  // If there are only binary input files (from -b binary), use a
  // value of 0 for the ELF header flags.
  if (ctx.objectFiles.empty())
    return 0;

  uint32_t target = 0;
  const InputFile *targetFile;
  for (const InputFile *f : ctx.objectFiles) {
    // Do not enforce ABI compatibility if the input file does not contain
    // code. This is useful for allowing linkage with data-only object files
    // produced with tools like objcopy, that have zero e_flags.
    if (!inputFileHasCode(f))
      continue;

    // Take the first non-zero e_flags as the reference.
    uint32_t flags = getEFlags(ctx, f);
    if (target == 0 && flags != 0) {
      target = flags;
      targetFile = f;
    }

    if ((flags & EF_LOONGARCH_ABI_MODIFIER_MASK) !=
        (target & EF_LOONGARCH_ABI_MODIFIER_MASK))
      ErrAlways(ctx) << f
                     << ": cannot link object files with different ABI from "
                     << targetFile;

    // We cannot process psABI v1.x / object ABI v0 files (containing stack
    // relocations), unlike ld.bfd.
    //
    // Instead of blindly accepting every v0 object and only failing at
    // relocation processing time, just disallow interlink altogether. We
    // don't expect significant usage of object ABI v0 in the wild (the old
    // world may continue using object ABI v0 for a while, but as it's not
    // binary-compatible with the upstream i.e. new-world ecosystem, it's not
    // being considered here).
    //
    // There are briefly some new-world systems with object ABI v0 binaries
    // too. It is because these systems were built before the new ABI was
    // finalized. These are not supported either due to the extremely small
    // number of them, and the few impacted users are advised to simply
    // rebuild world or reinstall a recent system.
    if ((flags & EF_LOONGARCH_OBJABI_MASK) != EF_LOONGARCH_OBJABI_V1)
      ErrAlways(ctx) << f << ": unsupported object file ABI version";
  }

  return target;
}

int64_t LoongArch::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    InternalErr(ctx, buf) << "cannot read addend for relocation " << type;
    return 0;
  case R_LARCH_32:
  case R_LARCH_TLS_DTPMOD32:
  case R_LARCH_TLS_DTPREL32:
  case R_LARCH_TLS_TPREL32:
    return SignExtend64<32>(read32le(buf));
  case R_LARCH_64:
  case R_LARCH_TLS_DTPMOD64:
  case R_LARCH_TLS_DTPREL64:
  case R_LARCH_TLS_TPREL64:
    return read64le(buf);
  case R_LARCH_RELATIVE:
  case R_LARCH_IRELATIVE:
    return ctx.arg.is64 ? read64le(buf) : read32le(buf);
  case R_LARCH_NONE:
  case R_LARCH_JUMP_SLOT:
    // These relocations are defined as not having an implicit addend.
    return 0;
  case R_LARCH_TLS_DESC32:
    return read32le(buf + 4);
  case R_LARCH_TLS_DESC64:
    return read64le(buf + 8);
  }
}

void LoongArch::writeGotPlt(uint8_t *buf, const Symbol &s) const {
  if (ctx.arg.is64)
    write64le(buf, ctx.in.plt->getVA());
  else
    write32le(buf, ctx.in.plt->getVA());
}

void LoongArch::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  if (ctx.arg.writeAddends) {
    if (ctx.arg.is64)
      write64le(buf, s.getVA(ctx));
    else
      write32le(buf, s.getVA(ctx));
  }
}

void LoongArch::writePltHeader(uint8_t *buf) const {
  // The LoongArch PLT is currently structured just like that of RISCV.
  // Annoyingly, this means the PLT is still using `pcaddu12i` to perform
  // PC-relative addressing (because `pcaddu12i` is the same as RISCV `auipc`),
  // in contrast to the AArch64-like page-offset scheme with `pcalau12i` that
  // is used everywhere else involving PC-relative operations in the LoongArch
  // ELF psABI v2.00.
  //
  // The `pcrel_{hi20,lo12}` operators are illustrative only and not really
  // supported by LoongArch assemblers.
  //
  //   pcaddu12i $t2, %pcrel_hi20(.got.plt)
  //   sub.[wd]  $t1, $t1, $t3
  //   ld.[wd]   $t3, $t2, %pcrel_lo12(.got.plt) ; t3 = _dl_runtime_resolve
  //   addi.[wd] $t1, $t1, -pltHeaderSize-12     ; t1 = &.plt[i] - &.plt[0]
  //   addi.[wd] $t0, $t2, %pcrel_lo12(.got.plt)
  //   srli.[wd] $t1, $t1, (is64?1:2)            ; t1 = &.got.plt[i] - &.got.plt[0]
  //   ld.[wd]   $t0, $t0, Wordsize              ; t0 = link_map
  //   jr        $t3
  uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
  uint32_t sub = ctx.arg.is64 ? SUB_D : SUB_W;
  uint32_t ld = ctx.arg.is64 ? LD_D : LD_W;
  uint32_t addi = ctx.arg.is64 ? ADDI_D : ADDI_W;
  uint32_t srli = ctx.arg.is64 ? SRLI_D : SRLI_W;
  write32le(buf + 0, insn(PCADDU12I, R_T2, hi20(offset), 0));
  write32le(buf + 4, insn(sub, R_T1, R_T1, R_T3));
  write32le(buf + 8, insn(ld, R_T3, R_T2, lo12(offset)));
  write32le(buf + 12,
            insn(addi, R_T1, R_T1, lo12(-ctx.target->pltHeaderSize - 12)));
  write32le(buf + 16, insn(addi, R_T0, R_T2, lo12(offset)));
  write32le(buf + 20, insn(srli, R_T1, R_T1, ctx.arg.is64 ? 1 : 2));
  write32le(buf + 24, insn(ld, R_T0, R_T0, ctx.arg.wordsize));
  write32le(buf + 28, insn(JIRL, R_ZERO, R_T3, 0));
}

void LoongArch::writePlt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {
  // See the comment in writePltHeader for the reason why pcaddu12i is used
  // instead of the pcalau12i that's more commonly seen in the ELF psABI v2.0
  // days.
  //
  //   pcaddu12i $t3, %pcrel_hi20(f@.got.plt)
  //   ld.[wd]   $t3, $t3, %pcrel_lo12(f@.got.plt)
  //   jirl      $t1, $t3, 0
  //   nop
  uint32_t offset = sym.getGotPltVA(ctx) - pltEntryAddr;
  write32le(buf + 0, insn(PCADDU12I, R_T3, hi20(offset), 0));
  write32le(buf + 4,
            insn(ctx.arg.is64 ? LD_D : LD_W, R_T3, R_T3, lo12(offset)));
  write32le(buf + 8, insn(JIRL, R_T1, R_T3, 0));
  write32le(buf + 12, insn(ANDI, R_ZERO, R_ZERO, 0));
}

RelType LoongArch::getDynRel(RelType type) const {
  return type == ctx.target->symbolicRel ? type
                                         : static_cast<RelType>(R_LARCH_NONE);
}

RelExpr LoongArch::getRelExpr(const RelType type, const Symbol &s,
                              const uint8_t *loc) const {
  switch (type) {
  case R_LARCH_NONE:
  case R_LARCH_MARK_LA:
  case R_LARCH_MARK_PCREL:
    return R_NONE;
  case R_LARCH_32:
  case R_LARCH_64:
  case R_LARCH_ABS_HI20:
  case R_LARCH_ABS_LO12:
  case R_LARCH_ABS64_LO20:
  case R_LARCH_ABS64_HI12:
    return R_ABS;
  case R_LARCH_PCALA_LO12:
    // We could just use R_ABS, but the JIRL instruction reuses the relocation
    // type for a different purpose. The questionable usage is part of glibc
    // 2.37 libc_nonshared.a [1], which is linked into user programs, so we
    // have to work around it for a while, even if a new relocation type may
    // be introduced in the future [2].
    //
    // [1]: https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=9f482b73f41a9a1bbfb173aad0733d1c824c788a
    // [2]: https://github.com/loongson/la-abi-specs/pull/3
    return isJirl(read32le(loc)) ? R_PLT : R_ABS;
  case R_LARCH_TLS_DTPREL32:
  case R_LARCH_TLS_DTPREL64:
    return R_DTPREL;
  case R_LARCH_TLS_TPREL32:
  case R_LARCH_TLS_TPREL64:
  case R_LARCH_TLS_LE_HI20:
  case R_LARCH_TLS_LE_HI20_R:
  case R_LARCH_TLS_LE_LO12:
  case R_LARCH_TLS_LE_LO12_R:
  case R_LARCH_TLS_LE64_LO20:
  case R_LARCH_TLS_LE64_HI12:
    return R_TPREL;
  case R_LARCH_ADD6:
  case R_LARCH_ADD8:
  case R_LARCH_ADD16:
  case R_LARCH_ADD32:
  case R_LARCH_ADD64:
  case R_LARCH_ADD_ULEB128:
  case R_LARCH_SUB6:
  case R_LARCH_SUB8:
  case R_LARCH_SUB16:
  case R_LARCH_SUB32:
  case R_LARCH_SUB64:
  case R_LARCH_SUB_ULEB128:
    // The LoongArch add/sub relocs behave like the RISCV counterparts; reuse
    // the RelExpr to avoid code duplication.
    return RE_RISCV_ADD;
  case R_LARCH_32_PCREL:
  case R_LARCH_64_PCREL:
  case R_LARCH_PCREL20_S2:
    return R_PC;
  case R_LARCH_B16:
  case R_LARCH_B21:
  case R_LARCH_B26:
  case R_LARCH_CALL36:
    return R_PLT_PC;
  case R_LARCH_GOT_PC_HI20:
  case R_LARCH_GOT64_PC_LO20:
  case R_LARCH_GOT64_PC_HI12:
  case R_LARCH_TLS_IE_PC_HI20:
  case R_LARCH_TLS_IE64_PC_LO20:
  case R_LARCH_TLS_IE64_PC_HI12:
    return RE_LOONGARCH_GOT_PAGE_PC;
  case R_LARCH_GOT_PC_LO12:
  case R_LARCH_TLS_IE_PC_LO12:
    return RE_LOONGARCH_GOT;
  case R_LARCH_TLS_LD_PC_HI20:
  case R_LARCH_TLS_GD_PC_HI20:
    return RE_LOONGARCH_TLSGD_PAGE_PC;
  case R_LARCH_PCALA_HI20:
    // Why not RE_LOONGARCH_PAGE_PC? The majority of references don't go
    // through the PLT anyway, so why waste time checking only to get
    // everything relaxed back to it?
    //
    // This is again due to the R_LARCH_PCALA_LO12 on JIRL case, where we want
    // both the HI20 and LO12 to potentially refer to the PLT. But in reality
    // the HI20 reloc appears earlier, and the relocs don't contain enough
    // information to let us properly resolve semantics per symbol.
    // Unlike RISCV, our LO12 relocs *do not* point to their corresponding
    // HI20 relocs, hence it is nearly impossible to 100% accurately determine
    // each HI20's "flavor" without taking big performance hits, in the
    // presence of edge cases (e.g. HI20 without a pairing LO12; a paired LO12
    // placed so far apart that the relationship is not certain anymore) and
    // programmer mistakes (e.g. as outlined in
    // https://github.com/loongson/la-abi-specs/pull/3).
    //
    // Ideally we would scan in an extra pass for all LO12s on JIRL, then mark
    // every HI20 reloc referring to the same symbol differently; this is not
    // feasible with the current function signature of getRelExpr that doesn't
    // allow for such inter-pass state.
    //
    // So, unfortunately we have to work around this quirk the same way as
    // BFD: assume every R_LARCH_PCALA_HI20 potentially needs the PLT, and
    // only relax back to RE_LOONGARCH_PAGE_PC if it's known not to at a later
    // stage.
    return RE_LOONGARCH_PLT_PAGE_PC;
  case R_LARCH_PCALA64_LO20:
  case R_LARCH_PCALA64_HI12:
    return RE_LOONGARCH_PAGE_PC;
  case R_LARCH_GOT_HI20:
  case R_LARCH_GOT_LO12:
  case R_LARCH_GOT64_LO20:
  case R_LARCH_GOT64_HI12:
  case R_LARCH_TLS_IE_HI20:
  case R_LARCH_TLS_IE_LO12:
  case R_LARCH_TLS_IE64_LO20:
  case R_LARCH_TLS_IE64_HI12:
    return R_GOT;
  case R_LARCH_TLS_LD_HI20:
    return R_TLSLD_GOT;
  case R_LARCH_TLS_GD_HI20:
    return R_TLSGD_GOT;
  case R_LARCH_TLS_LE_ADD_R:
  case R_LARCH_RELAX:
    return ctx.arg.relax ? R_RELAX_HINT : R_NONE;
  case R_LARCH_ALIGN:
    return R_RELAX_HINT;
  case R_LARCH_TLS_DESC_PC_HI20:
  case R_LARCH_TLS_DESC64_PC_LO20:
  case R_LARCH_TLS_DESC64_PC_HI12:
    return RE_LOONGARCH_TLSDESC_PAGE_PC;
  case R_LARCH_TLS_DESC_PC_LO12:
  case R_LARCH_TLS_DESC_LD:
  case R_LARCH_TLS_DESC_HI20:
  case R_LARCH_TLS_DESC_LO12:
  case R_LARCH_TLS_DESC64_LO20:
  case R_LARCH_TLS_DESC64_HI12:
    return R_TLSDESC;
  case R_LARCH_TLS_DESC_CALL:
    return R_TLSDESC_CALL;
  case R_LARCH_TLS_LD_PCREL20_S2:
    return R_TLSLD_PC;
  case R_LARCH_TLS_GD_PCREL20_S2:
    return R_TLSGD_PC;
  case R_LARCH_TLS_DESC_PCREL20_S2:
    return R_TLSDESC_PC;

  // Other known relocs that are explicitly unimplemented:
  //
  // - psABI v1 relocs that need a stateful stack machine to work, and are not
  //   required when implementing psABI v2;
  // - relocs that are not used anywhere (R_LARCH_{ADD,SUB}_24 [1], and the
  //   two GNU vtable-related relocs).
  //
  // [1]: https://web.archive.org/web/20230709064026/https://github.com/loongson/LoongArch-Documentation/issues/51
  default:
    Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v
             << ") against symbol " << &s;
    return R_NONE;
  }
}

bool LoongArch::usesOnlyLowPageBits(RelType type) const {
  switch (type) {
  default:
    return false;
  case R_LARCH_PCALA_LO12:
  case R_LARCH_GOT_LO12:
  case R_LARCH_GOT_PC_LO12:
  case R_LARCH_TLS_IE_PC_LO12:
  case R_LARCH_TLS_DESC_LO12:
  case R_LARCH_TLS_DESC_PC_LO12:
    return true;
  }
}

void LoongArch::relocate(uint8_t *loc, const Relocation &rel,
                         uint64_t val) const {
  switch (rel.type) {
  case R_LARCH_32_PCREL:
    checkInt(ctx, loc, val, 32, rel);
    [[fallthrough]];
  case R_LARCH_32:
  case R_LARCH_TLS_DTPREL32:
    write32le(loc, val);
    return;
  case R_LARCH_64:
  case R_LARCH_TLS_DTPREL64:
  case R_LARCH_64_PCREL:
    write64le(loc, val);
    return;

  // Relocs intended for `pcaddi`.
  case R_LARCH_PCREL20_S2:
  case R_LARCH_TLS_LD_PCREL20_S2:
  case R_LARCH_TLS_GD_PCREL20_S2:
  case R_LARCH_TLS_DESC_PCREL20_S2:
    checkInt(ctx, loc, val, 22, rel);
    checkAlignment(ctx, loc, val, 4, rel);
    write32le(loc, setJ20(read32le(loc), val >> 2));
    return;

  case R_LARCH_B16:
    checkInt(ctx, loc, val, 18, rel);
    checkAlignment(ctx, loc, val, 4, rel);
    write32le(loc, setK16(read32le(loc), val >> 2));
    return;

  case R_LARCH_B21:
    checkInt(ctx, loc, val, 23, rel);
    checkAlignment(ctx, loc, val, 4, rel);
    write32le(loc, setD5k16(read32le(loc), val >> 2));
    return;

  case R_LARCH_B26:
    checkInt(ctx, loc, val, 28, rel);
    checkAlignment(ctx, loc, val, 4, rel);
    write32le(loc, setD10k16(read32le(loc), val >> 2));
    return;

  case R_LARCH_CALL36: {
    // This relocation is designed for adjacent pcaddu18i+jirl pairs that
    // are patched together. Because of the sign extension of these insns'
    // immediate fields, the relocation range is [-128G - 0x20000, +128G -
    // 0x20000) (and must, of course, be 4-byte aligned).
    if (((int64_t)val + 0x20000) != llvm::SignExtend64(val + 0x20000, 38))
      reportRangeError(ctx, loc, rel, Twine(val), llvm::minIntN(38) - 0x20000,
                       llvm::maxIntN(38) - 0x20000);
    checkAlignment(ctx, loc, val, 4, rel);
    // Since jirl performs sign extension on the offset immediate, add
    // (1 << 17) to the original val to get the correct hi20.
    uint32_t hi20 = extractBits(val + (1 << 17), 37, 18);
    // Despite the name, the lower part is actually 18 bits, 4-byte aligned.
    uint32_t lo16 = extractBits(val, 17, 2);
    write32le(loc, setJ20(read32le(loc), hi20));
    write32le(loc + 4, setK16(read32le(loc + 4), lo16));
    return;
  }
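  // A worked example of the split above (plain arithmetic, the value is
  // illustrative): for val = 0x12345678, hi20 = bits [37:18] of val + 0x20000
  // = 0x48d, and lo16 = bits [17:2] of val = 0x159e. pcaddu18i contributes
  // 0x48d << 18 = 0x12340000 and jirl adds 0x159e << 2 = 0x5678,
  // reconstructing 0x12345678.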

  // Relocs intended for `addi`, `ld` or `st`.
  case R_LARCH_PCALA_LO12:
    // We have to again inspect the insn word to handle the R_LARCH_PCALA_LO12
    // on JIRL case: firstly JIRL wants its immediate's 2 lowest zeroes
    // removed by us (in contrast to regular R_LARCH_PCALA_LO12), secondly
    // its immediate slot width is different too (16, not 12).
    // In this case, process like an R_LARCH_B16, but without overflow
    // checking and only taking the value's lowest 12 bits.
    if (isJirl(read32le(loc))) {
      checkAlignment(ctx, loc, val, 4, rel);
      val = SignExtend64<12>(val);
      write32le(loc, setK16(read32le(loc), val >> 2));
      return;
    }
    [[fallthrough]];
  case R_LARCH_ABS_LO12:
  case R_LARCH_GOT_PC_LO12:
  case R_LARCH_GOT_LO12:
  case R_LARCH_TLS_LE_LO12:
  case R_LARCH_TLS_IE_PC_LO12:
  case R_LARCH_TLS_IE_LO12:
  case R_LARCH_TLS_LE_LO12_R:
  case R_LARCH_TLS_DESC_PC_LO12:
  case R_LARCH_TLS_DESC_LO12:
    write32le(loc, setK12(read32le(loc), extractBits(val, 11, 0)));
    return;

  // Relocs intended for `lu12i.w` or `pcalau12i`.
  case R_LARCH_ABS_HI20:
  case R_LARCH_PCALA_HI20:
  case R_LARCH_GOT_PC_HI20:
  case R_LARCH_GOT_HI20:
  case R_LARCH_TLS_LE_HI20:
  case R_LARCH_TLS_IE_PC_HI20:
  case R_LARCH_TLS_IE_HI20:
  case R_LARCH_TLS_LD_PC_HI20:
  case R_LARCH_TLS_LD_HI20:
  case R_LARCH_TLS_GD_PC_HI20:
  case R_LARCH_TLS_GD_HI20:
  case R_LARCH_TLS_DESC_PC_HI20:
  case R_LARCH_TLS_DESC_HI20:
    write32le(loc, setJ20(read32le(loc), extractBits(val, 31, 12)));
    return;
  case R_LARCH_TLS_LE_HI20_R:
    write32le(loc, setJ20(read32le(loc), extractBits(val + 0x800, 31, 12)));
    return;

  // Relocs intended for `lu32i.d`.
  case R_LARCH_ABS64_LO20:
  case R_LARCH_PCALA64_LO20:
  case R_LARCH_GOT64_PC_LO20:
  case R_LARCH_GOT64_LO20:
  case R_LARCH_TLS_LE64_LO20:
  case R_LARCH_TLS_IE64_PC_LO20:
  case R_LARCH_TLS_IE64_LO20:
  case R_LARCH_TLS_DESC64_PC_LO20:
  case R_LARCH_TLS_DESC64_LO20:
    write32le(loc, setJ20(read32le(loc), extractBits(val, 51, 32)));
    return;

  // Relocs intended for `lu52i.d`.
  case R_LARCH_ABS64_HI12:
  case R_LARCH_PCALA64_HI12:
  case R_LARCH_GOT64_PC_HI12:
  case R_LARCH_GOT64_HI12:
  case R_LARCH_TLS_LE64_HI12:
  case R_LARCH_TLS_IE64_PC_HI12:
  case R_LARCH_TLS_IE64_HI12:
  case R_LARCH_TLS_DESC64_PC_HI12:
  case R_LARCH_TLS_DESC64_HI12:
    write32le(loc, setK12(read32le(loc), extractBits(val, 63, 52)));
    return;

  case R_LARCH_ADD6:
    *loc = (*loc & 0xc0) | ((*loc + val) & 0x3f);
    return;
  case R_LARCH_ADD8:
    *loc += val;
    return;
  case R_LARCH_ADD16:
    write16le(loc, read16le(loc) + val);
    return;
  case R_LARCH_ADD32:
    write32le(loc, read32le(loc) + val);
    return;
  case R_LARCH_ADD64:
    write64le(loc, read64le(loc) + val);
    return;
  case R_LARCH_ADD_ULEB128:
    handleUleb128(ctx, loc, val);
    return;
  case R_LARCH_SUB6:
    *loc = (*loc & 0xc0) | ((*loc - val) & 0x3f);
    return;
  case R_LARCH_SUB8:
    *loc -= val;
    return;
  case R_LARCH_SUB16:
    write16le(loc, read16le(loc) - val);
    return;
  case R_LARCH_SUB32:
    write32le(loc, read32le(loc) - val);
    return;
  case R_LARCH_SUB64:
    write64le(loc, read64le(loc) - val);
    return;
  case R_LARCH_SUB_ULEB128:
    handleUleb128(ctx, loc, -val);
    return;

  case R_LARCH_MARK_LA:
  case R_LARCH_MARK_PCREL:
    // no-op
    return;

  case R_LARCH_TLS_LE_ADD_R:
  case R_LARCH_RELAX:
    return; // Ignored (for now)

  case R_LARCH_TLS_DESC_LD:
    return; // nothing to do.
  case R_LARCH_TLS_DESC32:
    write32le(loc + 4, val);
    return;
  case R_LARCH_TLS_DESC64:
    write64le(loc + 8, val);
    return;

  default:
    llvm_unreachable("unknown relocation");
  }
}

static bool relaxable(ArrayRef<Relocation> relocs, size_t i) {
  return i + 1 < relocs.size() && relocs[i + 1].type == R_LARCH_RELAX;
}

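// A relaxable pair consists of four consecutive relocations on two adjacent
// instructions: a HI20-style reloc at relocs[i] with its R_LARCH_RELAX at
// i + 1, and a LO12-style reloc at relocs[i + 2] with its R_LARCH_RELAX at
// i + 3 (implied by relaxable(relocs, i + 2)).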
static bool isPairRelaxable(ArrayRef<Relocation> relocs, size_t i) {
  return relaxable(relocs, i) && relaxable(relocs, i + 2) &&
         relocs[i].offset + 4 == relocs[i + 2].offset;
}

// Relax code sequence.
// From:
//   pcalau12i $a0, %pc_hi20(sym) | %ld_pc_hi20(sym) | %gd_pc_hi20(sym)
//             | %desc_pc_hi20(sym)
//   addi.w/d $a0, $a0, %pc_lo12(sym) | %got_pc_lo12(sym) | %got_pc_lo12(sym)
//            | %desc_pc_lo12(sym)
// To:
//   pcaddi $a0, %pc_lo12(sym) | %got_pc_lo12(sym) | %got_pc_lo12(sym)
//          | %desc_pcrel_20(sym)
//
// From:
//   pcalau12i $a0, %got_pc_hi20(sym_got)
//   ld.w/d $a0, $a0, %got_pc_lo12(sym_got)
// To:
//   pcaddi $a0, %got_pc_hi20(sym_got)
static void relaxPCHi20Lo12(Ctx &ctx, const InputSection &sec, size_t i,
                            uint64_t loc, Relocation &rHi20, Relocation &rLo12,
                            uint32_t &remove) {
  // Check that the relocations form a relaxable sequence.
  if (!((rHi20.type == R_LARCH_PCALA_HI20 &&
         rLo12.type == R_LARCH_PCALA_LO12) ||
        (rHi20.type == R_LARCH_GOT_PC_HI20 &&
         rLo12.type == R_LARCH_GOT_PC_LO12) ||
        (rHi20.type == R_LARCH_TLS_GD_PC_HI20 &&
         rLo12.type == R_LARCH_GOT_PC_LO12) ||
        (rHi20.type == R_LARCH_TLS_LD_PC_HI20 &&
         rLo12.type == R_LARCH_GOT_PC_LO12) ||
        (rHi20.type == R_LARCH_TLS_DESC_PC_HI20 &&
         rLo12.type == R_LARCH_TLS_DESC_PC_LO12)))
    return;

  // GOT references to absolute symbols can't be relaxed to use pcaddi in
  // position-independent code, because these instructions produce a relative
  // address.
  // Meanwhile, skip undefined, preemptible and STT_GNU_IFUNC symbols, because
  // these symbols may be resolved at runtime.
  if (rHi20.type == R_LARCH_GOT_PC_HI20 &&
      (!rHi20.sym->isDefined() || rHi20.sym->isPreemptible ||
       rHi20.sym->isGnuIFunc() ||
       (ctx.arg.isPic && !cast<Defined>(*rHi20.sym).section)))
    return;

  uint64_t dest = 0;
  if (rHi20.expr == RE_LOONGARCH_PLT_PAGE_PC)
    dest = rHi20.sym->getPltVA(ctx);
  else if (rHi20.expr == RE_LOONGARCH_PAGE_PC ||
           rHi20.expr == RE_LOONGARCH_GOT_PAGE_PC)
    dest = rHi20.sym->getVA(ctx);
  else if (rHi20.expr == RE_LOONGARCH_TLSGD_PAGE_PC)
    dest = ctx.in.got->getGlobalDynAddr(*rHi20.sym);
  else if (rHi20.expr == RE_LOONGARCH_TLSDESC_PAGE_PC)
    dest = ctx.in.got->getTlsDescAddr(*rHi20.sym);
  else {
    Err(ctx) << getErrorLoc(ctx, (const uint8_t *)loc) << "unknown expr ("
             << rHi20.expr << ") against symbol " << rHi20.sym
             << " in relaxPCHi20Lo12";
    return;
  }
  dest += rHi20.addend;

  const int64_t displace = dest - loc;
  // Return if the displacement is not 4-byte aligned or exceeds the range of
  // pcaddi.
  if ((displace & 0x3) != 0 || !isInt<22>(displace))
    return;

  // Note: If we can ensure that the .o files generated by LLVM only contain
  // relaxable instruction sequences with R_LARCH_RELAX, then we do not need
  // to decode instructions. The relaxable instruction sequences imply the
  // following constraints:
  // * For relocation pairs related to got_pc, the opcodes of the instructions
  //   must be pcalau12i + ld.w/d. In other cases, the opcodes must be
  //   pcalau12i + addi.w/d.
  // * The destination register of pcalau12i is guaranteed to be used only by
  //   the immediately following instruction.
  const uint32_t currInsn = read32le(sec.content().data() + rHi20.offset);
  const uint32_t nextInsn = read32le(sec.content().data() + rLo12.offset);
  // Check that the two instructions use the same register.
  if (getD5(currInsn) != getJ5(nextInsn) || getJ5(nextInsn) != getD5(nextInsn))
    return;

  sec.relaxAux->relocTypes[i] = R_LARCH_RELAX;
  if (rHi20.type == R_LARCH_TLS_GD_PC_HI20)
    sec.relaxAux->relocTypes[i + 2] = R_LARCH_TLS_GD_PCREL20_S2;
  else if (rHi20.type == R_LARCH_TLS_LD_PC_HI20)
    sec.relaxAux->relocTypes[i + 2] = R_LARCH_TLS_LD_PCREL20_S2;
  else if (rHi20.type == R_LARCH_TLS_DESC_PC_HI20)
    sec.relaxAux->relocTypes[i + 2] = R_LARCH_TLS_DESC_PCREL20_S2;
  else
    sec.relaxAux->relocTypes[i + 2] = R_LARCH_PCREL20_S2;
  sec.relaxAux->writes.push_back(insn(PCADDI, getD5(nextInsn), 0, 0));
  remove = 4;
}

// Relax code sequence.
// From:
//   pcaddu18i $ra, %call36(foo)
//   jirl $ra, $ra, 0
// To:
//   b/bl foo
static void relaxCall36(Ctx &ctx, const InputSection &sec, size_t i,
                        uint64_t loc, Relocation &r, uint32_t &remove) {
  const uint64_t dest =
      (r.expr == R_PLT_PC ? r.sym->getPltVA(ctx) : r.sym->getVA(ctx)) +
      r.addend;

  const int64_t displace = dest - loc;
  // Return if the displacement is not 4-byte aligned or exceeds the range of
  // b[l].
  if ((displace & 0x3) != 0 || !isInt<28>(displace))
    return;

  const uint32_t nextInsn = read32le(sec.content().data() + r.offset + 4);
  if (getD5(nextInsn) == R_RA) {
    // Convert jirl to bl.
    sec.relaxAux->relocTypes[i] = R_LARCH_B26;
    sec.relaxAux->writes.push_back(insn(BL, 0, 0, 0));
    remove = 4;
  } else if (getD5(nextInsn) == R_ZERO) {
    // Convert jirl to b.
    sec.relaxAux->relocTypes[i] = R_LARCH_B26;
    sec.relaxAux->writes.push_back(insn(B, 0, 0, 0));
    remove = 4;
  }
}

// Relax code sequence.
// From:
//   lu12i.w $rd, %le_hi20_r(sym)
//   add.w/d $rd, $rd, $tp, %le_add_r(sym)
//   addi/ld/st.w/d $rd, $rd, %le_lo12_r(sym)
// To:
//   addi/ld/st.w/d $rd, $tp, %le_lo12_r(sym)
static void relaxTlsLe(Ctx &ctx, const InputSection &sec, size_t i,
                       uint64_t loc, Relocation &r, uint32_t &remove) {
  uint64_t val = r.sym->getVA(ctx, r.addend);
  // Return if val exceeds the 12-bit immediate range of addi/ld/st.
  if (!isInt<12>(val))
    return;
  uint32_t currInsn = read32le(sec.content().data() + r.offset);
  switch (r.type) {
  case R_LARCH_TLS_LE_HI20_R:
  case R_LARCH_TLS_LE_ADD_R:
    sec.relaxAux->relocTypes[i] = R_LARCH_RELAX;
    remove = 4;
    break;
  case R_LARCH_TLS_LE_LO12_R:
    sec.relaxAux->writes.push_back(setJ5(currInsn, R_TP));
    sec.relaxAux->relocTypes[i] = R_LARCH_TLS_LE_LO12_R;
    break;
  }
}

static bool relax(Ctx &ctx, InputSection &sec) {
  const uint64_t secAddr = sec.getVA();
  const MutableArrayRef<Relocation> relocs = sec.relocs();
  auto &aux = *sec.relaxAux;
  bool changed = false;
  ArrayRef<SymbolAnchor> sa = ArrayRef(aux.anchors);
  uint64_t delta = 0;

  std::fill_n(aux.relocTypes.get(), relocs.size(), R_LARCH_NONE);
  aux.writes.clear();
  for (auto [i, r] : llvm::enumerate(relocs)) {
    const uint64_t loc = secAddr + r.offset - delta;
    uint32_t &cur = aux.relocDeltas[i], remove = 0;
    switch (r.type) {
    case R_LARCH_ALIGN: {
      const uint64_t addend =
          r.sym->isUndefined() ? Log2_64(r.addend) + 1 : r.addend;
      const uint64_t allBytes = (1ULL << (addend & 0xff)) - 4;
      const uint64_t align = 1ULL << (addend & 0xff);
      const uint64_t maxBytes = addend >> 8;
      const uint64_t off = loc & (align - 1);
      const uint64_t curBytes = off == 0 ? 0 : align - off;
      // All bytes beyond the alignment boundary should be removed.
      // If the bytes to emit exceed the maximum number of bytes to emit,
      // remove them all.
      if (maxBytes != 0 && curBytes > maxBytes)
        remove = allBytes;
      else
        remove = allBytes - curBytes;
      // If we can't satisfy this alignment, we've found a bad input.
      if (LLVM_UNLIKELY(static_cast<int32_t>(remove) < 0)) {
        Err(ctx) << getErrorLoc(ctx, (const uint8_t *)loc)
                 << "insufficient padding bytes for " << r.type << ": "
                 << allBytes << " bytes available for "
                 << "requested alignment of " << align << " bytes";
        remove = 0;
      }
      break;
    }
    case R_LARCH_PCALA_HI20:
    case R_LARCH_GOT_PC_HI20:
    case R_LARCH_TLS_GD_PC_HI20:
    case R_LARCH_TLS_LD_PC_HI20:
    case R_LARCH_TLS_DESC_PC_HI20:
      // The overflow check for i+2 will be carried out in isPairRelaxable.
      if (isPairRelaxable(relocs, i))
        relaxPCHi20Lo12(ctx, sec, i, loc, r, relocs[i + 2], remove);
      break;
    case R_LARCH_CALL36:
      if (relaxable(relocs, i))
        relaxCall36(ctx, sec, i, loc, r, remove);
      break;
    case R_LARCH_TLS_LE_HI20_R:
    case R_LARCH_TLS_LE_ADD_R:
    case R_LARCH_TLS_LE_LO12_R:
      if (relaxable(relocs, i))
        relaxTlsLe(ctx, sec, i, loc, r, remove);
      break;
    case R_LARCH_TLS_IE_PC_HI20:
      if (relaxable(relocs, i) && r.expr == R_RELAX_TLS_IE_TO_LE &&
          isUInt<12>(r.sym->getVA(ctx, r.addend)))
        remove = 4;
      break;
    }

    // For all anchors whose offsets are <= r.offset, they are preceded by
    // the previous relocation whose `relocDeltas` value equals `delta`.
    // Decrease their st_value and update their st_size.
    for (; sa.size() && sa[0].offset <= r.offset; sa = sa.slice(1)) {
      if (sa[0].end)
        sa[0].d->size = sa[0].offset - delta - sa[0].d->value;
      else
        sa[0].d->value = sa[0].offset - delta;
    }
    delta += remove;
    if (delta != cur) {
      cur = delta;
      changed = true;
    }
  }

  for (const SymbolAnchor &a : sa) {
    if (a.end)
      a.d->size = a.offset - delta - a.d->value;
    else
      a.d->value = a.offset - delta;
  }
  // Inform assignAddresses that the size has changed.
  if (!isUInt<32>(delta))
    Fatal(ctx) << "section size decrease is too large: " << delta;
  sec.bytesDropped = delta;
  return changed;
}

// Convert TLS IE to LE in the normal or medium code model.
// Original code sequence:
//  * pcalau12i $a0, %ie_pc_hi20(sym)
//  * ld.d      $a0, $a0, %ie_pc_lo12(sym)
//
// The code sequence is converted as follows:
//  * lu12i.w   $a0, %le_hi20(sym)      # le_hi20 != 0, otherwise NOP
//  * ori       $a0, src, %le_lo12(sym) # le_hi20 != 0, src = $a0,
//                                      # otherwise, src = $zero
//
// When relaxation is enabled, the redundant NOPs can be removed.
static void tlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) {
  assert(isInt<32>(val) &&
         "val exceeds the range of medium code model in tlsIeToLe");

  bool isUInt12 = isUInt<12>(val);
  const uint32_t currInsn = read32le(loc);
  switch (rel.type) {
  case R_LARCH_TLS_IE_PC_HI20:
    if (isUInt12)
      write32le(loc, insn(ANDI, R_ZERO, R_ZERO, 0)); // nop
    else
      write32le(loc, insn(LU12I_W, getD5(currInsn), extractBits(val, 31, 12),
                          0)); // lu12i.w $a0, %le_hi20
    break;
  case R_LARCH_TLS_IE_PC_LO12:
    if (isUInt12)
      write32le(loc, insn(ORI, getD5(currInsn), R_ZERO,
                          val)); // ori $a0, $zero, %le_lo12
    else
      write32le(loc, insn(ORI, getD5(currInsn), getJ5(currInsn),
                          lo12(val))); // ori $a0, $a0, %le_lo12
    break;
  }
}

void LoongArch::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
  const unsigned bits = ctx.arg.is64 ? 64 : 32;
  uint64_t secAddr = sec.getOutputSection()->addr;
  if (auto *s = dyn_cast<InputSection>(&sec))
    secAddr += s->outSecOff;
  else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
    secAddr += ehIn->getParent()->outSecOff;
  bool isExtreme = false, isRelax = false;
  const MutableArrayRef<Relocation> relocs = sec.relocs();
  for (size_t i = 0, size = relocs.size(); i != size; ++i) {
    Relocation &rel = relocs[i];
    uint8_t *loc = buf + rel.offset;
    uint64_t val = SignExtend64(
        sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset), bits);

    switch (rel.expr) {
    case R_RELAX_HINT:
      continue;
    case R_RELAX_TLS_IE_TO_LE:
      if (rel.type == R_LARCH_TLS_IE_PC_HI20) {
        // LoongArch does not support IE to LE optimization in the extreme
        // code model. In this case, the relocs are as follows:
        //
        //  * i   -- R_LARCH_TLS_IE_PC_HI20
        //  * i+1 -- R_LARCH_TLS_IE_PC_LO12
        //  * i+2 -- R_LARCH_TLS_IE64_PC_LO20
        //  * i+3 -- R_LARCH_TLS_IE64_PC_HI12
        isExtreme =
            (i + 2 < size && relocs[i + 2].type == R_LARCH_TLS_IE64_PC_LO20);
      }
      if (isExtreme) {
        rel.expr = getRelExpr(rel.type, *rel.sym, loc);
        val = SignExtend64(
            sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset), bits);
        relocateNoSym(loc, rel.type, val);
      } else {
        isRelax = relaxable(relocs, i);
        if (isRelax && rel.type == R_LARCH_TLS_IE_PC_HI20 && isUInt<12>(val))
          continue;
        tlsIeToLe(loc, rel, val);
      }
      continue;
    default:
      break;
    }
    relocate(loc, rel, val);
  }
}

// When relaxing just R_LARCH_ALIGN, relocDeltas is usually changed only once
// in the absence of a linker script. For call and load/store R_LARCH_RELAX,
// code shrinkage may reduce displacement and make more relocations eligible
// for relaxation. Code shrinkage may also increase displacement to a
// call/load/store target at a higher fixed address, invalidating an earlier
// relaxation. Any change in section sizes can have cascading effects and
// require another relaxation pass.
bool LoongArch::relaxOnce(int pass) const {
  if (ctx.arg.relocatable)
    return false;

  if (pass == 0)
    initSymbolAnchors(ctx);

  SmallVector<InputSection *, 0> storage;
  bool changed = false;
  for (OutputSection *osec : ctx.outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage))
      changed |= relax(ctx, *sec);
  }
  return changed;
}

void LoongArch::finalizeRelax(int passes) const {
  Log(ctx) << "relaxation passes: " << passes;
  SmallVector<InputSection *, 0> storage;
  for (OutputSection *osec : ctx.outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage)) {
      RelaxAux &aux = *sec->relaxAux;
      if (!aux.relocDeltas)
        continue;

      MutableArrayRef<Relocation> rels = sec->relocs();
      ArrayRef<uint8_t> old = sec->content();
      size_t newSize = old.size() - aux.relocDeltas[rels.size() - 1];
      size_t writesIdx = 0;
      uint8_t *p = ctx.bAlloc.Allocate<uint8_t>(newSize);
      uint64_t offset = 0;
      int64_t delta = 0;
      sec->content_ = p;
      sec->size = newSize;
      sec->bytesDropped = 0;

      // Update section content: remove NOPs for R_LARCH_ALIGN and rewrite
      // instructions for relaxed relocations.
      for (size_t i = 0, e = rels.size(); i != e; ++i) {
        uint32_t remove = aux.relocDeltas[i] - delta;
        delta = aux.relocDeltas[i];
        if (remove == 0 && aux.relocTypes[i] == R_LARCH_NONE)
          continue;

        // Copy from the last location to the current relocated location.
        Relocation &r = rels[i];
        uint64_t size = r.offset - offset;
        memcpy(p, old.data() + offset, size);
        p += size;

        int64_t skip = 0;
        if (RelType newType = aux.relocTypes[i]) {
          switch (newType) {
          case R_LARCH_RELAX:
            break;
          case R_LARCH_PCREL20_S2:
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            // RelExpr is needed for relocating.
            r.expr = r.sym->hasFlag(NEEDS_PLT) ? R_PLT_PC : R_PC;
            break;
          case R_LARCH_B26:
          case R_LARCH_TLS_LE_LO12_R:
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            break;
          case R_LARCH_TLS_GD_PCREL20_S2:
            // Note: R_LARCH_TLS_LD_PCREL20_S2 must also use R_TLSGD_PC instead
            // of R_TLSLD_PC due to historical reasons. In fact, right now
            // TLSLD behaves exactly like TLSGD on LoongArch.
            //
            // This reason has also been mentioned in a mold commit:
            // https://github.com/rui314/mold/commit/5dfa1cf07c03bd57cb3d493b652ef22441bcd71c
          case R_LARCH_TLS_LD_PCREL20_S2:
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            r.expr = R_TLSGD_PC;
            break;
          case R_LARCH_TLS_DESC_PCREL20_S2:
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            r.expr = R_TLSDESC_PC;
            break;
          default:
            llvm_unreachable("unsupported type");
          }
        }

        p += skip;
        offset = r.offset + skip + remove;
      }
      memcpy(p, old.data() + offset, old.size() - offset);

      // Subtract the previous relocDeltas value from the relocation offset.
      // For a pair of R_LARCH_XXX/R_LARCH_RELAX with the same offset, decrease
      // their r_offset by the same delta.
      delta = 0;
      for (size_t i = 0, e = rels.size(); i != e;) {
        uint64_t cur = rels[i].offset;
        do {
          rels[i].offset -= delta;
          if (aux.relocTypes[i] != R_LARCH_NONE)
            rels[i].type = aux.relocTypes[i];
        } while (++i != e && rels[i].offset == cur);
        delta = aux.relocDeltas[i - 1];
      }
    }
  }
}

void elf::setLoongArchTargetInfo(Ctx &ctx) {
  ctx.target.reset(new LoongArch(ctx));
}