1//===- bolt/Target/RISCV/RISCVMCPlusBuilder.cpp -----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides RISCV-specific MCPlus builder.
10//
11//===----------------------------------------------------------------------===//
12
13#include "MCTargetDesc/RISCVMCAsmInfo.h"
14#include "MCTargetDesc/RISCVMCTargetDesc.h"
15#include "bolt/Core/MCPlusBuilder.h"
16#include "llvm/BinaryFormat/ELF.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCInst.h"
19#include "llvm/MC/MCInstBuilder.h"
20#include "llvm/MC/MCSubtargetInfo.h"
21#include "llvm/Support/ErrorHandling.h"
22
23#define DEBUG_TYPE "mcplus"
24
25using namespace llvm;
26using namespace bolt;
27
28namespace {
29
30class RISCVMCPlusBuilder : public MCPlusBuilder {
31public:
32 using MCPlusBuilder::MCPlusBuilder;
33
34 bool equals(const MCSpecifierExpr &A, const MCSpecifierExpr &B,
35 CompFuncTy Comp) const override {
36 const auto &RISCVExprA = cast<MCSpecifierExpr>(Val: A);
37 const auto &RISCVExprB = cast<MCSpecifierExpr>(Val: B);
38 if (RISCVExprA.getSpecifier() != RISCVExprB.getSpecifier())
39 return false;
40
41 return MCPlusBuilder::equals(A: *RISCVExprA.getSubExpr(),
42 B: *RISCVExprB.getSubExpr(), Comp);
43 }
44
45 void getCalleeSavedRegs(BitVector &Regs) const override {
46 Regs |= getAliases(Reg: RISCV::X2);
47 Regs |= getAliases(Reg: RISCV::X8);
48 Regs |= getAliases(Reg: RISCV::X9);
49 Regs |= getAliases(Reg: RISCV::X18);
50 Regs |= getAliases(Reg: RISCV::X19);
51 Regs |= getAliases(Reg: RISCV::X20);
52 Regs |= getAliases(Reg: RISCV::X21);
53 Regs |= getAliases(Reg: RISCV::X22);
54 Regs |= getAliases(Reg: RISCV::X23);
55 Regs |= getAliases(Reg: RISCV::X24);
56 Regs |= getAliases(Reg: RISCV::X25);
57 Regs |= getAliases(Reg: RISCV::X26);
58 Regs |= getAliases(Reg: RISCV::X27);
59 }
60
61 bool shouldRecordCodeRelocation(uint32_t RelType) const override {
62 switch (RelType) {
63 case ELF::R_RISCV_JAL:
64 case ELF::R_RISCV_CALL:
65 case ELF::R_RISCV_CALL_PLT:
66 case ELF::R_RISCV_BRANCH:
67 case ELF::R_RISCV_RVC_BRANCH:
68 case ELF::R_RISCV_RVC_JUMP:
69 case ELF::R_RISCV_GOT_HI20:
70 case ELF::R_RISCV_PCREL_HI20:
71 case ELF::R_RISCV_PCREL_LO12_I:
72 case ELF::R_RISCV_PCREL_LO12_S:
73 case ELF::R_RISCV_HI20:
74 case ELF::R_RISCV_LO12_I:
75 case ELF::R_RISCV_LO12_S:
76 case ELF::R_RISCV_TLS_GOT_HI20:
77 case ELF::R_RISCV_TLS_GD_HI20:
78 return true;
79 default:
80 llvm_unreachable("Unexpected RISCV relocation type in code");
81 }
82 }
83
84 bool isNop(const MCInst &Inst) const {
85 return Inst.getOpcode() == RISCV::ADDI &&
86 Inst.getOperand(i: 0).getReg() == RISCV::X0 &&
87 Inst.getOperand(i: 1).getReg() == RISCV::X0 &&
88 Inst.getOperand(i: 2).getImm() == 0;
89 }
90
91 bool isCNop(const MCInst &Inst) const {
92 return Inst.getOpcode() == RISCV::C_NOP;
93 }
94
95 bool isNoop(const MCInst &Inst) const override {
96 return isNop(Inst) || isCNop(Inst);
97 }
98
99 bool isPseudo(const MCInst &Inst) const override {
100 switch (Inst.getOpcode()) {
101 default:
102 return MCPlusBuilder::isPseudo(Inst);
103 case RISCV::PseudoCALL:
104 case RISCV::PseudoTAIL:
105 return false;
106 }
107 }
108
109 bool isIndirectCall(const MCInst &Inst) const override {
110 if (!isCall(Inst))
111 return false;
112
113 switch (Inst.getOpcode()) {
114 default:
115 return false;
116 case RISCV::JALR:
117 case RISCV::C_JALR:
118 case RISCV::C_JR:
119 return true;
120 }
121 }
122
123 bool hasPCRelOperand(const MCInst &Inst) const override {
124 switch (Inst.getOpcode()) {
125 default:
126 return false;
127 case RISCV::JAL:
128 case RISCV::AUIPC:
129 return true;
130 }
131 }
132
133 unsigned getInvertedBranchOpcode(unsigned Opcode) const {
134 switch (Opcode) {
135 default:
136 llvm_unreachable("Failed to invert branch opcode");
137 return Opcode;
138 case RISCV::BEQ:
139 return RISCV::BNE;
140 case RISCV::BNE:
141 return RISCV::BEQ;
142 case RISCV::BLT:
143 return RISCV::BGE;
144 case RISCV::BGE:
145 return RISCV::BLT;
146 case RISCV::BLTU:
147 return RISCV::BGEU;
148 case RISCV::BGEU:
149 return RISCV::BLTU;
150 case RISCV::C_BEQZ:
151 return RISCV::C_BNEZ;
152 case RISCV::C_BNEZ:
153 return RISCV::C_BEQZ;
154 }
155 }
156
157 void reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
158 MCContext *Ctx) const override {
159 auto Opcode = getInvertedBranchOpcode(Opcode: Inst.getOpcode());
160 Inst.setOpcode(Opcode);
161 replaceBranchTarget(Inst, TBB, Ctx);
162 }
163
164 void replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
165 MCContext *Ctx) const override {
166 assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
167 "Invalid instruction");
168
169 unsigned SymOpIndex;
170 auto Result = getSymbolRefOperandNum(Inst, OpNum&: SymOpIndex);
171 (void)Result;
172 assert(Result && "unimplemented branch");
173
174 Inst.getOperand(i: SymOpIndex) =
175 MCOperand::createExpr(Val: MCSymbolRefExpr::create(Symbol: TBB, Ctx&: *Ctx));
176 }
177
  /// Classify an indirect branch. Only the auipc+jalr long tail call pattern
  /// is recognized; jump tables are not analyzed yet, so everything else is
  /// reported as UNKNOWN with all out-parameters reset.
  IndirectBranchType analyzeIndirectBranch(
      MCInst &Instruction, InstructionIterator Begin, InstructionIterator End,
      const unsigned PtrSize, MCInst *&MemLocInstr, unsigned &BaseRegNum,
      unsigned &IndexRegNum, int64_t &DispValue, const MCExpr *&DispExpr,
      MCInst *&PCRelBaseOut, MCInst *&FixedEntryLoadInst) const override {
    // No memory-location analysis is performed: clear every out-parameter.
    MemLocInstr = nullptr;
    BaseRegNum = 0;
    IndexRegNum = 0;
    DispValue = 0;
    DispExpr = nullptr;
    PCRelBaseOut = nullptr;
    FixedEntryLoadInst = nullptr;

    // Check for the following long tail call sequence:
    // 1: auipc xi, %pcrel_hi(sym)
    //    jalr zero, %pcrel_lo(1b)(xi)
    // The rd of jalr must be x0 (no link register), i.e. a tail call.
    if (Instruction.getOpcode() == RISCV::JALR && Begin != End) {
      MCInst &PrevInst = *std::prev(End);
      if (isRISCVCall(PrevInst, Instruction) &&
          Instruction.getOperand(0).getReg() == RISCV::X0)
        return IndirectBranchType::POSSIBLE_TAIL_CALL;
    }

    return IndirectBranchType::UNKNOWN;
  }
203
204 bool convertJmpToTailCall(MCInst &Inst) override {
205 if (isTailCall(Inst))
206 return false;
207
208 switch (Inst.getOpcode()) {
209 default:
210 llvm_unreachable("unsupported tail call opcode");
211 case RISCV::JAL:
212 case RISCV::JALR:
213 case RISCV::C_J:
214 case RISCV::C_JR:
215 break;
216 }
217
218 setTailCall(Inst);
219 return true;
220 }
221
222 void createReturn(MCInst &Inst) const override {
223 // TODO "c.jr ra" when RVC is enabled
224 Inst.setOpcode(RISCV::JALR);
225 Inst.clear();
226 Inst.addOperand(Op: MCOperand::createReg(Reg: RISCV::X0));
227 Inst.addOperand(Op: MCOperand::createReg(Reg: RISCV::X1));
228 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
229 }
230
231 void createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
232 MCContext *Ctx) const override {
233 Inst.setOpcode(RISCV::JAL);
234 Inst.clear();
235 Inst.addOperand(Op: MCOperand::createReg(Reg: RISCV::X0));
236 Inst.addOperand(Op: MCOperand::createExpr(Val: MCSymbolRefExpr::create(Symbol: TBB, Ctx&: *Ctx)));
237 }
238
239 StringRef getTrapFillValue() const override {
240 return StringRef("\0\0\0\0", 4);
241 }
242
243 void createCall(unsigned Opcode, MCInst &Inst, const MCSymbol *Target,
244 MCContext *Ctx) {
245 Inst.setOpcode(Opcode);
246 Inst.clear();
247 Inst.addOperand(Op: MCOperand::createExpr(Val: MCSpecifierExpr::create(
248 Expr: MCSymbolRefExpr::create(Symbol: Target, Ctx&: *Ctx), S: ELF::R_RISCV_CALL_PLT, Ctx&: *Ctx)));
249 }
250
251 void createCall(MCInst &Inst, const MCSymbol *Target,
252 MCContext *Ctx) override {
253 return createCall(Opcode: RISCV::PseudoCALL, Inst, Target, Ctx);
254 }
255
256 void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
257 MCContext *Ctx) override {
258 createShortJmp(Seq, Target, Ctx, /*IsTailCall*/ true);
259 }
260
261 void createTailCall(MCInst &Inst, const MCSymbol *Target,
262 MCContext *Ctx) override {
263 return createCall(Opcode: RISCV::PseudoTAIL, Inst, Target, Ctx);
264 }
265
  /// Scan [Begin, End) backwards to identify the block's terminators.
  /// On success, TBB/FBB and CondBranch/UncondBranch describe the outgoing
  /// control flow. Returns false for indirect branches or branches whose
  /// target symbol cannot be extracted.
  bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
                     const MCSymbol *&TBB, const MCSymbol *&FBB,
                     MCInst *&CondBranch,
                     MCInst *&UncondBranch) const override {
    auto I = End;

    // Walk from the last instruction towards the start of the block.
    while (I != Begin) {
      --I;

      // Ignore nops and CFIs
      if (isPseudo(*I) || isNoop(*I))
        continue;

      // Stop when we find the first non-terminator
      if (!isTerminator(*I) || isTailCall(*I) || !isBranch(*I))
        break;

      // Handle unconditional branches.
      if (isUnconditionalBranch(*I)) {
        // If any code was seen after this unconditional branch, we've seen
        // unreachable code. Ignore them.
        CondBranch = nullptr;
        UncondBranch = &*I;
        const MCSymbol *Sym = getTargetSymbol(*I);
        assert(Sym != nullptr &&
               "Couldn't extract BB symbol from jump operand");
        // The unconditional target becomes the (provisional) taken target.
        TBB = Sym;
        continue;
      }

      // Handle conditional branches and ignore indirect branches
      if (isIndirectBranch(*I))
        return false;

      if (CondBranch == nullptr) {
        const MCSymbol *TargetBB = getTargetSymbol(*I);
        if (TargetBB == nullptr) {
          // Unrecognized branch target
          return false;
        }
        // Demote the previously seen unconditional target to fall-through;
        // the conditional branch's target is the taken destination.
        FBB = TBB;
        TBB = TargetBB;
        CondBranch = &*I;
        continue;
      }

      llvm_unreachable("multiple conditional branches in one BB");
    }

    return true;
  }
317
318 bool getSymbolRefOperandNum(const MCInst &Inst, unsigned &OpNum) const {
319 switch (Inst.getOpcode()) {
320 default:
321 return false;
322 case RISCV::C_J:
323 OpNum = 0;
324 return true;
325 case RISCV::AUIPC:
326 case RISCV::JAL:
327 case RISCV::C_BEQZ:
328 case RISCV::C_BNEZ:
329 OpNum = 1;
330 return true;
331 case RISCV::BEQ:
332 case RISCV::BGE:
333 case RISCV::BGEU:
334 case RISCV::BNE:
335 case RISCV::BLT:
336 case RISCV::BLTU:
337 OpNum = 2;
338 return true;
339 }
340 }
341
342 const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
343 auto *RISCVExpr = dyn_cast<MCSpecifierExpr>(Val: Expr);
344 if (RISCVExpr && RISCVExpr->getSubExpr())
345 return getTargetSymbol(Expr: RISCVExpr->getSubExpr());
346
347 return MCPlusBuilder::getTargetSymbol(Expr);
348 }
349
350 const MCSymbol *getTargetSymbol(const MCInst &Inst,
351 unsigned OpNum = 0) const override {
352 if (!OpNum && !getSymbolRefOperandNum(Inst, OpNum))
353 return nullptr;
354
355 const MCOperand &Op = Inst.getOperand(i: OpNum);
356 if (!Op.isExpr())
357 return nullptr;
358
359 return getTargetSymbol(Expr: Op.getExpr());
360 }
361
  /// Strip BOLT's tail-call annotations before emission; the instruction
  /// itself is left untouched.
  bool lowerTailCall(MCInst &Inst) override {
    removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    if (getConditionalTailCall(Inst))
      unsetConditionalTailCall(Inst);
    return true;
  }
368
  /// Compute the GOT slot address read by a standard RISC-V PLT entry:
  ///   auipc t3, %pcrel_hi(got_slot)
  ///   ld    t3, %pcrel_lo(...)(t3)
  ///   jalr  t1, t3
  ///   nop
  /// The entry's shape is validated with asserts (x28 == t3, x6 == t1).
  uint64_t analyzePLTEntry(MCInst &Instruction, InstructionIterator Begin,
                           InstructionIterator End,
                           uint64_t BeginPC) const override {
    auto I = Begin;

    assert(I != End);
    auto &AUIPC = *I++;
    assert(AUIPC.getOpcode() == RISCV::AUIPC);
    assert(AUIPC.getOperand(0).getReg() == RISCV::X28);

    assert(I != End);
    auto &LD = *I++;
    assert(LD.getOpcode() == RISCV::LD);
    assert(LD.getOperand(0).getReg() == RISCV::X28);
    assert(LD.getOperand(1).getReg() == RISCV::X28);

    assert(I != End);
    auto &JALR = *I++;
    (void)JALR;
    assert(JALR.getOpcode() == RISCV::JALR);
    assert(JALR.getOperand(0).getReg() == RISCV::X6);
    assert(JALR.getOperand(1).getReg() == RISCV::X28);

    assert(I != End);
    auto &NOP = *I++;
    (void)NOP;
    assert(isNoop(NOP));

    assert(I == End);

    // auipc supplies the upper 20 bits (shifted into place); the ld's
    // immediate supplies the low 12 bits relative to the auipc PC.
    auto AUIPCOffset = AUIPC.getOperand(1).getImm() << 12;
    auto LDOffset = LD.getOperand(2).getImm();
    return BeginPC + AUIPCOffset + LDOffset;
  }
403
404 bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
405 int64_t Addend, MCContext *Ctx, int64_t &Value,
406 uint32_t RelType) const override {
407 unsigned ImmOpNo = -1U;
408
409 for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
410 ++Index) {
411 if (Inst.getOperand(i: Index).isImm()) {
412 ImmOpNo = Index;
413 break;
414 }
415 }
416
417 if (ImmOpNo == -1U)
418 return false;
419
420 Value = Inst.getOperand(i: ImmOpNo).getImm();
421 setOperandToSymbolRef(Inst, OpNum: ImmOpNo, Symbol, Addend, Ctx, RelType);
422 return true;
423 }
424
425 const MCExpr *getTargetExprFor(MCInst &Inst, const MCExpr *Expr,
426 MCContext &Ctx,
427 uint32_t RelType) const override {
428 switch (RelType) {
429 default:
430 return Expr;
431 case ELF::R_RISCV_GOT_HI20:
432 case ELF::R_RISCV_TLS_GOT_HI20:
433 case ELF::R_RISCV_TLS_GD_HI20:
434 // The GOT is reused so no need to create GOT relocations
435 case ELF::R_RISCV_PCREL_HI20:
436 return MCSpecifierExpr::create(Expr, S: ELF::R_RISCV_PCREL_HI20, Ctx);
437 case ELF::R_RISCV_PCREL_LO12_I:
438 case ELF::R_RISCV_PCREL_LO12_S:
439 return MCSpecifierExpr::create(Expr, S: RISCV::S_PCREL_LO, Ctx);
440 case ELF::R_RISCV_HI20:
441 return MCSpecifierExpr::create(Expr, S: ELF::R_RISCV_HI20, Ctx);
442 case ELF::R_RISCV_LO12_I:
443 case ELF::R_RISCV_LO12_S:
444 return MCSpecifierExpr::create(Expr, S: RISCV::S_LO, Ctx);
445 case ELF::R_RISCV_CALL:
446 return MCSpecifierExpr::create(Expr, S: ELF::R_RISCV_CALL_PLT, Ctx);
447 case ELF::R_RISCV_CALL_PLT:
448 return MCSpecifierExpr::create(Expr, S: ELF::R_RISCV_CALL_PLT, Ctx);
449 }
450 }
451
  /// Not implemented for RISC-V: memory-operand targets are never evaluated
  /// statically, so always report failure.
  bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
                                uint64_t Address,
                                uint64_t Size) const override {
    return false;
  }
457
458 bool isCallAuipc(const MCInst &Inst) const {
459 if (Inst.getOpcode() != RISCV::AUIPC)
460 return false;
461
462 const auto &ImmOp = Inst.getOperand(i: 1);
463 if (!ImmOp.isExpr())
464 return false;
465
466 const auto *ImmExpr = ImmOp.getExpr();
467 if (!isa<MCSpecifierExpr>(Val: ImmExpr))
468 return false;
469
470 switch (cast<MCSpecifierExpr>(Val: ImmExpr)->getSpecifier()) {
471 default:
472 return false;
473 case ELF::R_RISCV_CALL_PLT:
474 return true;
475 }
476 }
477
478 bool isRISCVCall(const MCInst &First, const MCInst &Second) const override {
479 if (!isCallAuipc(Inst: First))
480 return false;
481
482 assert(Second.getOpcode() == RISCV::JALR);
483 return true;
484 }
485
486 uint16_t getMinFunctionAlignment() const override {
487 if (STI->hasFeature(Feature: RISCV::FeatureStdExtC) ||
488 STI->hasFeature(Feature: RISCV::FeatureStdExtZca))
489 return 2;
490 return 4;
491 }
492
  /// "Increment" grows the stack: emits addi sp, sp, -imm (the stack grows
  /// towards lower addresses).
  void createStackPointerIncrement(
      MCInst &Inst, int imm,
      bool NoFlagsClobber = false /*unused for RISCV*/) const override {
    Inst = MCInstBuilder(RISCV::ADDI)
               .addReg(RISCV::X2)
               .addReg(RISCV::X2)
               .addImm(-imm);
  }

  /// "Decrement" shrinks the stack: emits addi sp, sp, +imm.
  void createStackPointerDecrement(
      MCInst &Inst, int imm,
      bool NoFlagsClobber = false /*unused for RISCV*/) const override {
    Inst = MCInstBuilder(RISCV::ADDI)
               .addReg(RISCV::X2)
               .addReg(RISCV::X2)
               .addImm(imm);
  }
510
  /// ld To, offset(From) — 64-bit load.
  void loadReg(MCInst &Inst, MCPhysReg To, MCPhysReg From,
               int64_t offset) const {
    Inst = MCInstBuilder(RISCV::LD).addReg(To).addReg(From).addImm(offset);
  }

  /// sd From, offset(To) — 64-bit store.
  void storeReg(MCInst &Inst, MCPhysReg From, MCPhysReg To,
                int64_t offset) const {
    Inst = MCInstBuilder(RISCV::SD).addReg(From).addReg(To).addImm(offset);
  }
520
521 void spillRegs(InstructionListType &Insts,
522 const SmallVector<unsigned> &Regs) const {
523 Insts.emplace_back();
524 createStackPointerIncrement(Inst&: Insts.back(), imm: Regs.size() * 8);
525
526 int64_t Offset = 0;
527 for (auto Reg : Regs) {
528 Insts.emplace_back();
529 storeReg(Inst&: Insts.back(), From: Reg, To: RISCV::X2, offset: Offset);
530 Offset += 8;
531 }
532 }
533
534 void reloadRegs(InstructionListType &Insts,
535 const SmallVector<unsigned> &Regs) const {
536 int64_t Offset = 0;
537 for (auto Reg : Regs) {
538 Insts.emplace_back();
539 loadReg(Inst&: Insts.back(), To: Reg, From: RISCV::X2, offset: Offset);
540 Offset += 8;
541 }
542
543 Insts.emplace_back();
544 createStackPointerDecrement(Inst&: Insts.back(), imm: Regs.size() * 8);
545 }
546
  /// amoadd.d: atomically add RegCnt to the doubleword at address RegTo,
  /// writing the old memory value to RegAtomic (callers pass x0 to discard).
  // NOTE(review): operand order here is (rd=RegAtomic, rs1=RegTo addr,
  // rs2=RegCnt) — confirm against the AMOADD_D instruction definition.
  void atomicAdd(MCInst &Inst, MCPhysReg RegAtomic, MCPhysReg RegTo,
                 MCPhysReg RegCnt) const {
    Inst = MCInstBuilder(RISCV::AMOADD_D)
               .addReg(RegAtomic)
               .addReg(RegTo)
               .addReg(RegCnt);
  }
554
555 InstructionListType createRegCmpJE(MCPhysReg RegNo, MCPhysReg RegTmp,
556 const MCSymbol *Target,
557 MCContext *Ctx) const {
558 InstructionListType Insts;
559 Insts.emplace_back(
560 args&: MCInstBuilder(RISCV::SUB).addReg(Reg: RegTmp).addReg(Reg: RegNo).addReg(Reg: RegNo));
561 Insts.emplace_back(args&: MCInstBuilder(RISCV::BEQ)
562 .addReg(Reg: RegNo)
563 .addReg(Reg: RegTmp)
564 .addExpr(Val: MCSymbolRefExpr::create(Symbol: Target, Ctx&: *Ctx)));
565 return Insts;
566 }
567
568 void createTrap(MCInst &Inst) const override {
569 Inst.clear();
570 Inst.setOpcode(RISCV::EBREAK);
571 }
572
573 void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
574 MCContext *Ctx, bool IsTailCall) override {
575 // The sequence of instructions we create here is the following:
576 // auipc a5, hi20(Target)
577 // addi a5, a5, low12(Target)
578 // jr x5 => jalr x0, x5, 0
579 MCPhysReg Reg = RISCV::X5;
580 InstructionListType Insts = materializeAddress(Target, Ctx, RegName: Reg);
581 Insts.emplace_back();
582 MCInst &Inst = Insts.back();
583 Inst.clear();
584 Inst = MCInstBuilder(RISCV::JALR).addReg(Reg: RISCV::X0).addReg(Reg).addImm(Val: 0);
585 if (IsTailCall)
586 setTailCall(Inst);
587 Seq.swap(x&: Insts);
588 }
589
590 InstructionListType createGetter(MCContext *Ctx, const char *name) const {
591 InstructionListType Insts(4);
592 MCSymbol *Locs = Ctx->getOrCreateSymbol(Name: name);
593 InstructionListType Addr = materializeAddress(Target: Locs, Ctx, RegName: RISCV::X10);
594 std::copy(first: Addr.begin(), last: Addr.end(), result: Insts.begin());
595 loadReg(Inst&: Insts[2], To: RISCV::X10, From: RISCV::X10, offset: 0);
596 createReturn(Inst&: Insts[3]);
597 return Insts;
598 }
599
600 InstructionListType createIncMemory(MCPhysReg RegTo, MCPhysReg RegCnt,
601 MCPhysReg RegAtomic) const {
602 InstructionListType Insts;
603 Insts.emplace_back();
604 Insts.back() =
605 MCInstBuilder(RISCV::ADDI).addReg(Reg: RegCnt).addReg(Reg: RegAtomic).addImm(Val: 1);
606 Insts.emplace_back();
607 atomicAdd(Inst&: Insts.back(), RegAtomic, RegTo, RegCnt);
608 return Insts;
609 }
610
  /// Materialize Target+Addend into RegName with a PC-relative pair:
  ///   1: auipc RegName, %pcrel_hi(Target+Addend)
  ///      addi  RegName, RegName, %pcrel_lo(1b)
  InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx,
                                         MCPhysReg RegName,
                                         int64_t Addend = 0) const override {
    // Get the symbol address by auipc + addi
    InstructionListType Insts(2);
    // Label the auipc so the addi's %pcrel_lo can refer back to it.
    MCSymbol *AuipcLabel = Ctx->createNamedTempSymbol("pcrel_hi");
    Insts[0] = MCInstBuilder(RISCV::AUIPC).addReg(RegName).addImm(0);
    setOperandToSymbolRef(Insts[0], /* OpNum */ 1, Target, Addend, Ctx,
                          ELF::R_RISCV_PCREL_HI20);
    setInstLabel(Insts[0], AuipcLabel);

    Insts[1] =
        MCInstBuilder(RISCV::ADDI).addReg(RegName).addReg(RegName).addImm(0);
    // The lo12 relocation targets the auipc label, not Target itself.
    setOperandToSymbolRef(Insts[1], /* OpNum */ 2, AuipcLabel, Addend, Ctx,
                          ELF::R_RISCV_PCREL_LO12_I);
    return Insts;
  }
628
  /// Emit the counter-increment stub used by instrumentation: save two
  /// scratch registers, atomically add 1 to the counter at Target, restore.
  InstructionListType
  createInstrIncMemory(const MCSymbol *Target, MCContext *Ctx, bool IsLeaf,
                       unsigned CodePointerSize) const override {
    // We need 2 scratch registers: one for the target address (x10), and one
    // for the increment value (x11).
    // addi sp, sp, -16
    // sd x10, 0(sp)
    // sd x11, 8(sp)
    // la x10, target # 1: auipc x10, %pcrel_hi(target)
    //                #    addi x10, x10, %pcrel_lo(1b)
    // li x11, 1      # addi x11, zero, 1
    // amoadd.d zero, x10, x11
    // ld x10, 0(sp)
    // ld x11, 8(sp)
    // addi sp, sp, 16

    InstructionListType Insts;
    spillRegs(Insts, {RISCV::X10, RISCV::X11});
    InstructionListType Addr = materializeAddress(Target, Ctx, RISCV::X10);
    Insts.insert(Insts.end(), Addr.begin(), Addr.end());
    InstructionListType IncInsts =
        createIncMemory(RISCV::X10, RISCV::X11, RISCV::X0);
    Insts.insert(Insts.end(), IncInsts.begin(), IncInsts.end());
    reloadRegs(Insts, {RISCV::X10, RISCV::X11});
    return Insts;
  }
655
656 void createDirectCall(MCInst &Inst, const MCSymbol *Target, MCContext *Ctx,
657 bool IsTailCall) override {
658 Inst.setOpcode(RISCV::JAL);
659 Inst.clear();
660 if (IsTailCall) {
661 Inst.addOperand(Op: MCOperand::createReg(Reg: RISCV::X0));
662 Inst.addOperand(Op: MCOperand::createExpr(Val: getTargetExprFor(
663 Inst, Expr: MCSymbolRefExpr::create(Symbol: Target, Ctx&: *Ctx), Ctx&: *Ctx, RelType: 0)));
664 convertJmpToTailCall(Inst);
665 } else {
666 Inst.addOperand(Op: MCOperand::createReg(Reg: RISCV::X1));
667 Inst.addOperand(Op: MCOperand::createExpr(Val: getTargetExprFor(
668 Inst, Expr: MCSymbolRefExpr::create(Symbol: Target, Ctx&: *Ctx), Ctx&: *Ctx, RelType: 0)));
669 }
670 }
671
672 void createIndirectCallInst(MCInst &Inst, bool IsTailCall, MCPhysReg Reg,
673 int64_t Disp) const {
674 Inst.clear();
675 Inst.setOpcode(RISCV::JALR);
676 Inst.clear();
677 if (IsTailCall) {
678 Inst.addOperand(Op: MCOperand::createReg(Reg: RISCV::X0));
679 Inst.addOperand(Op: MCOperand::createReg(Reg));
680 Inst.addOperand(Op: MCOperand::createImm(Val: Disp));
681 } else {
682 Inst.addOperand(Op: MCOperand::createReg(Reg: RISCV::X1));
683 Inst.addOperand(Op: MCOperand::createReg(Reg));
684 Inst.addOperand(Op: MCOperand::createImm(Val: Disp));
685 }
686 }
687
  /// Entry block of the indirect-call instrumentation handler: if the
  /// runtime trampoline pointer is non-null, call through it and return via
  /// IndCallHandler; otherwise jump straight to IndCallHandler.
  InstructionListType
  createInstrumentedIndCallHandlerEntryBB(const MCSymbol *InstrTrampoline,
                                          const MCSymbol *IndCallHandler,
                                          MCContext *Ctx) override {
    // Code sequence used to check whether InstrTampoline was initialized
    // and call it if so, returns via IndCallHandler
    // sp -16(sp)
    // sd x10, 0(sp)
    // sd x11, 0(sp)
    // la x10, InstrTrampoline -> auipc + addi
    // ld x10, [x10]
    // beq x10, x11, IndCallHandler
    // sp -16(sp)
    // sd x1, 0(sp)
    // jalr x1,x10,0
    // ld x1, [sp], #16
    // sp 16(sp)
    // jal x0, IndCallHandler

    InstructionListType Insts;
    // Preserve the scratch registers x10/x11.
    spillRegs(Insts, {RISCV::X10, RISCV::X11});
    // Load the trampoline function pointer into x10.
    InstructionListType Addr =
        materializeAddress(InstrTrampoline, Ctx, RISCV::X10);
    Insts.insert(Insts.end(), Addr.begin(), Addr.end());
    Insts.emplace_back();
    loadReg(Insts.back(), RISCV::X10, RISCV::X10, 0);
    // If the pointer is null (compared via a zeroed x11), skip the call.
    InstructionListType cmpJmp =
        createRegCmpJE(RISCV::X10, RISCV::X11, IndCallHandler, Ctx);
    Insts.insert(Insts.end(), cmpJmp.begin(), cmpJmp.end());
    // Save ra across the trampoline call.
    Insts.emplace_back();
    createStackPointerIncrement(Insts.back(), 16);
    Insts.emplace_back();
    storeReg(Insts.back(), RISCV::X1, RISCV::X2, 0);
    Insts.emplace_back();
    createIndirectCallInst(Insts.back(), /*IsTailCall*/ false, RISCV::X10, 0);
    Insts.emplace_back();
    loadReg(Insts.back(), RISCV::X1, RISCV::X2, 0);
    Insts.emplace_back();
    createStackPointerDecrement(Insts.back(), 16);
    // Continue at the common handler exit.
    Insts.emplace_back();
    createDirectCall(Insts.back(), IndCallHandler, Ctx, /*IsTailCall*/ true);
    return Insts;
  }
731
  /// Exit block of the indirect-call instrumentation handler: restore the
  /// saved registers, reload the original call target into x5, and
  /// tail-jump to it.
  InstructionListType createInstrumentedIndCallHandlerExitBB() const override {
    InstructionListType Insts;
    reloadRegs(Insts, {RISCV::X10, RISCV::X11});
    Insts.emplace_back();
    loadReg(Insts.back(), RISCV::X5, RISCV::X2, 0);
    Insts.emplace_back();
    createStackPointerDecrement(Insts.back(), 16);
    // NOTE(review): x10/x11 are reloaded a second time here — presumably
    // matching the two spillRegs frames pushed by
    // createInstrumentedIndirectCall; confirm the frame layout.
    reloadRegs(Insts, {RISCV::X10, RISCV::X11});
    Insts.emplace_back();
    createIndirectCallInst(Insts.back(), /*IsTailCall*/ true, RISCV::X5, 0);
    return Insts;
  }

  /// Tail calls share the same handler exit sequence.
  InstructionListType
  createInstrumentedIndTailCallHandlerExitBB() const override {
    return createInstrumentedIndCallHandlerExitBB();
  }
749
750 std::vector<MCInst> createSymbolTrampoline(const MCSymbol *TgtSym,
751 MCContext *Ctx) override {
752 std::vector<MCInst> Insts;
753 createShortJmp(Seq&: Insts, Target: TgtSym, Ctx, /*IsTailCall*/ true);
754 return Insts;
755 }
756
757 InstructionListType createNumCountersGetter(MCContext *Ctx) const override {
758 return createGetter(Ctx, name: "__bolt_num_counters");
759 }
760
761 InstructionListType
762 createInstrLocationsGetter(MCContext *Ctx) const override {
763 return createGetter(Ctx, name: "__bolt_instr_locations");
764 }
765
766 InstructionListType createInstrTablesGetter(MCContext *Ctx) const override {
767 return createGetter(Ctx, name: "__bolt_instr_tables");
768 }
769
770 InstructionListType createInstrNumFuncsGetter(MCContext *Ctx) const override {
771 return createGetter(Ctx, name: "__bolt_instr_num_funcs");
772 }
773
774 void convertIndirectCallToLoad(MCInst &Inst, MCPhysReg Reg) override {
775 bool IsTailCall = isTailCall(Inst);
776 if (IsTailCall)
777 removeAnnotation(Inst, Index: MCPlus::MCAnnotation::kTailCall);
778 Inst.setOpcode(RISCV::ADD);
779 Inst.insert(I: Inst.begin(), Op: MCOperand::createReg(Reg));
780 Inst.insert(I: Inst.begin() + 1, Op: MCOperand::createReg(Reg: RISCV::X0));
781 }
782
783 InstructionListType createLoadImmediate(const MCPhysReg Dest,
784 uint64_t Imm) const override {
785 InstructionListType Insts;
786 // get IMM higher 32bit
787 Insts.emplace_back(
788 args&: MCInstBuilder(RISCV::LUI).addReg(Reg: Dest).addImm(Val: (Imm >> 44) & 0xFFFFF));
789 Insts.emplace_back(args&: MCInstBuilder(RISCV::LUI)
790 .addReg(Reg: RISCV::X5)
791 .addImm(Val: (Imm >> 32) & 0xFFF));
792 Insts.emplace_back(args&: MCInstBuilder(RISCV::SRLI)
793 .addReg(Reg: RISCV::X5)
794 .addReg(Reg: RISCV::X5)
795 .addImm(Val: 12));
796 Insts.emplace_back(
797 args&: MCInstBuilder(RISCV::OR).addReg(Reg: Dest).addReg(Reg: Dest).addReg(Reg: RISCV::X5));
798 Insts.emplace_back(
799 args&: MCInstBuilder(RISCV::SLLI).addReg(Reg: Dest).addReg(Reg: Dest).addImm(Val: 32));
800
801 // get IMM lower 32bit
802 Insts.emplace_back(args&: MCInstBuilder(RISCV::LUI)
803 .addReg(Reg: RISCV::X5)
804 .addImm(Val: (Imm >> 12) & 0xFFFFF));
805 Insts.emplace_back(
806 args&: MCInstBuilder(RISCV::LUI).addReg(Reg: RISCV::X6).addImm(Val: (Imm)&0xFFF));
807 Insts.emplace_back(args&: MCInstBuilder(RISCV::SRLI)
808 .addReg(Reg: RISCV::X6)
809 .addReg(Reg: RISCV::X6)
810 .addImm(Val: 12));
811 Insts.emplace_back(
812 args&: MCInstBuilder(RISCV::OR).addReg(Reg: RISCV::X5).addReg(Reg: RISCV::X5).addReg(
813 Reg: RISCV::X6));
814
815 // get 64bit IMM
816 Insts.emplace_back(
817 args&: MCInstBuilder(RISCV::OR).addReg(Reg: Dest).addReg(Reg: Dest).addReg(Reg: RISCV::X5));
818 return Insts;
819 }
820
  /// Rewrite an indirect call so it enters the instrumentation handler at
  /// HandlerFuncAddr with the original target in a0 (x10) and the call-site
  /// ID in a1 (x11), both also passed on the stack.
  InstructionListType createInstrumentedIndirectCall(MCInst &&CallInst,
                                                     MCSymbol *HandlerFuncAddr,
                                                     int CallSiteID,
                                                     MCContext *Ctx) override {
    // Code sequence used to enter indirect call instrumentation helper:
    // addi sp, sp, -0x10
    // sd a0, 0x0(sp)
    // sd a1, 0x8(sp)
    // mov target x0 convertIndirectCallToLoad -> add a0, zero, target
    // mov x1 CallSiteID createLoadImmediate
    // addi sp, sp, -0x10
    // sd a0, 0x0(sp)
    // sd a1, 0x8(sp)
    // la x0 *HandlerFuncAddr -> auipc + addi
    // jalr x0

    InstructionListType Insts;
    // First frame: preserve the caller's live a0/a1.
    spillRegs(Insts, {RISCV::X10, RISCV::X11});
    // Turn the original call into "add a0, zero, <target>".
    Insts.emplace_back(CallInst);
    convertIndirectCallToLoad(Insts.back(), RISCV::X10);
    Insts.insert, // (placeholder removed)
    InstructionListType LoadImm = createLoadImmediate(RISCV::X11, CallSiteID);
    Insts.insert(Insts.end(), LoadImm.begin(), LoadImm.end());
    // Second frame: hand target and CallSiteID to the handler on the stack.
    spillRegs(Insts, {RISCV::X10, RISCV::X11});
    InstructionListType Addr =
        materializeAddress(HandlerFuncAddr, Ctx, RISCV::X5);
    Insts.insert(Insts.end(), Addr.begin(), Addr.end());
    Insts.emplace_back();
    createIndirectCallInst(Insts.back(), isTailCall(CallInst), RISCV::X5, 0);

    // Carry over metadata including tail call marker if present.
    stripAnnotations(Insts.back());
    moveAnnotations(std::move(CallInst), Insts.back());

    return Insts;
  }
856};
857
858} // end anonymous namespace
859
860namespace llvm {
861namespace bolt {
862
/// Factory used by BOLT's target registration; the caller takes ownership
/// of the returned builder.
MCPlusBuilder *createRISCVMCPlusBuilder(const MCInstrAnalysis *Analysis,
                                        const MCInstrInfo *Info,
                                        const MCRegisterInfo *RegInfo,
                                        const MCSubtargetInfo *STI) {
  return new RISCVMCPlusBuilder(Analysis, Info, RegInfo, STI);
}
869
870} // namespace bolt
871} // namespace llvm
872

// Source: bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp