//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);
static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                  : CSR_Interrupt_SaveList;
  }

  bool HasVectorCSR =
      MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall;

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_SaveList;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (HasVectorCSR)
      return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (HasVectorCSR)
      return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (HasVectorCSR)
      return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  // Mark any registers requested to be reserved as such
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // There are only 16 GPRs for RVE.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16; Reg <= RISCV::X31; Reg++)
      markSuperRegs(Reserved, Reg);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state registers.
  markSuperRegs(Reserved, RISCV::VCIX_STATE);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.hasStdExtE())
      report_fatal_error("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23);
    markSuperRegs(Reserved, RISCV::X27);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply it by the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % 8 == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / 8) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / 8;
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
        .setMIFlag(Flag);

    if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
        (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
      unsigned Opc = NumOfVReg == 2 ? RISCV::SH1ADD :
        (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill).addReg(SrcReg)
          .setMIFlag(Flag);
    } else {
      TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
          .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
          .setMIFlag(Flag);
    }
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
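  // For example, with a required alignment of 16 and Val == 3000, the split
  // below emits ADDI of 2032 (a multiple of 16, so the intermediate result
  // stays aligned) followed by ADDI of the remaining 968.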
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use shNadd if doing so lets us materialize a 12-bit immediate with a single
  // instruction. This saves 1 instruction over the full lui/addi+add fallback
  // path. We avoid anything which can be done with a single lui as it might
  // be compressible. Note that the sh1add case is fully covered by the 2x addi
  // case just above and is thus omitted.
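  // For example, Val == 16376 (2047 * 8) is materialized as ADDI of 2047 into
  // a scratch register followed by SH3ADD: two instructions instead of the
  // three needed by the lui/addi+add fallback.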
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    unsigned Opc = 0;
    if (isShiftedInt<12, 3>(Val)) {
      Opc = RISCV::SH3ADD;
      Val = Val >> 3;
    } else if (isShiftedInt<12, 2>(Val)) {
      Opc = RISCV::SH2ADD;
      Val = Val >> 2;
    }
    if (Opc) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .setMIFlag(Flag);
      return;
    }
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
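// For example, PseudoVSPILL2_M2 of v8m2_v10m2 based at 'base' becomes roughly:
//   VS2R_V v8, (base); ADD tmp, base, 2*vlenb; VS2R_V v10, (tmp)
// where 2*vlenb is computed at runtime unless the exact VLEN is known.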
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
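  // (e.g. if the exact VLEN is known to be 128 bits, VLENB is 16 and the
  // stride between the NF register groups for LMUL=2 is the constant 32.)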
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit-use of the super register to describe that we are using
    // part of it; this prevents the machine verifier from complaining when
    // part of the subreg is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() &&
      ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    int64_t Val = Offset.getFixed();
    int64_t Lo12 = SignExtend64<12>(Val);
    unsigned Opc = MI.getOpcode();
    if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some target may fuse the canonical
      // 32 bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
                Opc == RISCV::PREFETCH_W) &&
               (Lo12 & 0b11111) != 0) {
      // Prefetch instructions require the offset to be 32 byte aligned.
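      // For example, Lo12 == 0x234 has nonzero low five bits, so the
      // immediate is cleared here and the whole offset is added into the
      // scratch base register computed by adjustReg below.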
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
                Opc == RISCV::PseudoRV32ZdinxSD) &&
               Lo12 >= 2044) {
      // This instruction will be split into 2 instructions. The second
      // instruction will add 4 to the immediate. If that would overflow 12
      // bits, we can't fold the offset.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with 12 bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst a LUI and an ADD.
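      // For example, Val == 0x12345 splits into Lo12 == 0x345, which is folded
      // into the user instruction, and 0x12000, which adjustReg materializes
      // with a single LUI plus an ADD.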
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If after materializing the adjustment, we have a pointless ADDI, remove it
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    // Estimate the stack size used to store callee saved registers
    // (excluding reserved registers).
    unsigned CalleeSavedSize = 0;
    BitVector ReservedRegs = getReservedRegs(MF);
    for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
         ++R) {
      if (!ReservedRegs.test(Reg))
        CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
    }

    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
  }

  // Assume 128 bytes spill slots size to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before
// insertion point I.
// Return materialized frame pointer.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MFI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex Operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_RegMask;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
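  // For example, an offset of 16 fixed + 24 scalable bytes is emitted as
  // DW_OP_plus_uconst 16, DW_OP_constu 3, DW_OP_bregx VLENB 0, DW_OP_mul,
  // DW_OP_plus.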

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
                 !DisableCostPerUse
             ? 1
             : 0;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
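// For example, if the def of "%dst = ADD %a, %b" is assigned the same physical
// register as one of its register operands, the add can use the compressed
// c.add encoding.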
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    // TODO: Support GPRPair subregisters? Need to be careful with even/odd
    // registers. If the virtual register is an odd register of a pair and the
    // physical register is even (or vice versa), we should not add the hint.
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands \p NeedGPRC will be set to true.
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI: {
      NeedGPRC = true;
      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();
      if (isInt<6>(Imm))
        return true;
      // c.zext.b
      return Subtarget.hasStdExtZcb() && Imm == 255;
    }
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::MUL:
    case RISCV::SEXT_B:
    case RISCV::SEXT_H:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // c.mul, c.sext.b, c.sext.h, c.zext.h
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb();
    case RISCV::ADD_UW:
      // c.zext.w
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    case RISCV::XORI:
      // c.not
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}