1//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetRegisterInfo interface.
10//
11//===----------------------------------------------------------------------===//
12
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
#include <utility>
42
43#define DEBUG_TYPE "target-reg-info"
44
45using namespace llvm;
46
// Command-line threshold: live ranges whose size exceeds this value are
// considered "huge" for the purposes of region splitting (see
// shouldRegionSplitForVirtReg below).
static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(Val: 5000));
52
// Construct from TableGen-emitted target description tables. All pointer
// arguments reference static target data and are never owned by this object.
TargetRegisterInfo::TargetRegisterInfo(
    const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
    regclass_iterator RCE, const char *const *SRINames,
    const SubRegCoveredBits *SubIdxRanges, const LaneBitmask *SRILaneMasks,
    LaneBitmask SRICoveringLanes, const RegClassInfo *const RCIs,
    const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
    : InfoDesc(ID), SubRegIndexNames(SRINames), SubRegIdxRanges(SubIdxRanges),
      SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
      CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
      HwMode(Mode) {}

TargetRegisterInfo::~TargetRegisterInfo() = default;
65
66bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
67 const MachineFunction &MF, const LiveInterval &VirtReg) const {
68 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
69 const MachineRegisterInfo &MRI = MF.getRegInfo();
70 MachineInstr *MI = MRI.getUniqueVRegDef(Reg: VirtReg.reg());
71 if (MI && TII->isTriviallyReMaterializable(MI: *MI) &&
72 VirtReg.size() > HugeSizeForSplit)
73 return false;
74 return true;
75}
76
77void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
78 MCRegister Reg) const {
79 for (MCPhysReg SR : superregs_inclusive(Reg))
80 RegisterSet.set(SR);
81}
82
/// Verify that every register in \p RegisterSet also has all of its
/// super-registers in the set, except for registers listed in \p Exceptions.
/// Prints a diagnostic and returns false on the first violation found.
bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
    ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    // Skip registers whose super-register chain was already validated.
    if (Checked[Reg])
      continue;
    for (MCPhysReg SR : superregs(Reg)) {
      if (!RegisterSet[SR] && !is_contained(Range&: Exceptions, Element: Reg)) {
        dbgs() << "Error: Super register " << printReg(Reg: SR, TRI: this)
               << " of reserved register " << printReg(Reg, TRI: this)
               << " is not reserved.\n";
        return false;
      }

      // We transitively check superregs. So we can remember this for later
      // to avoid compiletime explosion in deep register hierarchies.
      Checked.set(SR);
    }
  }
  return true;
}
105
106namespace llvm {
107
108Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
109 unsigned SubIdx, const MachineRegisterInfo *MRI) {
110 return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
111 if (!Reg)
112 OS << "$noreg";
113 else if (Register::isStackSlot(Reg))
114 OS << "SS#" << Register::stackSlot2Index(Reg);
115 else if (Reg.isVirtual()) {
116 StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
117 if (Name != "") {
118 OS << '%' << Name;
119 } else {
120 OS << '%' << Register::virtReg2Index(Reg);
121 }
122 } else if (!TRI)
123 OS << '$' << "physreg" << Reg;
124 else if (Reg < TRI->getNumRegs()) {
125 OS << '$';
126 printLowerCase(String: TRI->getName(RegNo: Reg), Out&: OS);
127 } else
128 llvm_unreachable("Register kind is unsupported.");
129
130 if (SubIdx) {
131 if (TRI)
132 OS << ':' << TRI->getSubRegIndexName(SubIdx);
133 else
134 OS << ":sub(" << SubIdx << ')';
135 }
136 });
137}
138
139Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
140 return Printable([Unit, TRI](raw_ostream &OS) {
141 // Generic printout when TRI is missing.
142 if (!TRI) {
143 OS << "Unit~" << Unit;
144 return;
145 }
146
147 // Check for invalid register units.
148 if (Unit >= TRI->getNumRegUnits()) {
149 OS << "BadUnit~" << Unit;
150 return;
151 }
152
153 // Normal units have at least one root.
154 MCRegUnitRootIterator Roots(Unit, TRI);
155 assert(Roots.isValid() && "Unit has no roots.");
156 OS << TRI->getName(RegNo: *Roots);
157 for (++Roots; Roots.isValid(); ++Roots)
158 OS << '~' << TRI->getName(RegNo: *Roots);
159 });
160}
161
162Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
163 return Printable([Unit, TRI](raw_ostream &OS) {
164 if (Register::isVirtualRegister(Reg: Unit)) {
165 OS << '%' << Register::virtReg2Index(Reg: Unit);
166 } else {
167 OS << printRegUnit(Unit, TRI);
168 }
169 });
170}
171
172Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
173 const TargetRegisterInfo *TRI) {
174 return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
175 if (RegInfo.getRegClassOrNull(Reg))
176 OS << StringRef(TRI->getRegClassName(Class: RegInfo.getRegClass(Reg))).lower();
177 else if (RegInfo.getRegBankOrNull(Reg))
178 OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
179 else {
180 OS << "_";
181 assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
182 "Generic registers must have a valid type");
183 }
184 });
185}
186
187} // end namespace llvm
188
189/// getAllocatableClass - Return the maximal subclass of the given register
190/// class that is alloctable, or NULL.
191const TargetRegisterClass *
192TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
193 if (!RC || RC->isAllocatable())
194 return RC;
195
196 for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
197 ++It) {
198 const TargetRegisterClass *SubRC = getRegClass(i: It.getID());
199 if (SubRC->isAllocatable())
200 return SubRC;
201 }
202 return nullptr;
203}
204
205/// getMinimalPhysRegClass - Returns the Register Class of a physical
206/// register of the given type, picking the most sub register class of
207/// the right type that contains this physreg.
208const TargetRegisterClass *
209TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg, MVT VT) const {
210 assert(Register::isPhysicalRegister(reg) &&
211 "reg must be a physical register");
212
213 // Pick the most sub register class of the right type that contains
214 // this physreg.
215 const TargetRegisterClass* BestRC = nullptr;
216 for (const TargetRegisterClass* RC : regclasses()) {
217 if ((VT == MVT::Other || isTypeLegalForClass(RC: *RC, T: VT)) &&
218 RC->contains(Reg: reg) && (!BestRC || BestRC->hasSubClass(RC)))
219 BestRC = RC;
220 }
221
222 assert(BestRC && "Couldn't find the register class");
223 return BestRC;
224}
225
226const TargetRegisterClass *
227TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg, LLT Ty) const {
228 assert(Register::isPhysicalRegister(reg) &&
229 "reg must be a physical register");
230
231 // Pick the most sub register class of the right type that contains
232 // this physreg.
233 const TargetRegisterClass *BestRC = nullptr;
234 for (const TargetRegisterClass *RC : regclasses()) {
235 if ((!Ty.isValid() || isTypeLegalForClass(RC: *RC, T: Ty)) && RC->contains(Reg: reg) &&
236 (!BestRC || BestRC->hasSubClass(RC)))
237 BestRC = RC;
238 }
239
240 return BestRC;
241}
242
243/// getAllocatableSetForRC - Toggle the bits that represent allocatable
244/// registers for the specific register class.
245static void getAllocatableSetForRC(const MachineFunction &MF,
246 const TargetRegisterClass *RC, BitVector &R){
247 assert(RC->isAllocatable() && "invalid for nonallocatable sets");
248 ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
249 for (MCPhysReg PR : Order)
250 R.set(PR);
251}
252
253BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
254 const TargetRegisterClass *RC) const {
255 BitVector Allocatable(getNumRegs());
256 if (RC) {
257 // A register class with no allocatable subclass returns an empty set.
258 const TargetRegisterClass *SubClass = getAllocatableClass(RC);
259 if (SubClass)
260 getAllocatableSetForRC(MF, RC: SubClass, R&: Allocatable);
261 } else {
262 for (const TargetRegisterClass *C : regclasses())
263 if (C->isAllocatable())
264 getAllocatableSetForRC(MF, RC: C, R&: Allocatable);
265 }
266
267 // Mask out the reserved registers
268 const MachineRegisterInfo &MRI = MF.getRegInfo();
269 const BitVector &Reserved = MRI.getReservedRegs();
270 Allocatable.reset(RHS: Reserved);
271
272 return Allocatable;
273}
274
275static inline
276const TargetRegisterClass *firstCommonClass(const uint32_t *A,
277 const uint32_t *B,
278 const TargetRegisterInfo *TRI) {
279 for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
280 if (unsigned Common = *A++ & *B++)
281 return TRI->getRegClass(i: I + llvm::countr_zero(Val: Common));
282 return nullptr;
283}
284
285const TargetRegisterClass *
286TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
287 const TargetRegisterClass *B) const {
288 // First take care of the trivial cases.
289 if (A == B)
290 return A;
291 if (!A || !B)
292 return nullptr;
293
294 // Register classes are ordered topologically, so the largest common
295 // sub-class it the common sub-class with the smallest ID.
296 return firstCommonClass(A: A->getSubClassMask(), B: B->getSubClassMask(), TRI: this);
297}
298
299const TargetRegisterClass *
300TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
301 const TargetRegisterClass *B,
302 unsigned Idx) const {
303 assert(A && B && "Missing register class");
304 assert(Idx && "Bad sub-register index");
305
306 // Find Idx in the list of super-register indices.
307 for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
308 if (RCI.getSubReg() == Idx)
309 // The bit mask contains all register classes that are projected into B
310 // by Idx. Find a class that is also a sub-class of A.
311 return firstCommonClass(A: RCI.getMask(), B: A->getSubClassMask(), TRI: this);
312 return nullptr;
313}
314
/// Find the smallest register class into which (RCA, SubA) and (RCB, SubB)
/// can both be projected through a common pair of sub-register indices.
/// On success, PreA/PreB receive the prefix indices such that
/// PreA+SubA == PreB+SubB on the returned class; returns null on failure.
const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                       const TargetRegisterClass *RCB, unsigned SubB,
                       unsigned &PreA, unsigned &PreB) const {
  assert(RCA && SubA && RCB && SubB && "Invalid arguments");

  // Search all pairs of sub-register indices that project into RCA and RCB
  // respectively. This is quadratic, but usually the sets are very small. On
  // most targets like X86, there will only be a single sub-register index
  // (e.g., sub_16bit projecting into GR16).
  //
  // The worst case is a register class like DPR on ARM.
  // We have indices dsub_0..dsub_7 projecting into that class.
  //
  // It is very common that one register class is a sub-register of the other.
  // Arrange for RCA to be the larger register so the answer will be found in
  // the first iteration. This makes the search linear for the most common
  // case.
  const TargetRegisterClass *BestRC = nullptr;
  // Track which out-parameter corresponds to (possibly swapped) RCA/RCB.
  unsigned *BestPreA = &PreA;
  unsigned *BestPreB = &PreB;
  if (getRegSizeInBits(RC: *RCA) < getRegSizeInBits(RC: *RCB)) {
    std::swap(a&: RCA, b&: RCB);
    std::swap(a&: SubA, b&: SubB);
    std::swap(a&: BestPreA, b&: BestPreB);
  }

  // Also terminate the search once we have found a register class as small as
  // RCA; no super-register class can be smaller than RCA itself.
  unsigned MinSize = getRegSizeInBits(RC: *RCA);

  for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
    unsigned FinalA = composeSubRegIndices(a: IA.getSubReg(), b: SubA);
    for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
      // Check if a common super-register class exists for this index pair.
      const TargetRegisterClass *RC =
          firstCommonClass(A: IA.getMask(), B: IB.getMask(), TRI: this);
      if (!RC || getRegSizeInBits(RC: *RC) < MinSize)
        continue;

      // The indexes must compose identically: PreA+SubA == PreB+SubB.
      unsigned FinalB = composeSubRegIndices(a: IB.getSubReg(), b: SubB);
      if (FinalA != FinalB)
        continue;

      // Is RC a better candidate than BestRC?
      if (BestRC && getRegSizeInBits(RC: *RC) >= getRegSizeInBits(RC: *BestRC))
        continue;

      // Yes, RC is the smallest super-register seen so far.
      BestRC = RC;
      *BestPreA = IA.getSubReg();
      *BestPreB = IB.getSubReg();

      // Bail early if we reached MinSize. We won't find a better candidate.
      if (getRegSizeInBits(RC: *BestRC) == MinSize)
        return BestRC;
    }
  }
  return BestRC;
}
376
377/// Check if the registers defined by the pair (RegisterClass, SubReg)
378/// share the same register file.
379static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
380 const TargetRegisterClass *DefRC,
381 unsigned DefSubReg,
382 const TargetRegisterClass *SrcRC,
383 unsigned SrcSubReg) {
384 // Same register class.
385 if (DefRC == SrcRC)
386 return true;
387
388 // Both operands are sub registers. Check if they share a register class.
389 unsigned SrcIdx, DefIdx;
390 if (SrcSubReg && DefSubReg) {
391 return TRI.getCommonSuperRegClass(RCA: SrcRC, SubA: SrcSubReg, RCB: DefRC, SubB: DefSubReg,
392 PreA&: SrcIdx, PreB&: DefIdx) != nullptr;
393 }
394
395 // At most one of the register is a sub register, make it Src to avoid
396 // duplicating the test.
397 if (!SrcSubReg) {
398 std::swap(a&: DefSubReg, b&: SrcSubReg);
399 std::swap(a&: DefRC, b&: SrcRC);
400 }
401
402 // One of the register is a sub register, check if we can get a superclass.
403 if (SrcSubReg)
404 return TRI.getMatchingSuperRegClass(A: SrcRC, B: DefRC, Idx: SrcSubReg) != nullptr;
405
406 // Plain copy.
407 return TRI.getCommonSubClass(A: DefRC, B: SrcRC) != nullptr;
408}
409
410bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
411 unsigned DefSubReg,
412 const TargetRegisterClass *SrcRC,
413 unsigned SrcSubReg) const {
414 // If this source does not incur a cross register bank copy, use it.
415 return shareSameRegisterFile(TRI: *this, DefRC, DefSubReg, SrcRC, SrcSubReg);
416}
417
// Compute target-independent register allocator hints to help eliminate
// copies. Valid hints are appended to Hints; the return value of false
// means the allocator may still try registers not in Hints.
bool TargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  // Hints_MRI.first is the hint type (nonzero means a target-specific hint
  // occupies the front of the vector); Hints_MRI.second lists the hinted
  // registers.
  const std::pair<unsigned, SmallVector<Register, 4>> &Hints_MRI =
      MRI.getRegAllocationHints(VReg: VirtReg);

  SmallSet<Register, 32> HintedRegs;
  // First hint may be a target hint; this generic code only understands
  // type-0 hints, so skip it and let the target's override handle it.
  bool Skip = (Hints_MRI.first != 0);
  for (auto Reg : Hints_MRI.second) {
    if (Skip) {
      Skip = false;
      continue;
    }

    // Target-independent hints are either a physical or a virtual register;
    // map virtual registers to their assigned physreg when a VRM is given.
    Register Phys = Reg;
    if (VRM && Phys.isVirtual())
      Phys = VRM->getPhys(virtReg: Phys);

    // Don't add the same reg twice (Hints_MRI may contain multiple virtual
    // registers allocated to the same physreg).
    if (!HintedRegs.insert(V: Phys).second)
      continue;
    // Check that Phys is a valid hint in VirtReg's register class.
    if (!Phys.isPhysical())
      continue;
    if (MRI.isReserved(PhysReg: Phys))
      continue;
    // Check that Phys is in the allocation order. We shouldn't heed hints
    // from VirtReg's register class if they aren't in the allocation order. The
    // target probably has a reason for removing the register.
    if (!is_contained(Range&: Order, Element: Phys))
      continue;

    // All clear, tell the register allocator to prefer this register.
    Hints.push_back(Elt: Phys);
  }
  return false;
}
461
462bool TargetRegisterInfo::isCalleeSavedPhysReg(
463 MCRegister PhysReg, const MachineFunction &MF) const {
464 if (PhysReg == 0)
465 return false;
466 const uint32_t *callerPreservedRegs =
467 getCallPreservedMask(MF, MF.getFunction().getCallingConv());
468 if (callerPreservedRegs) {
469 assert(Register::isPhysicalRegister(PhysReg) &&
470 "Expected physical register");
471 return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1;
472 }
473 return false;
474}
475
476bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
477 return !MF.getFunction().hasFnAttribute(Kind: "no-realign-stack");
478}
479
480bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
481 const MachineFrameInfo &MFI = MF.getFrameInfo();
482 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
483 const Function &F = MF.getFunction();
484 return F.hasFnAttribute(Kind: "stackrealign") ||
485 (MFI.getMaxAlign() > TFI->getStackAlign()) ||
486 F.hasFnAttribute(Attribute::StackAlignment);
487}
488
489bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
490 const uint32_t *mask1) const {
491 unsigned N = (getNumRegs()+31) / 32;
492 for (unsigned I = 0; I < N; ++I)
493 if ((mask0[I] & mask1[I]) != mask0[I])
494 return false;
495 return true;
496}
497
498TypeSize
499TargetRegisterInfo::getRegSizeInBits(Register Reg,
500 const MachineRegisterInfo &MRI) const {
501 const TargetRegisterClass *RC{};
502 if (Reg.isPhysical()) {
503 // The size is not directly available for physical registers.
504 // Instead, we need to access a register class that contains Reg and
505 // get the size of that register class.
506 RC = getMinimalPhysRegClass(reg: Reg);
507 assert(RC && "Unable to deduce the register class");
508 return getRegSizeInBits(RC: *RC);
509 }
510 LLT Ty = MRI.getType(Reg);
511 if (Ty.isValid())
512 return Ty.getSizeInBits();
513
514 // Since Reg is not a generic register, it may have a register class.
515 RC = MRI.getRegClass(Reg);
516 assert(RC && "Unable to deduce the register class");
517 return getRegSizeInBits(RC: *RC);
518}
519
/// Greedily choose a set of sub-register indexes of \p RC whose lane masks
/// together cover exactly \p LaneMask. On success the chosen indexes are
/// appended to \p NeededIndexes and true is returned; returns false when no
/// such cover exists.
bool TargetRegisterInfo::getCoveringSubRegIndexes(
    const MachineRegisterInfo &MRI, const TargetRegisterClass *RC,
    LaneBitmask LaneMask, SmallVectorImpl<unsigned> &NeededIndexes) const {
  SmallVector<unsigned, 8> PossibleIndexes;
  unsigned BestIdx = 0;
  unsigned BestCover = 0;

  // Phase 1: collect every index usable with RC that stays inside LaneMask,
  // remembering the one covering the most lanes.
  for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
    // Is this index even compatible with the given class?
    if (getSubClassWithSubReg(RC, Idx) != RC)
      continue;
    LaneBitmask SubRegMask = getSubRegIndexLaneMask(SubIdx: Idx);
    // Early exit if we found a perfect match.
    if (SubRegMask == LaneMask) {
      BestIdx = Idx;
      break;
    }

    // The index must not cover any lanes outside \p LaneMask.
    if ((SubRegMask & ~LaneMask).any())
      continue;

    unsigned PopCount = SubRegMask.getNumLanes();
    PossibleIndexes.push_back(Elt: Idx);
    if (PopCount > BestCover) {
      BestCover = PopCount;
      BestIdx = Idx;
    }
  }

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (BestIdx == 0)
    return false;

  NeededIndexes.push_back(Elt: BestIdx);

  // Phase 2 — greedy heuristic: keep iterating, each time picking the index
  // that covers the most still-uncovered lanes.
  LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(SubIdx: BestIdx);
  while (LanesLeft.any()) {
    // Note: this BestIdx intentionally shadows the outer one for this pass.
    unsigned BestIdx = 0;
    int BestCover = std::numeric_limits<int>::min();
    for (unsigned Idx : PossibleIndexes) {
      LaneBitmask SubRegMask = getSubRegIndexLaneMask(SubIdx: Idx);
      // Early exit if we found a perfect match.
      if (SubRegMask == LanesLeft) {
        BestIdx = Idx;
        break;
      }

      // Do not cover already-covered lanes to avoid creating cycles
      // in copy bundles (= bundle contains copies that write to the
      // registers).
      if ((SubRegMask & ~LanesLeft).any())
        continue;

      // Try to cover as many of the remaining lanes as possible.
      const int Cover = (SubRegMask & LanesLeft).getNumLanes();
      if (Cover > BestCover) {
        BestCover = Cover;
        BestIdx = Idx;
      }
    }

    if (BestIdx == 0)
      return false; // Impossible to handle

    NeededIndexes.push_back(Elt: BestIdx);

    LanesLeft &= ~getSubRegIndexLaneMask(SubIdx: BestIdx);
  }

  // The outer BestIdx is nonzero here, so this converts to true.
  return BestIdx;
}
594
595unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
596 assert(Idx && Idx < getNumSubRegIndices() &&
597 "This is not a subregister index");
598 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
599}
600
601unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
602 assert(Idx && Idx < getNumSubRegIndices() &&
603 "This is not a subregister index");
604 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
605}
606
607Register
608TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
609 const MachineRegisterInfo *MRI) const {
610 while (true) {
611 const MachineInstr *MI = MRI->getVRegDef(Reg: SrcReg);
612 if (!MI->isCopyLike())
613 return SrcReg;
614
615 Register CopySrcReg;
616 if (MI->isCopy())
617 CopySrcReg = MI->getOperand(i: 1).getReg();
618 else {
619 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
620 CopySrcReg = MI->getOperand(i: 2).getReg();
621 }
622
623 if (!CopySrcReg.isVirtual())
624 return CopySrcReg;
625
626 SrcReg = CopySrcReg;
627 }
628}
629
630Register TargetRegisterInfo::lookThruSingleUseCopyChain(
631 Register SrcReg, const MachineRegisterInfo *MRI) const {
632 while (true) {
633 const MachineInstr *MI = MRI->getVRegDef(Reg: SrcReg);
634 // Found the real definition, return it if it has a single use.
635 if (!MI->isCopyLike())
636 return MRI->hasOneNonDBGUse(RegNo: SrcReg) ? SrcReg : Register();
637
638 Register CopySrcReg;
639 if (MI->isCopy())
640 CopySrcReg = MI->getOperand(i: 1).getReg();
641 else {
642 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
643 CopySrcReg = MI->getOperand(i: 2).getReg();
644 }
645
646 // Continue only if the next definition in the chain is for a virtual
647 // register that has a single use.
648 if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(RegNo: CopySrcReg))
649 return Register();
650
651 SrcReg = CopySrcReg;
652 }
653}
654
655void TargetRegisterInfo::getOffsetOpcodes(
656 const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
657 assert(!Offset.getScalable() && "Scalable offsets are not handled");
658 DIExpression::appendOffset(Ops, Offset: Offset.getFixed());
659}
660
/// Build a new DIExpression by prepending a stack-offset computation (and
/// optional deref/stack-value/entry-value markers selected by
/// \p PrependFlags) to \p Expr.
DIExpression *
TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
                                            unsigned PrependFlags,
                                            const StackOffset &Offset) const {
  assert((PrependFlags &
          ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
            DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
         "Unsupported prepend flag");
  SmallVector<uint64_t, 16> OffsetExpr;
  // Optionally dereference before applying the offset...
  if (PrependFlags & DIExpression::DerefBefore)
    OffsetExpr.push_back(Elt: dwarf::DW_OP_deref);
  // ... then emit the (possibly target-specific) offset opcodes ...
  getOffsetOpcodes(Offset, Ops&: OffsetExpr);
  // ... and optionally dereference the resulting address.
  if (PrependFlags & DIExpression::DerefAfter)
    OffsetExpr.push_back(Elt: dwarf::DW_OP_deref);
  return DIExpression::prependOpcodes(Expr, Ops&: OffsetExpr,
                                      StackValue: PrependFlags & DIExpression::StackValue,
                                      EntryValue: PrependFlags & DIExpression::EntryValue);
}
679
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug helper: print a register (with optional sub-register index) to
// dbgs(), followed by a newline. Only available in builds with dump support.
LLVM_DUMP_METHOD
void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
                                 const TargetRegisterInfo *TRI) {
  dbgs() << printReg(Reg, TRI, SubIdx: SubRegIndex) << "\n";
}
#endif
687

source code of llvm/lib/CodeGen/TargetRegisterInfo.cpp