1 | //===- RegAllocFast.cpp - A fast register allocator for debug code --------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file This register allocator allocates registers to a basic block at a |
10 | /// time, attempting to keep values in registers and reusing registers as |
11 | /// appropriate. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "llvm/ADT/ArrayRef.h" |
16 | #include "llvm/ADT/DenseMap.h" |
17 | #include "llvm/ADT/IndexedMap.h" |
18 | #include "llvm/ADT/MapVector.h" |
19 | #include "llvm/ADT/SmallSet.h" |
20 | #include "llvm/ADT/SmallVector.h" |
21 | #include "llvm/ADT/SparseSet.h" |
22 | #include "llvm/ADT/Statistic.h" |
23 | #include "llvm/CodeGen/MachineBasicBlock.h" |
24 | #include "llvm/CodeGen/MachineFrameInfo.h" |
25 | #include "llvm/CodeGen/MachineFunction.h" |
26 | #include "llvm/CodeGen/MachineFunctionPass.h" |
27 | #include "llvm/CodeGen/MachineInstr.h" |
28 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
29 | #include "llvm/CodeGen/MachineOperand.h" |
30 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
31 | #include "llvm/CodeGen/RegAllocCommon.h" |
32 | #include "llvm/CodeGen/RegAllocRegistry.h" |
33 | #include "llvm/CodeGen/RegisterClassInfo.h" |
34 | #include "llvm/CodeGen/TargetInstrInfo.h" |
35 | #include "llvm/CodeGen/TargetOpcodes.h" |
36 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
37 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
38 | #include "llvm/InitializePasses.h" |
39 | #include "llvm/MC/MCRegisterInfo.h" |
40 | #include "llvm/Pass.h" |
41 | #include "llvm/Support/Debug.h" |
42 | #include "llvm/Support/ErrorHandling.h" |
43 | #include "llvm/Support/raw_ostream.h" |
44 | #include <cassert> |
45 | #include <tuple> |
46 | #include <vector> |
47 | |
48 | using namespace llvm; |
49 | |
50 | #define DEBUG_TYPE "regalloc" |
51 | |
52 | STATISTIC(NumStores, "Number of stores added" ); |
53 | STATISTIC(NumLoads, "Number of loads added" ); |
54 | STATISTIC(NumCoalesced, "Number of copies coalesced" ); |
55 | |
56 | // FIXME: Remove this switch when all testcases are fixed! |
57 | static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs" , |
58 | cl::Hidden); |
59 | |
60 | static RegisterRegAlloc fastRegAlloc("fast" , "fast register allocator" , |
61 | createFastRegisterAllocator); |
62 | |
63 | namespace { |
64 | |
/// Assign an ascending index to each instruction in a machine basic block. The
/// index can be used to determine dominance between instructions in the same
/// MBB.
67 | class InstrPosIndexes { |
68 | public: |
69 | void unsetInitialized() { IsInitialized = false; } |
70 | |
71 | void init(const MachineBasicBlock &MBB) { |
72 | CurMBB = &MBB; |
73 | Instr2PosIndex.clear(); |
74 | uint64_t LastIndex = 0; |
75 | for (const MachineInstr &MI : MBB) { |
76 | LastIndex += InstrDist; |
77 | Instr2PosIndex[&MI] = LastIndex; |
78 | } |
79 | } |
80 | |
/// Set \p Index to the index of \p MI. If \p MI is newly inserted, try to
/// assign it an index without affecting the indexes of existing instructions.
/// Return true if the indexes of all instructions had to be reassigned.
84 | bool getIndex(const MachineInstr &MI, uint64_t &Index) { |
85 | if (!IsInitialized) { |
86 | init(MBB: *MI.getParent()); |
87 | IsInitialized = true; |
88 | Index = Instr2PosIndex.at(Val: &MI); |
89 | return true; |
90 | } |
91 | |
92 | assert(MI.getParent() == CurMBB && "MI is not in CurMBB" ); |
93 | auto It = Instr2PosIndex.find(Val: &MI); |
94 | if (It != Instr2PosIndex.end()) { |
95 | Index = It->second; |
96 | return false; |
97 | } |
98 | |
// Distance is the number of consecutive unassigned instructions, including
// MI. Start is the first of them; End is the instruction following the last
// of them.
// e.g.
// |Instruction| A | B | C | MI | D | E |
// | Index | 1024 | | | | | 2048 |
//
// In this case, B, C, MI and D are unassigned. Distance is 4, Start is B,
// and End is E.
108 | unsigned Distance = 1; |
109 | MachineBasicBlock::const_iterator Start = MI.getIterator(), |
110 | End = std::next(x: Start); |
111 | while (Start != CurMBB->begin() && |
112 | !Instr2PosIndex.count(Val: &*std::prev(x: Start))) { |
113 | --Start; |
114 | ++Distance; |
115 | } |
116 | while (End != CurMBB->end() && !Instr2PosIndex.count(Val: &*(End))) { |
117 | ++End; |
118 | ++Distance; |
119 | } |
120 | |
// LastIndex is initialized to the last used index prior to MI, or zero.
// In the previous example, LastIndex is 1024 and EndIndex is 2048.
123 | uint64_t LastIndex = |
124 | Start == CurMBB->begin() ? 0 : Instr2PosIndex.at(Val: &*std::prev(x: Start)); |
125 | uint64_t Step; |
126 | if (End == CurMBB->end()) |
127 | Step = static_cast<uint64_t>(InstrDist); |
128 | else { |
129 | // No instruction uses index zero. |
130 | uint64_t EndIndex = Instr2PosIndex.at(Val: &*End); |
131 | assert(EndIndex > LastIndex && "Index must be ascending order" ); |
132 | unsigned NumAvailableIndexes = EndIndex - LastIndex - 1; |
// We want the index gap between two adjacent instructions to be as even as
// possible. Given A total available indexes, D consecutive unassigned
// instructions, and step S:
// |<- S-1 -> MI <- S-1 -> MI <- A-S*D ->|
// There are S-1 available indexes between an unassigned instruction and its
// predecessor. There are A-S*D available indexes between the last
// unassigned instruction and its successor.
// Ideally, we want
// S-1 = A-S*D
// which gives
// S = (A+1)/(D+1)
// A valid S must be an integer greater than zero, and any
// S <= (A+1)/(D+1)
// implies
// A-S*D >= 0
// That means we can safely use (A+1)/(D+1), rounded down, as the step.
// In the previous example, Step is 204 and the indexes of B, C, MI and D
// become 1228, 1432, 1636 and 1840.
151 | Step = (NumAvailableIndexes + 1) / (Distance + 1); |
152 | } |
153 | |
// Reassign indexes for all instructions if the number of newly inserted
// instructions exceeds the available index slots, or if all instructions in
// the block are new.
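// Concretely: Step == 0 means the gap before End is too small to give every
// new instruction its own index, and (!LastIndex && Step == InstrDist) is the
// case where no instruction before Start has an index and the step works out
// to a full InstrDist, i.e. the whole block is effectively unnumbered and
// renumbering it from scratch is just as good.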
156 | if (LLVM_UNLIKELY(!Step || (!LastIndex && Step == InstrDist))) { |
157 | init(MBB: *CurMBB); |
158 | Index = Instr2PosIndex.at(Val: &MI); |
159 | return true; |
160 | } |
161 | |
162 | for (auto I = Start; I != End; ++I) { |
163 | LastIndex += Step; |
164 | Instr2PosIndex[&*I] = LastIndex; |
165 | } |
166 | Index = Instr2PosIndex.at(Val: &MI); |
167 | return false; |
168 | } |
169 | |
170 | private: |
171 | bool IsInitialized = false; |
172 | enum { InstrDist = 1024 }; |
173 | const MachineBasicBlock *CurMBB = nullptr; |
174 | DenseMap<const MachineInstr *, uint64_t> Instr2PosIndex; |
175 | }; |
176 | |
177 | class RegAllocFast : public MachineFunctionPass { |
178 | public: |
179 | static char ID; |
180 | |
181 | RegAllocFast(const RegClassFilterFunc F = allocateAllRegClasses, |
182 | bool ClearVirtRegs_ = true) |
183 | : MachineFunctionPass(ID), ShouldAllocateClass(F), |
184 | StackSlotForVirtReg(-1), ClearVirtRegs(ClearVirtRegs_) {} |
185 | |
186 | private: |
187 | MachineFrameInfo *MFI = nullptr; |
188 | MachineRegisterInfo *MRI = nullptr; |
189 | const TargetRegisterInfo *TRI = nullptr; |
190 | const TargetInstrInfo *TII = nullptr; |
191 | RegisterClassInfo RegClassInfo; |
192 | const RegClassFilterFunc ShouldAllocateClass; |
193 | |
194 | /// Basic block currently being allocated. |
195 | MachineBasicBlock *MBB = nullptr; |
196 | |
197 | /// Maps virtual regs to the frame index where these values are spilled. |
198 | IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg; |
199 | |
200 | bool ClearVirtRegs; |
201 | |
202 | /// Everything we know about a live virtual register. |
203 | struct LiveReg { |
204 | MachineInstr *LastUse = nullptr; ///< Last instr to use reg. |
205 | Register VirtReg; ///< Virtual register number. |
206 | MCPhysReg PhysReg = 0; ///< Currently held here. |
207 | bool LiveOut = false; ///< Register is possibly live out. |
208 | bool Reloaded = false; ///< Register was reloaded. |
209 | bool Error = false; ///< Could not allocate. |
210 | |
211 | explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {} |
212 | |
213 | unsigned getSparseSetIndex() const { |
214 | return Register::virtReg2Index(Reg: VirtReg); |
215 | } |
216 | }; |
217 | |
218 | using LiveRegMap = SparseSet<LiveReg, identity<unsigned>, uint16_t>; |
219 | /// This map contains entries for each virtual register that is currently |
220 | /// available in a physical register. |
221 | LiveRegMap LiveVirtRegs; |
222 | |
223 | /// Stores assigned virtual registers present in the bundle MI. |
224 | DenseMap<Register, MCPhysReg> BundleVirtRegsMap; |
225 | |
226 | DenseMap<unsigned, SmallVector<MachineOperand *, 2>> LiveDbgValueMap; |
/// List of DBG_VALUEs that we encountered without the vreg being assigned
228 | /// because they were placed after the last use of the vreg. |
229 | DenseMap<unsigned, SmallVector<MachineInstr *, 1>> DanglingDbgValues; |
230 | |
231 | /// Has a bit set for every virtual register for which it was determined |
232 | /// that it is alive across blocks. |
233 | BitVector MayLiveAcrossBlocks; |
234 | |
235 | /// State of a register unit. |
236 | enum RegUnitState { |
237 | /// A free register is not currently in use and can be allocated |
238 | /// immediately without checking aliases. |
239 | regFree, |
240 | |
241 | /// A pre-assigned register has been assigned before register allocation |
242 | /// (e.g., setting up a call parameter). |
243 | regPreAssigned, |
244 | |
245 | /// Used temporarily in reloadAtBegin() to mark register units that are |
246 | /// live-in to the basic block. |
247 | regLiveIn, |
248 | |
/// A register state may also be a virtual register number, indicating
250 | /// that the physical register is currently allocated to a virtual |
251 | /// register. In that case, LiveVirtRegs contains the inverse mapping. |
252 | }; |
253 | |
254 | /// Maps each physical register to a RegUnitState enum or virtual register. |
255 | std::vector<unsigned> RegUnitStates; |
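// Storing both kinds of value in one 'unsigned' works because virtual
// register numbers have their high bit set (see Register::isVirtualRegister),
// so they can never collide with the small RegUnitState enum values above.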
256 | |
257 | SmallVector<MachineInstr *, 32> Coalesced; |
258 | |
259 | using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>; |
260 | /// Set of register units that are used in the current instruction, and so |
261 | /// cannot be allocated. |
262 | RegUnitSet UsedInInstr; |
263 | RegUnitSet PhysRegUses; |
264 | SmallVector<uint16_t, 8> DefOperandIndexes; |
265 | // Register masks attached to the current instruction. |
266 | SmallVector<const uint32_t *> RegMasks; |
267 | |
268 | // Assign index for each instruction to quickly determine dominance. |
269 | InstrPosIndexes PosIndexes; |
270 | |
271 | void setPhysRegState(MCPhysReg PhysReg, unsigned NewState); |
272 | bool isPhysRegFree(MCPhysReg PhysReg) const; |
273 | |
274 | /// Mark a physreg as used in this instruction. |
275 | void markRegUsedInInstr(MCPhysReg PhysReg) { |
276 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) |
277 | UsedInInstr.insert(Val: Unit); |
278 | } |
279 | |
280 | // Check if physreg is clobbered by instruction's regmask(s). |
281 | bool isClobberedByRegMasks(MCPhysReg PhysReg) const { |
282 | return llvm::any_of(Range: RegMasks, P: [PhysReg](const uint32_t *Mask) { |
283 | return MachineOperand::clobbersPhysReg(RegMask: Mask, PhysReg); |
284 | }); |
285 | } |
286 | |
287 | /// Check if a physreg or any of its aliases are used in this instruction. |
288 | bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const { |
289 | if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg)) |
290 | return true; |
291 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) { |
292 | if (UsedInInstr.count(Key: Unit)) |
293 | return true; |
294 | if (LookAtPhysRegUses && PhysRegUses.count(Key: Unit)) |
295 | return true; |
296 | } |
297 | return false; |
298 | } |
299 | |
300 | /// Mark physical register as being used in a register use operand. |
301 | /// This is only used by the special livethrough handling code. |
302 | void markPhysRegUsedInInstr(MCPhysReg PhysReg) { |
303 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) |
304 | PhysRegUses.insert(Val: Unit); |
305 | } |
306 | |
307 | /// Remove mark of physical register being used in the instruction. |
308 | void unmarkRegUsedInInstr(MCPhysReg PhysReg) { |
309 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) |
310 | UsedInInstr.erase(Key: Unit); |
311 | } |
312 | |
313 | enum : unsigned { |
314 | spillClean = 50, |
315 | spillDirty = 100, |
316 | spillPrefBonus = 20, |
317 | spillImpossible = ~0u |
318 | }; |
319 | |
320 | public: |
321 | StringRef getPassName() const override { return "Fast Register Allocator" ; } |
322 | |
323 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
324 | AU.setPreservesCFG(); |
325 | MachineFunctionPass::getAnalysisUsage(AU); |
326 | } |
327 | |
328 | MachineFunctionProperties getRequiredProperties() const override { |
329 | return MachineFunctionProperties().set( |
330 | MachineFunctionProperties::Property::NoPHIs); |
331 | } |
332 | |
333 | MachineFunctionProperties getSetProperties() const override { |
334 | if (ClearVirtRegs) { |
335 | return MachineFunctionProperties().set( |
336 | MachineFunctionProperties::Property::NoVRegs); |
337 | } |
338 | |
339 | return MachineFunctionProperties(); |
340 | } |
341 | |
342 | MachineFunctionProperties getClearedProperties() const override { |
343 | return MachineFunctionProperties().set( |
344 | MachineFunctionProperties::Property::IsSSA); |
345 | } |
346 | |
347 | private: |
348 | bool runOnMachineFunction(MachineFunction &MF) override; |
349 | |
350 | void allocateBasicBlock(MachineBasicBlock &MBB); |
351 | |
352 | void addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts, |
353 | Register Reg) const; |
354 | |
355 | void findAndSortDefOperandIndexes(const MachineInstr &MI); |
356 | |
357 | void allocateInstruction(MachineInstr &MI); |
358 | void handleDebugValue(MachineInstr &MI); |
359 | void handleBundle(MachineInstr &MI); |
360 | |
361 | bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg); |
362 | bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg); |
363 | bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg); |
364 | void freePhysReg(MCPhysReg PhysReg); |
365 | |
366 | unsigned calcSpillCost(MCPhysReg PhysReg) const; |
367 | |
368 | LiveRegMap::iterator findLiveVirtReg(Register VirtReg) { |
369 | return LiveVirtRegs.find(Key: Register::virtReg2Index(Reg: VirtReg)); |
370 | } |
371 | |
372 | LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const { |
373 | return LiveVirtRegs.find(Key: Register::virtReg2Index(Reg: VirtReg)); |
374 | } |
375 | |
376 | void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg); |
377 | void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint, |
378 | bool LookAtPhysRegUses = false); |
379 | void allocVirtRegUndef(MachineOperand &MO); |
380 | void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg, |
381 | MCPhysReg Reg); |
382 | bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum, |
383 | Register VirtReg); |
384 | bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg, |
385 | bool LookAtPhysRegUses = false); |
386 | bool useVirtReg(MachineInstr &MI, MachineOperand &MO, Register VirtReg); |
387 | |
388 | MachineBasicBlock::iterator |
389 | getMBBBeginInsertionPoint(MachineBasicBlock &MBB, |
390 | SmallSet<Register, 2> &PrologLiveIns) const; |
391 | |
392 | void reloadAtBegin(MachineBasicBlock &MBB); |
393 | bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg); |
394 | |
395 | Register traceCopies(Register VirtReg) const; |
396 | Register traceCopyChain(Register Reg) const; |
397 | |
398 | bool shouldAllocateRegister(const Register Reg) const; |
399 | int getStackSpaceFor(Register VirtReg); |
400 | void spill(MachineBasicBlock::iterator Before, Register VirtReg, |
401 | MCPhysReg AssignedReg, bool Kill, bool LiveOut); |
402 | void reload(MachineBasicBlock::iterator Before, Register VirtReg, |
403 | MCPhysReg PhysReg); |
404 | |
405 | bool mayLiveOut(Register VirtReg); |
406 | bool mayLiveIn(Register VirtReg); |
407 | |
408 | void dumpState() const; |
409 | }; |
410 | |
411 | } // end anonymous namespace |
412 | |
413 | char RegAllocFast::ID = 0; |
414 | |
415 | INITIALIZE_PASS(RegAllocFast, "regallocfast" , "Fast Register Allocator" , false, |
416 | false) |
417 | |
418 | bool RegAllocFast::shouldAllocateRegister(const Register Reg) const { |
419 | assert(Reg.isVirtual()); |
420 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg); |
421 | return ShouldAllocateClass(*TRI, RC); |
422 | } |
423 | |
424 | void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) { |
425 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) |
426 | RegUnitStates[Unit] = NewState; |
427 | } |
428 | |
429 | bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const { |
430 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) { |
431 | if (RegUnitStates[Unit] != regFree) |
432 | return false; |
433 | } |
434 | return true; |
435 | } |
436 | |
437 | /// This allocates space for the specified virtual register to be held on the |
438 | /// stack. |
439 | int RegAllocFast::getStackSpaceFor(Register VirtReg) { |
440 | // Find the location Reg would belong... |
441 | int SS = StackSlotForVirtReg[VirtReg]; |
442 | // Already has space allocated? |
443 | if (SS != -1) |
444 | return SS; |
445 | |
446 | // Allocate a new stack object for this spill location... |
447 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
448 | unsigned Size = TRI->getSpillSize(RC); |
449 | Align Alignment = TRI->getSpillAlign(RC); |
450 | int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment); |
451 | |
452 | // Assign the slot. |
453 | StackSlotForVirtReg[VirtReg] = FrameIdx; |
454 | return FrameIdx; |
455 | } |
456 | |
457 | static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A, |
458 | const MachineInstr &B) { |
459 | uint64_t IndexA, IndexB; |
460 | PosIndexes.getIndex(MI: A, Index&: IndexA); |
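// getIndex() may renumber every instruction in the block; when it reports
// that it did, the value computed for IndexA above is stale and must be
// refreshed.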
461 | if (LLVM_UNLIKELY(PosIndexes.getIndex(B, IndexB))) |
462 | PosIndexes.getIndex(MI: A, Index&: IndexA); |
463 | return IndexA < IndexB; |
464 | } |
465 | |
466 | /// Returns false if \p VirtReg is known to not live out of the current block. |
467 | bool RegAllocFast::mayLiveOut(Register VirtReg) { |
468 | if (MayLiveAcrossBlocks.test(Idx: Register::virtReg2Index(Reg: VirtReg))) { |
469 | // Cannot be live-out if there are no successors. |
470 | return !MBB->succ_empty(); |
471 | } |
472 | |
473 | const MachineInstr *SelfLoopDef = nullptr; |
474 | |
475 | // If this block loops back to itself, it is necessary to check whether the |
476 | // use comes after the def. |
477 | if (MBB->isSuccessor(MBB)) { |
478 | // Find the first def in the self loop MBB. |
479 | for (const MachineInstr &DefInst : MRI->def_instructions(Reg: VirtReg)) { |
480 | if (DefInst.getParent() != MBB) { |
481 | MayLiveAcrossBlocks.set(Register::virtReg2Index(Reg: VirtReg)); |
482 | return true; |
483 | } else { |
484 | if (!SelfLoopDef || dominates(PosIndexes, A: DefInst, B: *SelfLoopDef)) |
485 | SelfLoopDef = &DefInst; |
486 | } |
487 | } |
488 | if (!SelfLoopDef) { |
489 | MayLiveAcrossBlocks.set(Register::virtReg2Index(Reg: VirtReg)); |
490 | return true; |
491 | } |
492 | } |
493 | |
494 | // See if the first \p Limit uses of the register are all in the current |
495 | // block. |
496 | static const unsigned Limit = 8; |
497 | unsigned C = 0; |
498 | for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg: VirtReg)) { |
499 | if (UseInst.getParent() != MBB || ++C >= Limit) { |
500 | MayLiveAcrossBlocks.set(Register::virtReg2Index(Reg: VirtReg)); |
501 | // Cannot be live-out if there are no successors. |
502 | return !MBB->succ_empty(); |
503 | } |
504 | |
505 | if (SelfLoopDef) { |
506 | // Try to handle some simple cases to avoid spilling and reloading every |
507 | // value inside a self looping block. |
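// A use that is not strictly dominated by the block's first def must read
// the value produced by the previous loop iteration, so the vreg does live
// across the back edge.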
508 | if (SelfLoopDef == &UseInst || |
509 | !dominates(PosIndexes, A: *SelfLoopDef, B: UseInst)) { |
510 | MayLiveAcrossBlocks.set(Register::virtReg2Index(Reg: VirtReg)); |
511 | return true; |
512 | } |
513 | } |
514 | } |
515 | |
516 | return false; |
517 | } |
518 | |
519 | /// Returns false if \p VirtReg is known to not be live into the current block. |
520 | bool RegAllocFast::mayLiveIn(Register VirtReg) { |
521 | if (MayLiveAcrossBlocks.test(Idx: Register::virtReg2Index(Reg: VirtReg))) |
522 | return !MBB->pred_empty(); |
523 | |
// See if the first \p Limit defs of the register are all in the current block.
525 | static const unsigned Limit = 8; |
526 | unsigned C = 0; |
527 | for (const MachineInstr &DefInst : MRI->def_instructions(Reg: VirtReg)) { |
528 | if (DefInst.getParent() != MBB || ++C >= Limit) { |
529 | MayLiveAcrossBlocks.set(Register::virtReg2Index(Reg: VirtReg)); |
530 | return !MBB->pred_empty(); |
531 | } |
532 | } |
533 | |
534 | return false; |
535 | } |
536 | |
537 | /// Insert spill instruction for \p AssignedReg before \p Before. Update |
538 | /// DBG_VALUEs with \p VirtReg operands with the stack slot. |
539 | void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg, |
540 | MCPhysReg AssignedReg, bool Kill, bool LiveOut) { |
541 | LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) << " in " |
542 | << printReg(AssignedReg, TRI)); |
543 | int FI = getStackSpaceFor(VirtReg); |
544 | LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n'); |
545 | |
546 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
547 | TII->storeRegToStackSlot(MBB&: *MBB, MI: Before, SrcReg: AssignedReg, isKill: Kill, FrameIndex: FI, RC: &RC, TRI, |
548 | VReg: VirtReg); |
549 | ++NumStores; |
550 | |
551 | MachineBasicBlock::iterator FirstTerm = MBB->getFirstTerminator(); |
552 | |
553 | // When we spill a virtual register, we will have spill instructions behind |
554 | // every definition of it, meaning we can switch all the DBG_VALUEs over |
555 | // to just reference the stack slot. |
556 | SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg]; |
557 | SmallMapVector<MachineInstr *, SmallVector<const MachineOperand *>, 2> |
558 | SpilledOperandsMap; |
559 | for (MachineOperand *MO : LRIDbgOperands) |
560 | SpilledOperandsMap[MO->getParent()].push_back(Elt: MO); |
561 | for (auto MISpilledOperands : SpilledOperandsMap) { |
562 | MachineInstr &DBG = *MISpilledOperands.first; |
563 | // We don't have enough support for tracking operands of DBG_VALUE_LISTs. |
564 | if (DBG.isDebugValueList()) |
565 | continue; |
566 | MachineInstr *NewDV = buildDbgValueForSpill( |
567 | BB&: *MBB, I: Before, Orig: *MISpilledOperands.first, FrameIndex: FI, SpilledOperands&: MISpilledOperands.second); |
568 | assert(NewDV->getParent() == MBB && "dangling parent pointer" ); |
569 | (void)NewDV; |
570 | LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV); |
571 | |
572 | if (LiveOut) { |
573 | // We need to insert a DBG_VALUE at the end of the block if the spill slot |
574 | // is live out, but there is another use of the value after the |
575 | // spill. This will allow LiveDebugValues to see the correct live out |
576 | // value to propagate to the successors. |
577 | MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(Orig: NewDV); |
578 | MBB->insert(I: FirstTerm, MI: ClonedDV); |
579 | LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n" ); |
580 | } |
581 | |
582 | // Rewrite unassigned dbg_values to use the stack slot. |
583 | // TODO We can potentially do this for list debug values as well if we know |
584 | // how the dbg_values are getting unassigned. |
585 | if (DBG.isNonListDebugValue()) { |
586 | MachineOperand &MO = DBG.getDebugOperand(Index: 0); |
587 | if (MO.isReg() && MO.getReg() == 0) { |
588 | updateDbgValueForSpill(Orig&: DBG, FrameIndex: FI, Reg: 0); |
589 | } |
590 | } |
591 | } |
// Now that this register is spilled, there should not be any DBG_VALUE
// pointing to this register because they all point to the spilled value now.
595 | LRIDbgOperands.clear(); |
596 | } |
597 | |
598 | /// Insert reload instruction for \p PhysReg before \p Before. |
599 | void RegAllocFast::reload(MachineBasicBlock::iterator Before, Register VirtReg, |
600 | MCPhysReg PhysReg) { |
601 | LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into " |
602 | << printReg(PhysReg, TRI) << '\n'); |
603 | int FI = getStackSpaceFor(VirtReg); |
604 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
605 | TII->loadRegFromStackSlot(MBB&: *MBB, MI: Before, DestReg: PhysReg, FrameIndex: FI, RC: &RC, TRI, VReg: VirtReg); |
606 | ++NumLoads; |
607 | } |
608 | |
609 | /// Get basic block begin insertion point. |
610 | /// This is not just MBB.begin() because surprisingly we have EH_LABEL |
/// instructions marking the beginning of a basic block. This means we must
/// insert new instructions after such labels.
613 | MachineBasicBlock::iterator RegAllocFast::getMBBBeginInsertionPoint( |
614 | MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const { |
615 | MachineBasicBlock::iterator I = MBB.begin(); |
616 | while (I != MBB.end()) { |
617 | if (I->isLabel()) { |
618 | ++I; |
619 | continue; |
620 | } |
621 | |
622 | // Most reloads should be inserted after prolog instructions. |
623 | if (!TII->isBasicBlockPrologue(MI: *I)) |
624 | break; |
625 | |
626 | // However if a prolog instruction reads a register that needs to be |
627 | // reloaded, the reload should be inserted before the prolog. |
628 | for (MachineOperand &MO : I->operands()) { |
629 | if (MO.isReg()) |
630 | PrologLiveIns.insert(V: MO.getReg()); |
631 | } |
632 | |
633 | ++I; |
634 | } |
635 | |
636 | return I; |
637 | } |
638 | |
639 | /// Reload all currently assigned virtual registers. |
640 | void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) { |
641 | if (LiveVirtRegs.empty()) |
642 | return; |
643 | |
644 | for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) { |
645 | MCPhysReg Reg = P.PhysReg; |
646 | // Set state to live-in. This possibly overrides mappings to virtual |
647 | // registers but we don't care anymore at this point. |
648 | setPhysRegState(PhysReg: Reg, NewState: regLiveIn); |
649 | } |
650 | |
651 | SmallSet<Register, 2> PrologLiveIns; |
652 | |
653 | // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order |
654 | // of spilling here is deterministic, if arbitrary. |
655 | MachineBasicBlock::iterator InsertBefore = |
656 | getMBBBeginInsertionPoint(MBB, PrologLiveIns); |
657 | for (const LiveReg &LR : LiveVirtRegs) { |
658 | MCPhysReg PhysReg = LR.PhysReg; |
659 | if (PhysReg == 0) |
660 | continue; |
661 | |
662 | MCRegister FirstUnit = *TRI->regunits(Reg: PhysReg).begin(); |
663 | if (RegUnitStates[FirstUnit] == regLiveIn) |
664 | continue; |
665 | |
666 | assert((&MBB != &MBB.getParent()->front() || IgnoreMissingDefs) && |
667 | "no reload in start block. Missing vreg def?" ); |
668 | |
669 | if (PrologLiveIns.count(V: PhysReg)) { |
670 | // FIXME: Theoretically this should use an insert point skipping labels |
// but I'm not sure how labels should interact with prolog instructions
672 | // that need reloads. |
673 | reload(Before: MBB.begin(), VirtReg: LR.VirtReg, PhysReg); |
674 | } else |
675 | reload(Before: InsertBefore, VirtReg: LR.VirtReg, PhysReg); |
676 | } |
677 | LiveVirtRegs.clear(); |
678 | } |
679 | |
680 | /// Handle the direct use of a physical register. Check that the register is |
681 | /// not used by a virtreg. Kill the physreg, marking it free. This may add |
682 | /// implicit kills to MO->getParent() and invalidate MO. |
683 | bool RegAllocFast::usePhysReg(MachineInstr &MI, MCPhysReg Reg) { |
684 | assert(Register::isPhysicalRegister(Reg) && "expected physreg" ); |
685 | bool displacedAny = displacePhysReg(MI, PhysReg: Reg); |
686 | setPhysRegState(PhysReg: Reg, NewState: regPreAssigned); |
687 | markRegUsedInInstr(PhysReg: Reg); |
688 | return displacedAny; |
689 | } |
690 | |
691 | bool RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg Reg) { |
692 | bool displacedAny = displacePhysReg(MI, PhysReg: Reg); |
693 | setPhysRegState(PhysReg: Reg, NewState: regPreAssigned); |
694 | return displacedAny; |
695 | } |
696 | |
697 | /// Mark PhysReg as reserved or free after spilling any virtregs. This is very |
698 | /// similar to defineVirtReg except the physreg is reserved instead of |
699 | /// allocated. |
700 | bool RegAllocFast::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) { |
701 | bool displacedAny = false; |
702 | |
703 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) { |
704 | switch (unsigned VirtReg = RegUnitStates[Unit]) { |
705 | default: { |
706 | LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); |
707 | assert(LRI != LiveVirtRegs.end() && "datastructures in sync" ); |
708 | MachineBasicBlock::iterator ReloadBefore = |
709 | std::next(x: (MachineBasicBlock::iterator)MI.getIterator()); |
710 | reload(Before: ReloadBefore, VirtReg, PhysReg: LRI->PhysReg); |
711 | |
712 | setPhysRegState(PhysReg: LRI->PhysReg, NewState: regFree); |
713 | LRI->PhysReg = 0; |
714 | LRI->Reloaded = true; |
715 | displacedAny = true; |
716 | break; |
717 | } |
718 | case regPreAssigned: |
719 | RegUnitStates[Unit] = regFree; |
720 | displacedAny = true; |
721 | break; |
722 | case regFree: |
723 | break; |
724 | } |
725 | } |
726 | return displacedAny; |
727 | } |
728 | |
729 | void RegAllocFast::freePhysReg(MCPhysReg PhysReg) { |
730 | LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':'); |
731 | |
732 | MCRegister FirstUnit = *TRI->regunits(Reg: PhysReg).begin(); |
733 | switch (unsigned VirtReg = RegUnitStates[FirstUnit]) { |
734 | case regFree: |
735 | LLVM_DEBUG(dbgs() << '\n'); |
736 | return; |
737 | case regPreAssigned: |
738 | LLVM_DEBUG(dbgs() << '\n'); |
739 | setPhysRegState(PhysReg, NewState: regFree); |
740 | return; |
741 | default: { |
742 | LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); |
743 | assert(LRI != LiveVirtRegs.end()); |
744 | LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n'); |
745 | setPhysRegState(PhysReg: LRI->PhysReg, NewState: regFree); |
746 | LRI->PhysReg = 0; |
747 | } |
748 | return; |
749 | } |
750 | } |
751 | |
/// Return the cost of spilling or clearing out PhysReg and its aliases so that
/// it is free for allocation. Returns 0 when PhysReg is free or disabled with
/// all aliases disabled - it can be allocated directly.
755 | /// \returns spillImpossible when PhysReg or an alias can't be spilled. |
756 | unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const { |
757 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) { |
758 | switch (unsigned VirtReg = RegUnitStates[Unit]) { |
759 | case regFree: |
760 | break; |
761 | case regPreAssigned: |
762 | LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned " |
763 | << printReg(PhysReg, TRI) << '\n'); |
764 | return spillImpossible; |
765 | default: { |
766 | bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 || |
767 | findLiveVirtReg(VirtReg)->LiveOut; |
768 | return SureSpill ? spillClean : spillDirty; |
769 | } |
770 | } |
771 | } |
772 | return 0; |
773 | } |
774 | |
775 | void RegAllocFast::assignDanglingDebugValues(MachineInstr &Definition, |
776 | Register VirtReg, MCPhysReg Reg) { |
777 | auto UDBGValIter = DanglingDbgValues.find(Val: VirtReg); |
778 | if (UDBGValIter == DanglingDbgValues.end()) |
779 | return; |
780 | |
781 | SmallVectorImpl<MachineInstr *> &Dangling = UDBGValIter->second; |
782 | for (MachineInstr *DbgValue : Dangling) { |
783 | assert(DbgValue->isDebugValue()); |
784 | if (!DbgValue->hasDebugOperandForReg(Reg: VirtReg)) |
785 | continue; |
786 | |
787 | // Test whether the physreg survives from the definition to the DBG_VALUE. |
788 | MCPhysReg SetToReg = Reg; |
789 | unsigned Limit = 20; |
790 | for (MachineBasicBlock::iterator I = std::next(x: Definition.getIterator()), |
791 | E = DbgValue->getIterator(); |
792 | I != E; ++I) { |
793 | if (I->modifiesRegister(Reg, TRI) || --Limit == 0) { |
794 | LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue |
795 | << '\n'); |
796 | SetToReg = 0; |
797 | break; |
798 | } |
799 | } |
800 | for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(Reg: VirtReg)) { |
801 | MO.setReg(SetToReg); |
802 | if (SetToReg != 0) |
803 | MO.setIsRenamable(); |
804 | } |
805 | } |
806 | Dangling.clear(); |
807 | } |
808 | |
809 | /// This method updates local state so that we know that PhysReg is the |
810 | /// proper container for VirtReg now. The physical register must not be used |
811 | /// for anything else when this is called. |
812 | void RegAllocFast::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR, |
813 | MCPhysReg PhysReg) { |
814 | Register VirtReg = LR.VirtReg; |
815 | LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to " |
816 | << printReg(PhysReg, TRI) << '\n'); |
817 | assert(LR.PhysReg == 0 && "Already assigned a physreg" ); |
818 | assert(PhysReg != 0 && "Trying to assign no register" ); |
819 | LR.PhysReg = PhysReg; |
820 | setPhysRegState(PhysReg, NewState: VirtReg); |
821 | |
822 | assignDanglingDebugValues(Definition&: AtMI, VirtReg, Reg: PhysReg); |
823 | } |
824 | |
825 | static bool isCoalescable(const MachineInstr &MI) { return MI.isFullCopy(); } |
826 | |
827 | Register RegAllocFast::traceCopyChain(Register Reg) const { |
828 | static const unsigned ChainLengthLimit = 3; |
829 | unsigned C = 0; |
830 | do { |
831 | if (Reg.isPhysical()) |
832 | return Reg; |
833 | assert(Reg.isVirtual()); |
834 | |
835 | MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg); |
836 | if (!VRegDef || !isCoalescable(MI: *VRegDef)) |
837 | return 0; |
838 | Reg = VRegDef->getOperand(i: 1).getReg(); |
839 | } while (++C <= ChainLengthLimit); |
840 | return 0; |
841 | } |
842 | |
/// Check if any of \p VirtReg's definitions is a copy. If so, follow the
844 | /// chain of copies to check whether we reach a physical register we can |
845 | /// coalesce with. |
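/// For example (illustrative), given
///   %a = COPY $physreg0
///   %b = COPY %a
/// traceCopies(%b) follows the chain back (within ChainLengthLimit) and
/// returns $physreg0, which allocVirtReg() can then try as a hint so that the
/// copies become coalescable.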
846 | Register RegAllocFast::traceCopies(Register VirtReg) const { |
847 | static const unsigned DefLimit = 3; |
848 | unsigned C = 0; |
849 | for (const MachineInstr &MI : MRI->def_instructions(Reg: VirtReg)) { |
850 | if (isCoalescable(MI)) { |
851 | Register Reg = MI.getOperand(i: 1).getReg(); |
852 | Reg = traceCopyChain(Reg); |
853 | if (Reg.isValid()) |
854 | return Reg; |
855 | } |
856 | |
857 | if (++C >= DefLimit) |
858 | break; |
859 | } |
860 | return Register(); |
861 | } |
862 | |
863 | /// Allocates a physical register for VirtReg. |
864 | void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0, |
865 | bool LookAtPhysRegUses) { |
866 | const Register VirtReg = LR.VirtReg; |
867 | assert(LR.PhysReg == 0); |
868 | |
869 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
870 | LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg) |
871 | << " in class " << TRI->getRegClassName(&RC) |
872 | << " with hint " << printReg(Hint0, TRI) << '\n'); |
873 | |
874 | // Take hint when possible. |
875 | if (Hint0.isPhysical() && MRI->isAllocatable(PhysReg: Hint0) && RC.contains(Reg: Hint0) && |
876 | !isRegUsedInInstr(PhysReg: Hint0, LookAtPhysRegUses)) { |
877 | // Take hint if the register is currently free. |
878 | if (isPhysRegFree(PhysReg: Hint0)) { |
879 | LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI) |
880 | << '\n'); |
881 | assignVirtToPhysReg(AtMI&: MI, LR, PhysReg: Hint0); |
882 | return; |
883 | } else { |
884 | LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI) |
885 | << " occupied\n" ); |
886 | } |
887 | } else { |
888 | Hint0 = Register(); |
889 | } |
890 | |
891 | // Try other hint. |
892 | Register Hint1 = traceCopies(VirtReg); |
893 | if (Hint1.isPhysical() && MRI->isAllocatable(PhysReg: Hint1) && RC.contains(Reg: Hint1) && |
894 | !isRegUsedInInstr(PhysReg: Hint1, LookAtPhysRegUses)) { |
895 | // Take hint if the register is currently free. |
896 | if (isPhysRegFree(PhysReg: Hint1)) { |
897 | LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI) |
898 | << '\n'); |
899 | assignVirtToPhysReg(AtMI&: MI, LR, PhysReg: Hint1); |
900 | return; |
901 | } else { |
902 | LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI) |
903 | << " occupied\n" ); |
904 | } |
905 | } else { |
906 | Hint1 = Register(); |
907 | } |
908 | |
909 | MCPhysReg BestReg = 0; |
910 | unsigned BestCost = spillImpossible; |
911 | ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(RC: &RC); |
912 | for (MCPhysReg PhysReg : AllocationOrder) { |
913 | LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' '); |
914 | if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) { |
915 | LLVM_DEBUG(dbgs() << "already used in instr.\n" ); |
916 | continue; |
917 | } |
918 | |
919 | unsigned Cost = calcSpillCost(PhysReg); |
920 | LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n'); |
// Immediately take a register with cost 0.
922 | if (Cost == 0) { |
923 | assignVirtToPhysReg(AtMI&: MI, LR, PhysReg); |
924 | return; |
925 | } |
926 | |
927 | if (PhysReg == Hint0 || PhysReg == Hint1) |
928 | Cost -= spillPrefBonus; |
929 | |
930 | if (Cost < BestCost) { |
931 | BestReg = PhysReg; |
932 | BestCost = Cost; |
933 | } |
934 | } |
935 | |
936 | if (!BestReg) { |
937 | // Nothing we can do: Report an error and keep going with an invalid |
938 | // allocation. |
939 | if (MI.isInlineAsm()) |
940 | MI.emitError(Msg: "inline assembly requires more registers than available" ); |
941 | else |
942 | MI.emitError(Msg: "ran out of registers during register allocation" ); |
943 | |
944 | LR.Error = true; |
945 | LR.PhysReg = 0; |
946 | return; |
947 | } |
948 | |
949 | displacePhysReg(MI, PhysReg: BestReg); |
950 | assignVirtToPhysReg(AtMI&: MI, LR, PhysReg: BestReg); |
951 | } |
952 | |
953 | void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) { |
954 | assert(MO.isUndef() && "expected undef use" ); |
955 | Register VirtReg = MO.getReg(); |
956 | assert(VirtReg.isVirtual() && "Expected virtreg" ); |
957 | if (!shouldAllocateRegister(Reg: VirtReg)) |
958 | return; |
959 | |
960 | LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg); |
961 | MCPhysReg PhysReg; |
962 | if (LRI != LiveVirtRegs.end() && LRI->PhysReg) { |
963 | PhysReg = LRI->PhysReg; |
964 | } else { |
965 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
966 | ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(RC: &RC); |
967 | assert(!AllocationOrder.empty() && "Allocation order must not be empty" ); |
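// An undef use never reads the register's contents, so any register of the
// class will do; the first register of the allocation order is simply a
// deterministic choice.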
968 | PhysReg = AllocationOrder[0]; |
969 | } |
970 | |
971 | unsigned SubRegIdx = MO.getSubReg(); |
972 | if (SubRegIdx != 0) { |
973 | PhysReg = TRI->getSubReg(Reg: PhysReg, Idx: SubRegIdx); |
974 | MO.setSubReg(0); |
975 | } |
976 | MO.setReg(PhysReg); |
977 | MO.setIsRenamable(true); |
978 | } |
979 | |
980 | /// Variation of defineVirtReg() with special handling for livethrough regs |
981 | /// (tied or earlyclobber) that may interfere with preassigned uses. |
982 | /// \return true if MI's MachineOperands were re-arranged/invalidated. |
983 | bool RegAllocFast::defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum, |
984 | Register VirtReg) { |
985 | if (!shouldAllocateRegister(Reg: VirtReg)) |
986 | return false; |
987 | LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); |
988 | if (LRI != LiveVirtRegs.end()) { |
989 | MCPhysReg PrevReg = LRI->PhysReg; |
990 | if (PrevReg != 0 && isRegUsedInInstr(PhysReg: PrevReg, LookAtPhysRegUses: true)) { |
991 | LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI) |
992 | << " (tied/earlyclobber resolution)\n" ); |
993 | freePhysReg(PhysReg: PrevReg); |
994 | LRI->PhysReg = 0; |
995 | allocVirtReg(MI, LR&: *LRI, Hint0: 0, LookAtPhysRegUses: true); |
996 | MachineBasicBlock::iterator InsertBefore = |
997 | std::next(x: (MachineBasicBlock::iterator)MI.getIterator()); |
998 | LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to " |
999 | << printReg(PrevReg, TRI) << '\n'); |
1000 | BuildMI(BB&: *MBB, I: InsertBefore, MIMD: MI.getDebugLoc(), |
1001 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: PrevReg) |
1002 | .addReg(RegNo: LRI->PhysReg, flags: llvm::RegState::Kill); |
1003 | } |
1004 | MachineOperand &MO = MI.getOperand(i: OpNum); |
1005 | if (MO.getSubReg() && !MO.isUndef()) { |
1006 | LRI->LastUse = &MI; |
1007 | } |
1008 | } |
1009 | return defineVirtReg(MI, OpNum, VirtReg, LookAtPhysRegUses: true); |
1010 | } |
1011 | |
1012 | /// Allocates a register for VirtReg definition. Typically the register is |
1013 | /// already assigned from a use of the virtreg, however we still need to |
1014 | /// perform an allocation if: |
1015 | /// - It is a dead definition without any uses. |
1016 | /// - The value is live out and all uses are in different basic blocks. |
1017 | /// |
1018 | /// \return true if MI's MachineOperands were re-arranged/invalidated. |
1019 | bool RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum, |
1020 | Register VirtReg, bool LookAtPhysRegUses) { |
1021 | assert(VirtReg.isVirtual() && "Not a virtual register" ); |
1022 | if (!shouldAllocateRegister(Reg: VirtReg)) |
1023 | return false; |
1024 | MachineOperand &MO = MI.getOperand(i: OpNum); |
1025 | LiveRegMap::iterator LRI; |
1026 | bool New; |
1027 | std::tie(args&: LRI, args&: New) = LiveVirtRegs.insert(Val: LiveReg(VirtReg)); |
1028 | if (New) { |
1029 | if (!MO.isDead()) { |
1030 | if (mayLiveOut(VirtReg)) { |
1031 | LRI->LiveOut = true; |
1032 | } else { |
1033 | // It is a dead def without the dead flag; add the flag now. |
1034 | MO.setIsDead(true); |
1035 | } |
1036 | } |
1037 | } |
1038 | if (LRI->PhysReg == 0) { |
1039 | allocVirtReg(MI, LR&: *LRI, Hint0: 0, LookAtPhysRegUses); |
// If no physical register is available for LRI, pick an arbitrary one (the
// first register in the allocation order, or no register at all if the order
// is empty) and bail out of this function immediately.
1042 | if (LRI->Error) { |
1043 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
1044 | ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(RC: &RC); |
1045 | if (AllocationOrder.empty()) |
1046 | return setPhysReg(MI, MO, PhysReg: MCRegister::NoRegister); |
1047 | return setPhysReg(MI, MO, PhysReg: *AllocationOrder.begin()); |
1048 | } |
1049 | } else { |
1050 | assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) && |
1051 | "TODO: preassign mismatch" ); |
1052 | LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI) |
1053 | << " use existing assignment to " |
1054 | << printReg(LRI->PhysReg, TRI) << '\n'); |
1055 | } |
1056 | |
1057 | MCPhysReg PhysReg = LRI->PhysReg; |
1058 | if (LRI->Reloaded || LRI->LiveOut) { |
1059 | if (!MI.isImplicitDef()) { |
1060 | MachineBasicBlock::iterator SpillBefore = |
1061 | std::next(x: (MachineBasicBlock::iterator)MI.getIterator()); |
1062 | LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut |
1063 | << " RL: " << LRI->Reloaded << '\n'); |
1064 | bool Kill = LRI->LastUse == nullptr; |
1065 | spill(Before: SpillBefore, VirtReg, AssignedReg: PhysReg, Kill, LiveOut: LRI->LiveOut); |
1066 | |
1067 | // We need to place additional spills for each indirect destination of an |
1068 | // INLINEASM_BR. |
1069 | if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) { |
1070 | int FI = StackSlotForVirtReg[VirtReg]; |
1071 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
1072 | for (MachineOperand &MO : MI.operands()) { |
1073 | if (MO.isMBB()) { |
1074 | MachineBasicBlock *Succ = MO.getMBB(); |
1075 | TII->storeRegToStackSlot(MBB&: *Succ, MI: Succ->begin(), SrcReg: PhysReg, isKill: Kill, FrameIndex: FI, |
1076 | RC: &RC, TRI, VReg: VirtReg); |
1077 | ++NumStores; |
1078 | Succ->addLiveIn(PhysReg); |
1079 | } |
1080 | } |
1081 | } |
1082 | |
1083 | LRI->LastUse = nullptr; |
1084 | } |
1085 | LRI->LiveOut = false; |
1086 | LRI->Reloaded = false; |
1087 | } |
1088 | if (MI.getOpcode() == TargetOpcode::BUNDLE) { |
1089 | BundleVirtRegsMap[VirtReg] = PhysReg; |
1090 | } |
1091 | markRegUsedInInstr(PhysReg); |
1092 | return setPhysReg(MI, MO, PhysReg); |
1093 | } |
1094 | |
1095 | /// Allocates a register for a VirtReg use. |
1096 | /// \return true if MI's MachineOperands were re-arranged/invalidated. |
1097 | bool RegAllocFast::useVirtReg(MachineInstr &MI, MachineOperand &MO, |
1098 | Register VirtReg) { |
1099 | assert(VirtReg.isVirtual() && "Not a virtual register" ); |
1100 | if (!shouldAllocateRegister(Reg: VirtReg)) |
1101 | return false; |
1102 | LiveRegMap::iterator LRI; |
1103 | bool New; |
1104 | std::tie(args&: LRI, args&: New) = LiveVirtRegs.insert(Val: LiveReg(VirtReg)); |
1105 | if (New) { |
1106 | if (!MO.isKill()) { |
1107 | if (mayLiveOut(VirtReg)) { |
1108 | LRI->LiveOut = true; |
1109 | } else { |
1110 | // It is a last (killing) use without the kill flag; add the flag now. |
1111 | MO.setIsKill(true); |
1112 | } |
1113 | } |
1114 | } else { |
1115 | assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag" ); |
1116 | } |
1117 | |
1118 | // If necessary allocate a register. |
1119 | if (LRI->PhysReg == 0) { |
1120 | assert(!MO.isTied() && "tied op should be allocated" ); |
1121 | Register Hint; |
1122 | if (MI.isCopy() && MI.getOperand(i: 1).getSubReg() == 0) { |
1123 | Hint = MI.getOperand(i: 0).getReg(); |
1124 | if (Hint.isVirtual()) { |
1125 | assert(!shouldAllocateRegister(Hint)); |
1126 | Hint = Register(); |
1127 | } else { |
1128 | assert(Hint.isPhysical() && |
1129 | "Copy destination should already be assigned" ); |
1130 | } |
1131 | } |
1132 | allocVirtReg(MI, LR&: *LRI, Hint0: Hint, LookAtPhysRegUses: false); |
1133 | if (LRI->Error) { |
1134 | const TargetRegisterClass &RC = *MRI->getRegClass(Reg: VirtReg); |
1135 | ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(RC: &RC); |
1136 | if (AllocationOrder.empty()) |
1137 | return setPhysReg(MI, MO, PhysReg: MCRegister::NoRegister); |
1138 | return setPhysReg(MI, MO, PhysReg: *AllocationOrder.begin()); |
1139 | } |
1140 | } |
1141 | |
1142 | LRI->LastUse = &MI; |
1143 | |
1144 | if (MI.getOpcode() == TargetOpcode::BUNDLE) { |
1145 | BundleVirtRegsMap[VirtReg] = LRI->PhysReg; |
1146 | } |
1147 | markRegUsedInInstr(PhysReg: LRI->PhysReg); |
1148 | return setPhysReg(MI, MO, PhysReg: LRI->PhysReg); |
1149 | } |
1150 | |
/// Changes operand \p MO in \p MI to refer to \p PhysReg, considering subregs.
1152 | /// \return true if MI's MachineOperands were re-arranged/invalidated. |
1153 | bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO, |
1154 | MCPhysReg PhysReg) { |
1155 | if (!MO.getSubReg()) { |
1156 | MO.setReg(PhysReg); |
1157 | MO.setIsRenamable(true); |
1158 | return false; |
1159 | } |
1160 | |
1161 | // Handle subregister index. |
1162 | MO.setReg(PhysReg ? TRI->getSubReg(Reg: PhysReg, Idx: MO.getSubReg()) : MCRegister()); |
1163 | MO.setIsRenamable(true); |
1164 | // Note: We leave the subreg number around a little longer in case of defs. |
1165 | // This is so that the register freeing logic in allocateInstruction can still |
// recognize this as a subregister def. The code there will clear the number.
1167 | if (!MO.isDef()) |
1168 | MO.setSubReg(0); |
1169 | |
1170 | // A kill flag implies killing the full register. Add corresponding super |
1171 | // register kill. |
1172 | if (MO.isKill()) { |
1173 | MI.addRegisterKilled(IncomingReg: PhysReg, RegInfo: TRI, AddIfNotFound: true); |
1174 | // Conservatively assume implicit MOs were re-arranged |
1175 | return true; |
1176 | } |
1177 | |
1178 | // A <def,read-undef> of a sub-register requires an implicit def of the full |
1179 | // register. |
1180 | if (MO.isDef() && MO.isUndef()) { |
1181 | if (MO.isDead()) |
1182 | MI.addRegisterDead(Reg: PhysReg, RegInfo: TRI, AddIfNotFound: true); |
1183 | else |
1184 | MI.addRegisterDefined(Reg: PhysReg, RegInfo: TRI); |
1185 | // Conservatively assume implicit MOs were re-arranged |
1186 | return true; |
1187 | } |
1188 | return false; |
1189 | } |
1190 | |
1191 | #ifndef NDEBUG |
1192 | |
1193 | void RegAllocFast::dumpState() const { |
1194 | for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE; |
1195 | ++Unit) { |
1196 | switch (unsigned VirtReg = RegUnitStates[Unit]) { |
1197 | case regFree: |
1198 | break; |
1199 | case regPreAssigned: |
1200 | dbgs() << " " << printRegUnit(Unit, TRI) << "[P]" ; |
1201 | break; |
1202 | case regLiveIn: |
1203 | llvm_unreachable("Should not have regLiveIn in map" ); |
1204 | default: { |
1205 | dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(Reg: VirtReg); |
1206 | LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg); |
1207 | assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry" ); |
1208 | if (I->LiveOut || I->Reloaded) { |
1209 | dbgs() << '['; |
1210 | if (I->LiveOut) |
1211 | dbgs() << 'O'; |
1212 | if (I->Reloaded) |
1213 | dbgs() << 'R'; |
1214 | dbgs() << ']'; |
1215 | } |
1216 | assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present" ); |
1217 | break; |
1218 | } |
1219 | } |
1220 | } |
1221 | dbgs() << '\n'; |
1222 | // Check that LiveVirtRegs is the inverse. |
1223 | for (const LiveReg &LR : LiveVirtRegs) { |
1224 | Register VirtReg = LR.VirtReg; |
1225 | assert(VirtReg.isVirtual() && "Bad map key" ); |
1226 | MCPhysReg PhysReg = LR.PhysReg; |
1227 | if (PhysReg != 0) { |
1228 | assert(Register::isPhysicalRegister(PhysReg) && "mapped to physreg" ); |
1229 | for (MCRegUnit Unit : TRI->regunits(Reg: PhysReg)) { |
1230 | assert(RegUnitStates[Unit] == VirtReg && "inverse map valid" ); |
1231 | } |
1232 | } |
1233 | } |
1234 | } |
1235 | #endif |
1236 | |
/// Count the number of defs consumed from each register class by \p Reg.
1238 | void RegAllocFast::addRegClassDefCounts( |
1239 | std::vector<unsigned> &RegClassDefCounts, Register Reg) const { |
1240 | assert(RegClassDefCounts.size() == TRI->getNumRegClasses()); |
1241 | |
1242 | if (Reg.isVirtual()) { |
1243 | if (!shouldAllocateRegister(Reg)) |
1244 | return; |
1245 | const TargetRegisterClass *OpRC = MRI->getRegClass(Reg); |
1246 | for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses(); |
1247 | RCIdx != RCIdxEnd; ++RCIdx) { |
1248 | const TargetRegisterClass *IdxRC = TRI->getRegClass(i: RCIdx); |
1249 | // FIXME: Consider aliasing sub/super registers. |
1250 | if (OpRC->hasSubClassEq(RC: IdxRC)) |
1251 | ++RegClassDefCounts[RCIdx]; |
1252 | } |
1253 | |
1254 | return; |
1255 | } |
1256 | |
1257 | for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses(); |
1258 | RCIdx != RCIdxEnd; ++RCIdx) { |
1259 | const TargetRegisterClass *IdxRC = TRI->getRegClass(i: RCIdx); |
1260 | for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) { |
1261 | if (IdxRC->contains(Reg: *Alias)) { |
1262 | ++RegClassDefCounts[RCIdx]; |
1263 | break; |
1264 | } |
1265 | } |
1266 | } |
1267 | } |
1268 | |
1269 | /// Compute \ref DefOperandIndexes so it contains the indices of "def" operands |
/// that are to be allocated. Those are ordered so that small register classes,
/// early clobbers and livethroughs are allocated first.
1272 | void RegAllocFast::findAndSortDefOperandIndexes(const MachineInstr &MI) { |
1273 | DefOperandIndexes.clear(); |
1274 | |
1275 | // Track number of defs which may consume a register from the class. |
1276 | std::vector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0); |
1277 | assert(RegClassDefCounts[0] == 0); |
1278 | |
1279 | LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n" ); |
1280 | for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) { |
1281 | const MachineOperand &MO = MI.getOperand(i: I); |
1282 | if (!MO.isReg()) |
1283 | continue; |
1284 | Register Reg = MO.getReg(); |
1285 | if (MO.readsReg()) { |
1286 | if (Reg.isPhysical()) { |
1287 | LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n'); |
1288 | markPhysRegUsedInInstr(PhysReg: Reg); |
1289 | } |
1290 | } |
1291 | |
1292 | if (MO.isDef()) { |
1293 | if (Reg.isVirtual() && shouldAllocateRegister(Reg)) |
1294 | DefOperandIndexes.push_back(Elt: I); |
1295 | |
1296 | addRegClassDefCounts(RegClassDefCounts, Reg); |
1297 | } |
1298 | } |
1299 | |
1300 | llvm::sort(C&: DefOperandIndexes, Comp: [&](uint16_t I0, uint16_t I1) { |
1301 | const MachineOperand &MO0 = MI.getOperand(i: I0); |
1302 | const MachineOperand &MO1 = MI.getOperand(i: I1); |
1303 | Register Reg0 = MO0.getReg(); |
1304 | Register Reg1 = MO1.getReg(); |
1305 | const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg: Reg0); |
1306 | const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg: Reg1); |
1307 | |
// Identify register classes that are easy to use up completely just in this
1309 | // instruction. |
1310 | unsigned ClassSize0 = RegClassInfo.getOrder(RC: &RC0).size(); |
1311 | unsigned ClassSize1 = RegClassInfo.getOrder(RC: &RC1).size(); |
1312 | |
1313 | bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()]; |
1314 | bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()]; |
1315 | if (SmallClass0 > SmallClass1) |
1316 | return true; |
1317 | if (SmallClass0 < SmallClass1) |
1318 | return false; |
1319 | |
1320 | // Allocate early clobbers and livethrough operands first. |
1321 | bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() || |
1322 | (MO0.getSubReg() == 0 && !MO0.isUndef()); |
1323 | bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() || |
1324 | (MO1.getSubReg() == 0 && !MO1.isUndef()); |
1325 | if (Livethrough0 > Livethrough1) |
1326 | return true; |
1327 | if (Livethrough0 < Livethrough1) |
1328 | return false; |
1329 | |
1330 | // Tie-break rule: operand index. |
1331 | return I0 < I1; |
1332 | }); |
1333 | } |
1334 | |
// Returns true if MO is tied and the operand it's tied to is not Undef (note
// that "not Undef" is not the same thing as Def).
1337 | static bool isTiedToNotUndef(const MachineOperand &MO) { |
1338 | if (!MO.isTied()) |
1339 | return false; |
1340 | const MachineInstr &MI = *MO.getParent(); |
1341 | unsigned TiedIdx = MI.findTiedOperandIdx(OpIdx: MI.getOperandNo(I: &MO)); |
1342 | const MachineOperand &TiedMO = MI.getOperand(i: TiedIdx); |
1343 | return !TiedMO.isUndef(); |
1344 | } |
1345 | |
1346 | void RegAllocFast::allocateInstruction(MachineInstr &MI) { |
1347 | // The basic algorithm here is: |
1348 | // 1. Mark registers of def operands as free |
1349 | // 2. Allocate registers to use operands and place reload instructions for |
1350 | // registers displaced by the allocation. |
1351 | // |
1352 | // However we need to handle some corner cases: |
1353 | // - pre-assigned defs and uses need to be handled before the other def/use |
1354 | // operands are processed to avoid the allocation heuristics clashing with |
1355 | // the pre-assignment. |
1356 | // - The "free def operands" step has to come last instead of first for tied |
1357 | // operands and early-clobbers. |
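// (Keep in mind that the block is walked bottom-up, so by the time an
// instruction reaches this function everything after it in the block has
// already been allocated; that is why defs free registers while uses
// allocate them.)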
1358 | |
  UsedInInstr.clear();
  RegMasks.clear();
  BundleVirtRegsMap.clear();

  // Scan for special cases; apply pre-assigned register defs to state.
  bool HasPhysRegUse = false;
  bool HasRegMask = false;
  bool HasVRegDef = false;
  bool HasDef = false;
  bool HasEarlyClobber = false;
  bool NeedToAssignLiveThroughs = false;
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (Reg.isVirtual()) {
        if (!shouldAllocateRegister(Reg))
          continue;
        if (MO.isDef()) {
          HasDef = true;
          HasVRegDef = true;
          if (MO.isEarlyClobber()) {
            HasEarlyClobber = true;
            NeedToAssignLiveThroughs = true;
          }
          if (isTiedToNotUndef(MO) || (MO.getSubReg() != 0 && !MO.isUndef()))
            NeedToAssignLiveThroughs = true;
        }
      } else if (Reg.isPhysical()) {
        if (!MRI->isReserved(Reg)) {
          if (MO.isDef()) {
            HasDef = true;
            bool displacedAny = definePhysReg(MI, Reg);
            if (MO.isEarlyClobber())
              HasEarlyClobber = true;
            if (!displacedAny)
              MO.setIsDead(true);
          }
          if (MO.readsReg())
            HasPhysRegUse = true;
        }
      }
    } else if (MO.isRegMask()) {
      HasRegMask = true;
      RegMasks.push_back(MO.getRegMask());
    }
  }

  // Allocate virtreg defs.
  if (HasDef) {
    if (HasVRegDef) {
      // Note that implicit MOs can get re-arranged by defineVirtReg(), so loop
      // multiple times to ensure no operand is missed.
      bool ReArrangedImplicitOps = true;

      // Special handling for early clobbers, tied operands or subregister
      // defs. Compared to "normal" defs these:
      // - Must not use a register that is pre-assigned for a use operand.
      // - In order to solve tricky inline assembly constraints we change the
      //   heuristic to figure out a good operand order before doing
      //   assignments.
      if (NeedToAssignLiveThroughs) {
        PhysRegUses.clear();

        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          findAndSortDefOperandIndexes(MI);
          for (uint16_t OpIdx : DefOperandIndexes) {
            MachineOperand &MO = MI.getOperand(OpIdx);
            LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
            Register Reg = MO.getReg();
            if (MO.isEarlyClobber() || isTiedToNotUndef(MO) ||
                (MO.getSubReg() && !MO.isUndef())) {
              ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
            } else {
              ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
            }
            // Implicit operands of MI were re-arranged; re-compute
            // DefOperandIndexes from scratch.
            if (ReArrangedImplicitOps)
              break;
          }
        }
      } else {
        // Assign virtual register defs.
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          for (MachineOperand &MO : MI.operands()) {
            if (!MO.isReg() || !MO.isDef())
              continue;
            Register Reg = MO.getReg();
            if (Reg.isVirtual()) {
              ReArrangedImplicitOps =
                  defineVirtReg(MI, MI.getOperandNo(&MO), Reg);
              if (ReArrangedImplicitOps)
                break;
            }
          }
        }
      }
    }

    // Free registers occupied by defs.
    // Iterate operands in reverse order, so we see the implicit super register
    // defs first (we added them earlier in case of <def,read-undef>).
    for (MachineOperand &MO : reverse(MI.operands())) {
      if (!MO.isReg() || !MO.isDef())
        continue;

      Register Reg = MO.getReg();

      // Subregister defs don't free the full register. We left the subreg
      // number around as a marker in setPhysReg() to recognize this case here.
      if (Reg.isPhysical() && MO.getSubReg() != 0) {
        MO.setSubReg(0);
        continue;
      }

      assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
             "tied def assigned to clobbered register");

      // Do not free tied operands and early clobbers.
      if (isTiedToNotUndef(MO) || MO.isEarlyClobber())
        continue;
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical());
      if (MRI->isReserved(Reg))
        continue;
      freePhysReg(Reg);
      unmarkRegUsedInInstr(Reg);
    }
  }

  // Displace clobbered registers.
  if (HasRegMask) {
    assert(!RegMasks.empty() && "expected RegMask");
    // MRI bookkeeping.
    for (const auto *RM : RegMasks)
      MRI->addPhysRegsUsedFromRegMask(RM);

    // Displace clobbered registers.
    for (const LiveReg &LR : LiveVirtRegs) {
      MCPhysReg PhysReg = LR.PhysReg;
      if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
        displacePhysReg(MI, PhysReg);
    }
  }

  // Apply pre-assigned register uses to state.
  if (HasPhysRegUse) {
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.readsReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isPhysical())
        continue;
      if (MRI->isReserved(Reg))
        continue;
      if (!usePhysReg(MI, Reg))
        MO.setIsKill(true);
    }
  }

  // Allocate virtreg uses and insert reloads as necessary.
  // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
  // times to ensure no operand is missed.
  bool HasUndefUse = false;
  bool ReArrangedImplicitMOs = true;
  while (ReArrangedImplicitMOs) {
    ReArrangedImplicitMOs = false;
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      if (MO.isUndef()) {
        HasUndefUse = true;
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      assert(!MO.isInternalRead() && "Bundles not supported");
      assert(MO.readsReg() && "reading use");
      ReArrangedImplicitMOs = useVirtReg(MI, MO, Reg);
      if (ReArrangedImplicitMOs)
        break;
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assignment,
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.all_uses()) {
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }

  // Free early clobbers.
  if (HasEarlyClobber) {
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      if (!MO.isEarlyClobber())
        continue;
      assert(!MO.getSubReg() && "should be already handled in def processing");

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical() && "should have register assigned");

      // We sometimes get odd situations like:
      //   early-clobber %x0 = INSTRUCTION %x0
      // which is semantically questionable as the early-clobber should
      // apply before the use. But in practice we consider the use to
      // happen before the early clobber now. Don't free the early clobber
      // register in this case.
      if (MI.readsRegister(Reg, TRI))
        continue;

      freePhysReg(Reg);
    }
  }

  LLVM_DEBUG(dbgs() << "<< " << MI);
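  // A COPY whose source and destination ended up in the same register is now
  // an identity copy; remember it so it can be erased once the whole block has
  // been allocated (LiveVirtRegs may still refer to it until then).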
  if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getNumOperands() == 2) {
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}

void RegAllocFast::handleDebugValue(MachineInstr &MI) {
  // Ignore DBG_VALUEs that aren't based on virtual registers. These are
  // mostly constants and frame indices.
  assert(MI.isDebugValue() && "not a DBG_VALUE*");
  for (const auto &MO : MI.debug_operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg.isVirtual())
      continue;
    if (!shouldAllocateRegister(Reg))
      continue;

    // Already spilled to a stackslot?
    int SS = StackSlotForVirtReg[Reg];
    if (SS != -1) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      updateDbgValueForSpill(MI, SS, Reg);
      LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
      continue;
    }

    // See if this virtual register has already been allocated to a physical
    // register or spilled to a stack slot.
    LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
    SmallVector<MachineOperand *> DbgOps;
    for (MachineOperand &Op : MI.getDebugOperandsForReg(Reg))
      DbgOps.push_back(&Op);

    if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
      // Update every use of Reg within MI.
      for (auto &RegMO : DbgOps)
        setPhysReg(MI, *RegMO, LRI->PhysReg);
    } else {
      DanglingDbgValues[Reg].push_back(&MI);
    }

    // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
    // that future spills of Reg will have DBG_VALUEs.
    LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
  }
}

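// Rewrite virtual register operands in the instructions bundled with the
// BUNDLE header MI, using the assignments recorded in BundleVirtRegsMap while
// the header itself was allocated.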
void RegAllocFast::handleBundle(MachineInstr &MI) {
  MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
  ++BundledMI;
  while (BundledMI->isBundledWithPred()) {
    for (MachineOperand &MO : BundledMI->operands()) {
      if (!MO.isReg())
        continue;

      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      DenseMap<Register, MCPhysReg>::iterator DI;
      DI = BundleVirtRegsMap.find(Reg);
      assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");

      setPhysReg(MI, MO, DI->second);
    }

    ++BundledMI;
  }
}

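// Allocate registers for every instruction in MBB. Allocation proceeds
// bottom-up: the block is traversed in reverse order and live-out physical
// registers are treated as pre-assigned.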
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  PosIndexes.unsetInitialized();
  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

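  // Physical registers that are live out of the block are marked pre-assigned
  // so they are not handed out to virtual registers during the backward walk.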
  for (const auto &LiveReg : MBB.liveouts())
    setPhysRegState(LiveReg.PhysReg, regPreAssigned);

  Coalesced.clear();

  // Traverse the block in reverse order, allocating instructions one by one.
  for (MachineInstr &MI : reverse(MBB)) {
    LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);

    // Once the BUNDLE header is assigned registers, the same assignments need
    // to be applied to the bundled MIs.
    if (MI.getOpcode() == TargetOpcode::BUNDLE) {
      handleBundle(MI);
    }
  }

  LLVM_DEBUG(dbgs() << "Begin Regs:"; dumpState());

  // Reload the live-in virtual registers at the beginning of the block now
  // that all instructions have been allocated.
  LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
  reloadAtBegin(MBB);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

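  // DBG_VALUEs whose virtual register never received a location in this block
  // are made undef so they do not describe a stale register.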
  for (auto &UDBGPair : DanglingDbgValues) {
    for (MachineInstr *DbgValue : UDBGPair.second) {
      assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
      // Nothing to do if the vreg was spilled in the meantime.
      if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
        continue;
      LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                        << '\n');
      DbgValue->setDebugValueUndef();
    }
  }
  DanglingDbgValues.clear();

  LLVM_DEBUG(MBB.dump());
}

bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TRI = STI.getRegisterInfo();
  TII = STI.getInstrInfo();
  MFI = &MF.getFrameInfo();
  MRI->freezeReservedRegs();
  RegClassInfo.runOnMachineFunction(MF);
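  // UsedInInstr and PhysRegUses track the register units touched while a
  // single instruction is being allocated; size both sets to the target's
  // register-unit universe.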
  unsigned NumRegUnits = TRI->getNumRegUnits();
  UsedInInstr.clear();
  UsedInInstr.setUniverse(NumRegUnits);
  PhysRegUses.clear();
  PhysRegUses.setUniverse(NumRegUnits);

  // Initialize the virtual->physical register map to have a 'null' mapping
  // for all virtual registers.
  unsigned NumVirtRegs = MRI->getNumVirtRegs();
  StackSlotForVirtReg.resize(NumVirtRegs);
  LiveVirtRegs.setUniverse(NumVirtRegs);
  MayLiveAcrossBlocks.clear();
  MayLiveAcrossBlocks.resize(NumVirtRegs);

  // Loop over all of the basic blocks, eliminating virtual register
  // references.
  for (MachineBasicBlock &MBB : MF)
    allocateBasicBlock(MBB);

  if (ClearVirtRegs) {
    // All machine operands and other references to virtual registers have been
    // replaced. Remove the virtual registers.
    MRI->clearVirtRegs();
  }

  StackSlotForVirtReg.clear();
  LiveDbgValueMap.clear();
  return true;
}

FunctionPass *llvm::createFastRegisterAllocator() { return new RegAllocFast(); }

FunctionPass *llvm::createFastRegisterAllocator(RegClassFilterFunc Ftor,
                                                bool ClearVirtRegs) {
  return new RegAllocFast(Ftor, ClearVirtRegs);
}