//===- MachineSink.cpp - Sinking for machine instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineCycleAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-sink"

static cl::opt<bool>
    SplitEdges("machine-sink-split",
               cl::desc("Split critical edges during machine sinking"),
               cl::init(true), cl::Hidden);

static cl::opt<bool>
    UseBlockFreqInfo("machine-sink-bfi",
                     cl::desc("Use block frequency info to find successors "
                              "to sink"),
                     cl::init(true), cl::Hidden);

static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
    "machine-sink-split-probability-threshold",
    cl::desc(
        "Percentage threshold for splitting single-instruction critical edge. "
        "If the branch threshold is higher than this threshold, we allow "
        "speculative execution of up to 1 instruction to avoid branching to "
        "the split critical edge"),
    cl::init(40), cl::Hidden);

static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold(
    "machine-sink-load-instrs-threshold",
    cl::desc("Do not try to find alias store for a load if there is an "
             "in-path block whose instruction count is higher than this "
             "threshold."),
    cl::init(2000), cl::Hidden);

static cl::opt<unsigned> SinkLoadBlocksThreshold(
    "machine-sink-load-blocks-threshold",
    cl::desc("Do not try to find alias store for a load if the number of "
             "blocks on the straight line is higher than this threshold."),
    cl::init(20), cl::Hidden);

static cl::opt<bool>
    SinkInstsIntoCycle("sink-insts-to-avoid-spills",
                       cl::desc("Sink instructions into cycles to avoid "
                                "register spills"),
                       cl::init(false), cl::Hidden);

static cl::opt<unsigned> SinkIntoCycleLimit(
    "machine-sink-cycle-limit",
    cl::desc("The maximum number of instructions considered for cycle "
             "sinking."),
    cl::init(50), cl::Hidden);

STATISTIC(NumSunk, "Number of machine instructions sunk");
STATISTIC(NumCycleSunk, "Number of machine instructions sunk into a cycle");
STATISTIC(NumSplit, "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");

namespace {

class MachineSinking : public MachineFunctionPass {
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;      // Machine register information
  MachineDominatorTree *DT = nullptr;      // Machine dominator tree
  MachinePostDominatorTree *PDT = nullptr; // Machine post dominator tree
  MachineCycleInfo *CI = nullptr;
  MachineBlockFrequencyInfo *MBFI = nullptr;
  const MachineBranchProbabilityInfo *MBPI = nullptr;
  AliasAnalysis *AA = nullptr;
  RegisterClassInfo RegClassInfo;

  // Remember which edges have been considered for breaking.
  SmallSet<std::pair<MachineBasicBlock *, MachineBasicBlock *>, 8>
      CEBCandidates;
  // Remember which edges we are about to split. This is different from
  // CEBCandidates, since the edges recorded here will actually be split.
  SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;

  DenseSet<Register> RegsToClearKillFlags;

  using AllSuccsCache =
      DenseMap<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;

  /// DBG_VALUE pointer and flag. The flag is true if this DBG_VALUE is
  /// post-dominated by another DBG_VALUE of the same variable location.
  /// This is necessary to detect sequences such as:
  ///     %0 = someinst
  ///     DBG_VALUE %0, !123, !DIExpression()
  ///     %1 = anotherinst
  ///     DBG_VALUE %1, !123, !DIExpression()
  /// Where if %0 were to sink, the DBG_VALUE should not sink with it, as that
  /// would re-order assignments.
  using SeenDbgUser = PointerIntPair<MachineInstr *, 1>;

  /// Record of DBG_VALUE uses of vregs in a block, so that we can identify
  /// debug instructions to sink.
  SmallDenseMap<unsigned, TinyPtrVector<SeenDbgUser>> SeenDbgUsers;

  /// Record of debug variables that have had their locations set in the
  /// current block.
  DenseSet<DebugVariable> SeenDbgVars;

  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool>
      HasStoreCache;

  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>,
           SmallVector<MachineInstr *>>
      StoreInstrCache;

  /// Cached BB's register pressure.
  DenseMap<const MachineBasicBlock *, std::vector<unsigned>>
      CachedRegisterPressure;

  bool EnableSinkAndFold;

public:
  static char ID; // Pass identification

  MachineSinking() : MachineFunctionPass(ID) {
    initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    AU.addRequired<MachineCycleInfoWrapperPass>();
    AU.addRequired<MachineBranchProbabilityInfo>();
    AU.addPreserved<MachineCycleInfoWrapperPass>();
    AU.addPreserved<MachineLoopInfo>();
    if (UseBlockFreqInfo)
      AU.addRequired<MachineBlockFrequencyInfo>();
    AU.addRequired<TargetPassConfig>();
  }

  void releaseMemory() override {
    CEBCandidates.clear();
  }

private:
  bool ProcessBlock(MachineBasicBlock &MBB);
  void ProcessDbgInst(MachineInstr &MI);
  bool isWorthBreakingCriticalEdge(MachineInstr &MI,
                                   MachineBasicBlock *From,
                                   MachineBasicBlock *To);

  bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To,
                       MachineInstr &MI);

  /// Postpone the splitting of the given critical
  /// edge (\p From, \p To).
  ///
  /// We do not split the edges on the fly. Indeed, this invalidates
  /// the dominance information and thus triggers a lot of updates
  /// of that information underneath.
  /// Instead, we postpone all the splits after each iteration of
  /// the main loop. That way, the information is at least valid
  /// for the lifetime of an iteration.
  ///
  /// \return True if the edge is marked as toSplit, false otherwise.
  /// False can be returned if, for instance, this is not profitable.
  bool PostponeSplitCriticalEdge(MachineInstr &MI,
                                 MachineBasicBlock *From,
                                 MachineBasicBlock *To,
                                 bool BreakPHIEdge);
  bool SinkInstruction(MachineInstr &MI, bool &SawStore,
                       AllSuccsCache &AllSuccessors);
  /// If we sink a COPY inst, some debug users of its destination may no
  /// longer be dominated by the COPY, and will eventually be dropped.
  /// This is easily rectified by forwarding the non-dominated debug uses
  /// to the copy source.
  void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,
                                     MachineBasicBlock *TargetBlock);
  bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB,
                               MachineBasicBlock *DefMBB, bool &BreakPHIEdge,
                               bool &LocalUse) const;
  MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                      bool &BreakPHIEdge,
                                      AllSuccsCache &AllSuccessors);

  void FindCycleSinkCandidates(MachineCycle *Cycle, MachineBasicBlock *BB,
                               SmallVectorImpl<MachineInstr *> &Candidates);
  bool SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I);

  bool isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock *SuccToSinkTo,
                            AllSuccsCache &AllSuccessors);

  bool PerformTrivialForwardCoalescing(MachineInstr &MI,
                                       MachineBasicBlock *MBB);

  bool PerformSinkAndFold(MachineInstr &MI, MachineBasicBlock *MBB);

  SmallVector<MachineBasicBlock *, 4> &
  GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                         AllSuccsCache &AllSuccessors) const;

  std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB);

  bool registerPressureSetExceedsLimit(unsigned NRegs,
                                       const TargetRegisterClass *RC,
                                       const MachineBasicBlock &MBB);
};

} // end anonymous namespace

char MachineSinking::ID = 0;

char &llvm::MachineSinkingID = MachineSinking::ID;

INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineCycleInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
                    "Machine code sinking", false, false)

/// Return true if a target defined block prologue instruction interferes
/// with a sink candidate.
static bool blockPrologueInterferes(const MachineBasicBlock *BB,
                                    MachineBasicBlock::const_iterator End,
                                    const MachineInstr &MI,
                                    const TargetRegisterInfo *TRI,
                                    const TargetInstrInfo *TII,
                                    const MachineRegisterInfo *MRI) {
  for (MachineBasicBlock::const_iterator PI = BB->getFirstNonPHI(); PI != End;
       ++PI) {
    // Only check target defined prologue instructions
    if (!TII->isBasicBlockPrologue(*PI))
      continue;
    for (auto &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (MO.isUse()) {
        if (Reg.isPhysical() &&
            (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))
          continue;
        if (PI->modifiesRegister(Reg, TRI))
          return true;
      } else {
        if (PI->readsRegister(Reg, TRI))
          return true;
        // Check for interference with non-dead defs
        auto *DefOp = PI->findRegisterDefOperand(Reg, TRI, false, true);
        if (DefOp && !DefOp->isDead())
          return true;
      }
    }
  }

  return false;
}

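// Illustrative sketch (not taken from any particular target) of the trivial
// forward-coalescing pattern handled below: a COPY between virtual registers
// of the same class, where the source has exactly one non-debug use,
//
//   %src = SOMEOP %a, %b
//   %dst = COPY %src        ; only non-debug use of %src
//
// is removed by rewriting every use of %dst to %src and erasing the COPY.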
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI.isCopy())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  Register DstReg = MI.getOperand(0).getReg();
  if (!SrcReg.isVirtual() || !DstReg.isVirtual() ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
  LLVM_DEBUG(dbgs() << "*** to: " << MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI.eraseFromParent();

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MRI->clearKillFlags(SrcReg);

  ++NumCoalesces;
  return true;
}

bool MachineSinking::PerformSinkAndFold(MachineInstr &MI,
                                        MachineBasicBlock *MBB) {
  if (MI.isCopy() || MI.mayLoadOrStore() ||
      MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
    return false;

  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
  bool SawStore = true;
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't sink defs/uses of hard registers or if the instruction defines more
  // than one register.
  // Don't sink more than two register uses - it'll cover most of the cases and
  // greatly simplifies the register pressure checks.
  Register DefReg;
  Register UsedRegA, UsedRegB;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
        MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
        MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
      continue;
    if (!MO.isReg())
      return false;

    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Reg.isVirtual()) {
      if (MO.isDef()) {
        if (DefReg)
          return false;
        DefReg = Reg;
        continue;
      }

      if (UsedRegA == 0)
        UsedRegA = Reg;
      else if (UsedRegB == 0)
        UsedRegB = Reg;
      else
        return false;
      continue;
    }

    if (Reg.isPhysical() &&
        (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))
      continue;

    return false;
  }

  // Scan uses of the destination register. Every use, except the last, must be
  // a copy, with a chain of copies terminating with either a copy into a hard
  // register, or a load/store instruction where the use is part of the
  // address (*not* the stored value).
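  //
  // For example (an illustrative, target-neutral sketch):
  //
  //   %v = ADDri %base, 16     ; MI, as cheap as a move
  //   %p = COPY %v
  //   ... = LOAD %p, 0
  //
  // may allow MI to be folded into the load's addressing mode, yielding
  // "... = LOAD %base, 16" and leaving the COPY and MI dead.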
  using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
  SmallVector<SinkInfo> SinkInto;
  SmallVector<Register> Worklist;

  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
  const TargetRegisterClass *RCA =
      UsedRegA == 0 ? nullptr : MRI->getRegClass(UsedRegA);
  const TargetRegisterClass *RCB =
      UsedRegB == 0 ? nullptr : MRI->getRegClass(UsedRegB);

  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();

    for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
      ExtAddrMode MaybeAM;
      MachineInstr &UseInst = *MO.getParent();
      if (UseInst.isCopy()) {
        Register DstReg;
        if (const MachineOperand &O = UseInst.getOperand(0); O.isReg())
          DstReg = O.getReg();
        if (DstReg == 0)
          return false;
        if (DstReg.isVirtual()) {
          Worklist.push_back(DstReg);
          continue;
        }
        // If we are going to replace a copy, the original instruction must be
        // as cheap as a copy.
        if (!TII->isAsCheapAsAMove(MI))
          return false;
        // The hard register must be in the register class of the original
        // instruction's destination register.
        if (!RC->contains(DstReg))
          return false;
      } else if (UseInst.mayLoadOrStore()) {
        ExtAddrMode AM;
        if (!TII->canFoldIntoAddrMode(UseInst, Reg, MI, AM))
          return false;
        MaybeAM = AM;
      } else {
        return false;
      }

      if (UseInst.getParent() != MI.getParent()) {
        // If the register class of the register we are replacing is a superset
        // of any of the register classes of the operands of the materialized
        // instruction don't consider that live range extended.
        const TargetRegisterClass *RCS = MRI->getRegClass(Reg);
        if (RCA && RCA->hasSuperClassEq(RCS))
          RCA = nullptr;
        else if (RCB && RCB->hasSuperClassEq(RCS))
          RCB = nullptr;
        if (RCA || RCB) {
          if (RCA == nullptr) {
            RCA = RCB;
            RCB = nullptr;
          }

          unsigned NRegs = !!RCA + !!RCB;
          if (RCA == RCB)
            RCB = nullptr;

          // Check we don't exceed register pressure at the destination.
          const MachineBasicBlock &MBB = *UseInst.getParent();
          if (RCB == nullptr) {
            if (registerPressureSetExceedsLimit(NRegs, RCA, MBB))
              return false;
          } else if (registerPressureSetExceedsLimit(1, RCA, MBB) ||
                     registerPressureSetExceedsLimit(1, RCB, MBB)) {
            return false;
          }
        }
      }

      SinkInto.emplace_back(&UseInst, MaybeAM);
    }
  }

  if (SinkInto.empty())
    return false;

  // Now we know we can fold the instruction in all its users.
  for (auto &[SinkDst, MaybeAM] : SinkInto) {
    MachineInstr *New = nullptr;
    LLVM_DEBUG(dbgs() << "Sinking copy of"; MI.dump(); dbgs() << "into";
               SinkDst->dump());
    if (SinkDst->isCopy()) {
      // TODO: After performing the sink-and-fold, the original instruction is
      // deleted. Its value is still available (in a hard register), so if
      // there are debug instructions which refer to the (now deleted) virtual
      // register they could be updated to refer to the hard register, in
      // principle. However, it's not clear how to do that, moreover in some
      // cases the debug instructions may need to be replicated proportionally
      // to the number of the COPY instructions replaced and in some extreme
      // cases we can end up with quadratic increase in the number of debug
      // instructions.

      // Sink a copy of the instruction, replacing a COPY instruction.
      MachineBasicBlock::iterator InsertPt = SinkDst->getIterator();
      Register DstReg = SinkDst->getOperand(0).getReg();
      TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI, *TRI);
      New = &*std::prev(InsertPt);
      if (!New->getDebugLoc())
        New->setDebugLoc(SinkDst->getDebugLoc());

      // The operand registers of the "sunk" instruction have their live range
      // extended and their kill flags may no longer be correct. Conservatively
      // clear the kill flags.
      if (UsedRegA)
        MRI->clearKillFlags(UsedRegA);
      if (UsedRegB)
        MRI->clearKillFlags(UsedRegB);
    } else {
      // Fold instruction into the addressing mode of a memory instruction.
      New = TII->emitLdStWithAddr(*SinkDst, MaybeAM);

      // The registers of the addressing mode may have their live range
      // extended and their kill flags may no longer be correct. Conservatively
      // clear the kill flags.
      if (Register R = MaybeAM.BaseReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
      if (Register R = MaybeAM.ScaledReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
    }
    LLVM_DEBUG(dbgs() << "yielding"; New->dump());
    // Clear the StoreInstrCache, since we may invalidate it by erasing.
    if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
      StoreInstrCache.clear();
    SinkDst->eraseFromParent();
  }

  // Collect operands that need to be cleaned up because the registers no
  // longer exist (in COPYs and debug instructions). We cannot delete
  // instructions or clear operands while traversing register uses.
  SmallVector<MachineOperand *> Cleanup;
  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();
    for (MachineOperand &MO : MRI->use_operands(Reg)) {
      MachineInstr *U = MO.getParent();
      assert((U->isCopy() || U->isDebugInstr()) &&
             "Only debug uses and copies must remain");
      if (U->isCopy())
        Worklist.push_back(U->getOperand(0).getReg());
      Cleanup.push_back(&MO);
    }
  }

  // Delete the dead COPYs and clear operands in debug instructions
  for (MachineOperand *MO : Cleanup) {
    MachineInstr *I = MO->getParent();
    if (I->isCopy()) {
      I->eraseFromParent();
    } else {
      MO->setReg(0);
      MO->setSubReg(0);
    }
  }

  MI.eraseFromParent();
  return true;
}

/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool MachineSinking::AllUsesDominatedByBlock(Register Reg,
                                             MachineBasicBlock *MBB,
                                             MachineBasicBlock *DefMBB,
                                             bool &BreakPHIEdge,
                                             bool &LocalUse) const {
  assert(Reg.isVirtual() && "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // %bb.1:
  //   Predecessors according to CFG: %bb.0
  //     ...
  //     %def = DEC64_32r %x, implicit-def dead %eflags
  //     ...
  //     JE_4 <%bb.37>, implicit %eflags
  //   Successors according to CFG: %bb.37 %bb.2
  //
  // %bb.2:
  //     %p = PHI %y, %bb.0, %def, %bb.1
  if (all_of(MRI->use_nodbg_operands(Reg), [&](MachineOperand &MO) {
        MachineInstr *UseInst = MO.getParent();
        unsigned OpNo = MO.getOperandNo();
        MachineBasicBlock *UseBlock = UseInst->getParent();
        return UseBlock == MBB && UseInst->isPHI() &&
               UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
      })) {
    BreakPHIEdge = true;
    return true;
  }

  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Determine the block of the use.
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(OpNo + 1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}

/// Return true if this machine instruction loads from global offset table or
/// constant pool.
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
  assert(MI.mayLoad() && "Expected MI that loads!");

  // If we lost memory operands, conservatively assume that the instruction
  // reads from everything.
  if (MI.memoperands_empty())
    return true;

  for (MachineMemOperand *MemOp : MI.memoperands())
    if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
      if (PSV->isGOT() || PSV->isConstantPool())
        return true;

  return false;
}

void MachineSinking::FindCycleSinkCandidates(
    MachineCycle *Cycle, MachineBasicBlock *BB,
    SmallVectorImpl<MachineInstr *> &Candidates) {
  for (auto &MI : *BB) {
    LLVM_DEBUG(dbgs() << "CycleSink: Analysing candidate: " << MI);
    if (!TII->shouldSink(MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not a candidate for this "
                           "target\n");
      continue;
    }
    if (!isCycleInvariant(Cycle, MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction is not cycle invariant\n");
      continue;
    }
    bool DontMoveAcrossStore = true;
    if (!MI.isSafeToMove(AA, DontMoveAcrossStore)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not safe to move.\n");
      continue;
    }
    if (MI.mayLoad() && !mayLoadFromGOTOrConstantPool(MI)) {
      LLVM_DEBUG(dbgs()
                 << "CycleSink: Don't sink GOT or constant pool loads\n");
      continue;
    }
    if (MI.isConvergent())
      continue;

    const MachineOperand &MO = MI.getOperand(0);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    if (!MRI->hasOneDef(MO.getReg()))
      continue;

    LLVM_DEBUG(dbgs() << "CycleSink: Instruction added as candidate.\n");
    Candidates.push_back(&MI);
  }
}

bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");

  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();
  CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
  MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  RegClassInfo.runOnMachineFunction(MF);
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
  EnableSinkAndFold = PassConfig->getEnableSinkAndFold();

  bool EverMadeChange = false;

  while (true) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    ToSplit.clear();
    for (auto &MBB : MF)
      MadeChange |= ProcessBlock(MBB);

    // If we have anything we marked as toSplit, split it now.
    for (const auto &Pair : ToSplit) {
      auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
      if (NewSucc != nullptr) {
        LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
                          << printMBBReference(*Pair.first) << " -- "
                          << printMBBReference(*NewSucc) << " -- "
                          << printMBBReference(*Pair.second) << '\n');
        if (MBFI)
          MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI);

        MadeChange = true;
        ++NumSplit;
        CI->splitCriticalEdge(Pair.first, Pair.second, NewSucc);
      } else
        LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
    }
    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }

  if (SinkInstsIntoCycle) {
    SmallVector<MachineCycle *, 8> Cycles(CI->toplevel_begin(),
                                          CI->toplevel_end());
    for (auto *Cycle : Cycles) {
      MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
      if (!Preheader) {
        LLVM_DEBUG(dbgs() << "CycleSink: Can't find preheader\n");
        continue;
      }
      SmallVector<MachineInstr *, 8> Candidates;
      FindCycleSinkCandidates(Cycle, Preheader, Candidates);

      // Walk the candidates in reverse order so that we start with the use
      // of a def-use chain, if there is any.
      // TODO: Sort the candidates using a cost-model.
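      //
      // For example, given candidates [%a = ..., %b = use of %a] in block
      // order, the reverse walk tries %b first; if %b cannot be sunk, there
      // is no point sinking %a either, hence the early break below.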
      unsigned i = 0;
      for (MachineInstr *I : llvm::reverse(Candidates)) {
        if (i++ == SinkIntoCycleLimit) {
          LLVM_DEBUG(dbgs() << "CycleSink: Reached limit of instructions to "
                               "be analysed.");
          break;
        }

        if (!SinkIntoCycle(Cycle, *I))
          break;
        EverMadeChange = true;
        ++NumCycleSunk;
      }
    }
  }

  HasStoreCache.clear();
  StoreInstrCache.clear();

  // Now clear any kill flags for recorded registers.
  for (auto I : RegsToClearKillFlags)
    MRI->clearKillFlags(I);
  RegsToClearKillFlags.clear();

  return EverMadeChange;
}

bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  if ((!EnableSinkAndFold && MBB.succ_size() <= 1) || MBB.empty())
    return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable cycle there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Cache all successors, sorted by frequency info and cycle depth.
  AllSuccsCache AllSuccessors;

  // Walk the basic block bottom-up. Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr &MI = *I; // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI.isDebugOrPseudoInstr()) {
      if (MI.isDebugValue())
        ProcessDbgInst(MI);
      continue;
    }

    if (EnableSinkAndFold && PerformSinkAndFold(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    // Can't sink anything out of a block that has less than two successors.
    if (MBB.succ_size() <= 1)
      continue;

    if (PerformTrivialForwardCoalescing(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore, AllSuccessors)) {
      ++NumSunk;
      MadeChange = true;
    }

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  SeenDbgUsers.clear();
  SeenDbgVars.clear();
  // Recalculate the BB register pressure after sinking one BB.
  CachedRegisterPressure.clear();
  return MadeChange;
}

void MachineSinking::ProcessDbgInst(MachineInstr &MI) {
  // When we see DBG_VALUEs for registers, record any vreg it reads, so that
  // we know what to sink if the vreg def sinks.
  assert(MI.isDebugValue() && "Expected DBG_VALUE for processing");

  DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                    MI.getDebugLoc()->getInlinedAt());
  bool SeenBefore = SeenDbgVars.contains(Var);

  for (MachineOperand &MO : MI.debug_operands()) {
    if (MO.isReg() && MO.getReg().isVirtual())
      SeenDbgUsers[MO.getReg()].push_back(SeenDbgUser(&MI, SeenBefore));
  }

  // Record the variable for any DBG_VALUE, to avoid re-ordering any of them.
  SeenDbgVars.insert(Var);
}

bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)).second)
    return true;

  if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
    return true;

  if (From->isSuccessor(To) &&
      MBPI->getEdgeProbability(From, To) <=
          BranchProbability(SplitEdgeProbabilityThreshold, 100))
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
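  //
  // For example (illustrative): if MI is "%y = ADD %x, 1" and the sole
  // definition of %x lives in the same block, breaking the edge may let the
  // def of %x and MI sink together on a later iteration.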
  for (const MachineOperand &MO : MI.all_uses()) {
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // We don't move live definitions of physical registers,
    // so sinking their uses won't enable any opportunities.
    if (Reg.isPhysical())
      continue;

    // If this instruction is the only user of a virtual register,
    // check if breaking the edge will enable sinking
    // both this instruction and the defining instruction.
    if (MRI->hasOneNonDBGUse(Reg)) {
      // If the definition resides in same MBB,
      // claim it's likely we can sink these together.
      // If definition resides elsewhere, we aren't
      // blocking it from being sunk so don't break the edge.
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (DefMI->getParent() == MI.getParent())
        return true;
    }
  }

  return false;
}

bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
                                               MachineBasicBlock *FromBB,
                                               MachineBasicBlock *ToBB,
                                               bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return false;

  // Avoid breaking back edge. From == To means backedge for single BB cycle.
  if (!SplitEdges || FromBB == ToBB)
    return false;

  MachineCycle *FromCycle = CI->getCycle(FromBB);
  MachineCycle *ToCycle = CI->getCycle(ToBB);

  // Check for backedges of more "complex" cycles.
  if (FromCycle == ToCycle && FromCycle &&
      (!FromCycle->isReducible() || FromCycle->getHeader() == ToBB))
    return false;

  // It's not always legal to break critical edges and sink the computation
  // to the edge.
  //
  // %bb.1:
  // v1024
  // Beq %bb.3
  // <fallthrough>
  // %bb.2:
  // ... no uses of v1024
  // <fallthrough>
  // %bb.3:
  // ...
  //        = v1024
  //
  // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
  //
  // %bb.1:
  // ...
  // Bne %bb.2
  // %bb.4:
  // v1024 =
  // B %bb.3
  // %bb.2:
  // ... no uses of v1024
  // <fallthrough>
  // %bb.3:
  // ...
  //        = v1024
  //
  // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
  // flow. We need to ensure the new basic block where the computation is
  // sunk to dominates all the uses.
  // It's only legal to break critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock *Pred : ToBB->predecessors())
      if (Pred != FromBB && !DT->dominates(ToBB, Pred))
        return false;
  }

  ToSplit.insert(std::make_pair(FromBB, ToBB));

  return true;
}

std::vector<unsigned> &
MachineSinking::getBBRegisterPressure(const MachineBasicBlock &MBB) {
  // Currently, to save compile time, an MBB's register pressure does not
  // change within one ProcessBlock iteration because of CachedRegisterPressure,
  // even though the pressure really does change once instructions are sunk
  // into the block.
  // FIXME: need an accurate and cheap register pressure estimate model here.
  auto RP = CachedRegisterPressure.find(&MBB);
  if (RP != CachedRegisterPressure.end())
    return RP->second;

  RegionPressure Pressure;
  RegPressureTracker RPTracker(Pressure);

  // Initialize the register pressure tracker.
  RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(),
                 /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true);

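  // Walk the block bottom-up: each recede() step moves the tracker above one
  // instruction, ending its defs and beginning its uses, so the recorded
  // MaxSetPressure reflects the maximum pressure reached in the block.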
  for (MachineBasicBlock::const_iterator MII = MBB.instr_end(),
                                         MIE = MBB.instr_begin();
       MII != MIE; --MII) {
    const MachineInstr &MI = *std::prev(MII);
    if (MI.isDebugInstr() || MI.isPseudoProbe())
      continue;
    RegisterOperands RegOpers;
    RegOpers.collect(MI, *TRI, *MRI, false, false);
    RPTracker.recedeSkipDebugValues();
    assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!");
    RPTracker.recede(RegOpers);
  }

  RPTracker.closeRegion();
  auto It = CachedRegisterPressure.insert(
      std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure));
  return It.first->second;
}

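/// Return true if the weight of \p NRegs additional registers of class
/// \p RC, added to the cached maximum pressure of \p MBB, reaches the
/// target's limit for any pressure set of the class. As a worked example:
/// with a weight-1 register class, NRegs == 2 and a set limit of 32, a block
/// whose cached maximum for that set is 30 exceeds the limit (30 + 2 >= 32).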
bool MachineSinking::registerPressureSetExceedsLimit(
    unsigned NRegs, const TargetRegisterClass *RC,
    const MachineBasicBlock &MBB) {
  unsigned Weight = NRegs * TRI->getRegClassWeight(RC).RegWeight;
  const int *PS = TRI->getRegClassPressureSets(RC);
  std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(MBB);
  for (; *PS != -1; PS++)
    if (Weight + BBRegisterPressure[*PS] >=
        TRI->getRegPressureSetLimit(*MBB.getParent(), *PS))
      return true;
  return false;
}

/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          MachineBasicBlock *SuccToSinkTo,
                                          AllSuccsCache &AllSuccessors) {
  assert(SuccToSinkTo && "Invalid SinkTo Candidate BB");

  if (MBB == SuccToSinkTo)
    return false;

  // It is profitable if SuccToSinkTo does not post-dominate the current block.
  if (!PDT->dominates(SuccToSinkTo, MBB))
    return true;

  // It is profitable to sink an instruction from a deeper cycle to a shallower
  // cycle, even if the latter post-dominates the former (PR21115).
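  // For example, a value computed inside a loop body but only used after the
  // loop is cheaper to compute once in the post-dominating exit block.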
  if (CI->getCycleDepth(MBB) > CI->getCycleDepth(SuccToSinkTo))
    return true;

  // Check if the only use in the post-dominated block is a PHI instruction.
  bool NonPHIUse = false;
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
    MachineBasicBlock *UseBlock = UseInst.getParent();
    if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
      NonPHIUse = true;
  }
  if (!NonPHIUse)
    return true;

  // If SuccToSinkTo post-dominates MBB, it may still be profitable if MI
  // can be sunk further into another block in the next round.
  bool BreakPHIEdge = false;
  // FIXME - If finding successor is compile time expensive then cache results.
  if (MachineBasicBlock *MBB2 =
          FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
    return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);

  MachineCycle *MCycle = CI->getCycle(MBB);

  // If the instruction is not inside a cycle, it is not profitable to sink MI
  // to a post-dominating block SuccToSinkTo.
  if (!MCycle)
    return false;

  // If this instruction is inside a cycle and sinking it can shorten the live
  // ranges of some registers, it is still profitable.
  for (const MachineOperand &MO : MI.operands()) {
    // Ignore non-register operands.
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Reg.isPhysical()) {
      // Don't handle non-constant and non-ignorable physical register uses.
      if (MO.isUse() && !MRI->isConstantPhysReg(Reg) &&
          !TII->isIgnorableUse(MO))
        return false;
      continue;
    }

    // Users for the defs are all dominated by SuccToSinkTo.
    if (MO.isDef()) {
      // This def register's live range is shortened after sinking.
      bool LocalUse = false;
      if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,
                                   LocalUse))
        return false;
    } else {
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (!DefMI)
        continue;
      MachineCycle *Cycle = CI->getCycle(DefMI->getParent());
      // DefMI is defined outside of the cycle, so there should be no live
      // range impact for this operand. "Defined outside of the cycle" means
      // either:
      // 1: the definition is outside of the cycle, or
      // 2: the definition is in this cycle, but it is a PHI in the cycle
      //    header.
      if (Cycle != MCycle || (DefMI->isPHI() && Cycle && Cycle->isReducible() &&
                              Cycle->getHeader() == DefMI->getParent()))
        continue;
      // The DefMI is defined inside the cycle.
      // If sinking this operand makes some register pressure set exceed limit,
      // it is not profitable.
      if (registerPressureSetExceedsLimit(1, MRI->getRegClass(Reg),
                                          *SuccToSinkTo)) {
        LLVM_DEBUG(dbgs()
                   << "register pressure exceeds limit, not profitable.");
        return false;
      }
    }
  }

  // If MI is in a cycle and all its operands are alive across the whole cycle,
  // or if no operand sinking makes a register pressure set exceed the limit,
  // it is profitable to sink MI.
  return true;
}

/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
/// computing it if it was not already cached.
SmallVector<MachineBasicBlock *, 4> &
MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                                       AllSuccsCache &AllSuccessors) const {
  // Do we have the sorted successors in cache?
  auto Succs = AllSuccessors.find(MBB);
  if (Succs != AllSuccessors.end())
    return Succs->second;

  SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors());

  // Handle cases where sinking can happen but where the sink point isn't a
  // successor. For example:
  //
  //   x = computation
  //   if () {} else {}
  //   use x
  //
  for (MachineDomTreeNode *DTChild : DT->getNode(MBB)->children()) {
    // DomTree children of MBB that have MBB as immediate dominator are added.
    if (DTChild->getIDom()->getBlock() == MI.getParent() &&
        // Skip MBBs already added to the AllSuccs vector above.
        !MBB->isSuccessor(DTChild->getBlock()))
      AllSuccs.push_back(DTChild->getBlock());
  }

  // Sort Successors according to their cycle depth or block frequency info.
  llvm::stable_sort(
      AllSuccs, [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
        uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
        uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
        bool HasBlockFreq = LHSFreq != 0 || RHSFreq != 0;
        return HasBlockFreq ? LHSFreq < RHSFreq
                            : CI->getCycleDepth(L) < CI->getCycleDepth(R);
      });

  auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));

  return it.first->second;
}

/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *
MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                 bool &BreakPHIEdge,
                                 AllSuccsCache &AllSuccessors) {
  assert(MBB && "Invalid MachineBasicBlock!");

  // Loop over all the operands of the specified instruction. If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = nullptr;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue; // Ignore non-register operands.

    Register Reg = MO.getReg();
    if (Reg == 0) continue;

    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
          return nullptr;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return nullptr;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return nullptr;

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return nullptr;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to. If we have reliable block frequency information
      // (frequency != 0) available, give successors with smaller frequencies
      // higher priority, otherwise prioritize smaller cycle depths.
      for (MachineBasicBlock *SuccBlock :
           GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return nullptr;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (!SuccToSinkTo)
        return nullptr;
      if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
        return nullptr;
    }
  }

  // It is not possible to sink an instruction into its own block. This can
  // happen with cycles.
  if (MBB == SuccToSinkTo)
    return nullptr;

  // It's not safe to sink instructions to EH landing pad. Control flow into
  // landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isEHPad())
    return nullptr;

  // It ought to be okay to sink instructions into an INLINEASM_BR target, but
  // only if we make sure that MI occurs _before_ an INLINEASM_BR instruction
  // in the source block (which this code does not yet do). So for now, forbid
  // doing so.
  if (SuccToSinkTo && SuccToSinkTo->isInlineAsmBrIndirectTarget())
    return nullptr;

  if (SuccToSinkTo && !TII->isSafeToSink(MI, SuccToSinkTo, CI))
    return nullptr;

  return SuccToSinkTo;
}

/// Return true if MI is likely to be usable as a memory operation by the
/// implicit null check optimization.
///
/// This is a "best effort" heuristic, and should not be relied upon for
/// correctness. This returning true does not guarantee that the implicit null
/// check optimization is legal over MI, and this returning false does not
/// guarantee MI cannot possibly be used to do a null check.
static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
                                             const TargetInstrInfo *TII,
                                             const TargetRegisterInfo *TRI) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  auto *MBB = MI.getParent();
  if (MBB->pred_size() != 1)
    return false;

  auto *PredMBB = *MBB->pred_begin();
  auto *PredBB = PredMBB->getBasicBlock();

  // Frontends that don't use implicit null checks have no reason to emit
  // branches with make.implicit metadata, and this function should always
  // return false for them.
  if (!PredBB ||
      !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
    return false;

  const MachineOperand *BaseOp;
  int64_t Offset;
  bool OffsetIsScalable;
  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))
    return false;

  if (!BaseOp->isReg())
    return false;

  if (!(MI.mayLoad() && !MI.isPredicable()))
    return false;

  MachineBranchPredicate MBP;
  if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
    return false;

  return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
         (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
          MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
         MBP.LHS.getReg() == BaseOp->getReg();
}

/// If the sunk instruction is a copy, try to forward the copy instead of
/// leaving an 'undef' DBG_VALUE in the original location. Don't do this if
/// there's any subregister weirdness involved. Returns true if copy
/// propagation occurred.
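/// For example (illustrative): when "%1 = COPY %0" sinks past "DBG_VALUE %1",
/// the DBG_VALUE can be rewritten to refer to %0 instead of being set undef.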
static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI,
                                 Register Reg) {
  const MachineRegisterInfo &MRI = SinkInst.getMF()->getRegInfo();
  const TargetInstrInfo &TII = *SinkInst.getMF()->getSubtarget().getInstrInfo();

  // Copy DBG_VALUE operand and set the original to undef. We then check to
  // see whether this is something that can be copy-forwarded. If it isn't,
  // continue around the loop.

  const MachineOperand *SrcMO = nullptr, *DstMO = nullptr;
  auto CopyOperands = TII.isCopyInstr(SinkInst);
  if (!CopyOperands)
    return false;
  SrcMO = CopyOperands->Source;
  DstMO = CopyOperands->Destination;

  // Check validity of forwarding this copy.
  bool PostRA = MRI.getNumVirtRegs() == 0;

  // Trying to forward between physical and virtual registers is too hard.
  if (Reg.isVirtual() != SrcMO->getReg().isVirtual())
    return false;

  // Only try virtual register copy-forwarding before regalloc, and physical
  // register copy-forwarding after regalloc.
  bool arePhysRegs = !Reg.isVirtual();
  if (arePhysRegs != PostRA)
    return false;

  // Pre-regalloc, only forward if all subregisters agree (or there are no
  // subregs at all). More analysis might recover some forwardable copies.
  if (!PostRA)
    for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg))
      if (DbgMO.getSubReg() != SrcMO->getSubReg() ||
          DbgMO.getSubReg() != DstMO->getSubReg())
        return false;

  // Post-regalloc, we may be sinking a DBG_VALUE of a sub or super-register
  // of this copy. Only forward the copy if the DBG_VALUE operand exactly
  // matches the copy destination.
  if (PostRA && Reg != DstMO->getReg())
    return false;

  for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg)) {
    DbgMO.setReg(SrcMO->getReg());
    DbgMO.setSubReg(SrcMO->getSubReg());
  }
  return true;
}

using MIRegs = std::pair<MachineInstr *, SmallVector<unsigned, 2>>;
/// Sink an instruction and its associated debug instructions.
static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
                        MachineBasicBlock::iterator InsertPos,
                        ArrayRef<MIRegs> DbgValuesToSink) {
  // If we cannot find a location to use (merge with), then we erase the debug
  // location to prevent debug-info driven tools from potentially reporting
  // wrong location information.
  if (!SuccToSinkTo.empty() && InsertPos != SuccToSinkTo.end())
    MI.setDebugLoc(DILocation::getMergedLocation(MI.getDebugLoc(),
                                                 InsertPos->getDebugLoc()));
  else
    MI.setDebugLoc(DebugLoc());

  // Move the instruction.
  MachineBasicBlock *ParentBlock = MI.getParent();
  SuccToSinkTo.splice(InsertPos, ParentBlock, MI,
                      ++MachineBasicBlock::iterator(MI));

  // Sink a copy of debug users to the insert position. Mark the original
  // DBG_VALUE location as 'undef', indicating that any earlier variable
  // location should be terminated as we've optimised away the value at this
  // point.
1405 | for (const auto &DbgValueToSink : DbgValuesToSink) { |
1406 | MachineInstr *DbgMI = DbgValueToSink.first; |
1407 | MachineInstr *NewDbgMI = DbgMI->getMF()->CloneMachineInstr(Orig: DbgMI); |
1408 | SuccToSinkTo.insert(I: InsertPos, MI: NewDbgMI); |
1409 | |
1410 | bool PropagatedAllSunkOps = true; |
1411 | for (unsigned Reg : DbgValueToSink.second) { |
1412 | if (DbgMI->hasDebugOperandForReg(Reg)) { |
1413 | if (!attemptDebugCopyProp(SinkInst&: MI, DbgMI&: *DbgMI, Reg)) { |
1414 | PropagatedAllSunkOps = false; |
1415 | break; |
1416 | } |
1417 | } |
1418 | } |
1419 | if (!PropagatedAllSunkOps) |
1420 | DbgMI->setDebugValueUndef(); |
1421 | } |
1422 | } |
1423 | |
/// hasStoreBetween - check if there is a store between straight-line blocks
/// From and To.
1426 | bool MachineSinking::hasStoreBetween(MachineBasicBlock *From, |
1427 | MachineBasicBlock *To, MachineInstr &MI) { |
1428 | // Make sure From and To are in straight line which means From dominates To |
1429 | // and To post dominates From. |
1430 | if (!DT->dominates(A: From, B: To) || !PDT->dominates(A: To, B: From)) |
1431 | return true; |
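
  // "Straight line" still allows the region between From and To to branch and
  // re-join internally, e.g. (a sketch; block names invented):
  //
  //   From -> A -> To
  //   From -> B -> To
  //
  // Both A and B are post-dominated by To, so both are scanned below for
  // stores that may alias MI.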
1432 | |
1433 | auto BlockPair = std::make_pair(x&: From, y&: To); |
1434 | |
  // Has this block pair been queried before, and do we have a definite cached
  // result?
1437 | if (auto It = HasStoreCache.find(Val: BlockPair); It != HasStoreCache.end()) |
1438 | return It->second; |
1439 | |
1440 | if (auto It = StoreInstrCache.find(Val: BlockPair); It != StoreInstrCache.end()) |
1441 | return llvm::any_of(Range&: It->second, P: [&](MachineInstr *I) { |
1442 | return I->mayAlias(AA, Other: MI, UseTBAA: false); |
1443 | }); |
1444 | |
1445 | bool SawStore = false; |
1446 | bool HasAliasedStore = false; |
1447 | DenseSet<MachineBasicBlock *> HandledBlocks; |
1448 | DenseSet<MachineBasicBlock *> HandledDomBlocks; |
  // Walk all blocks reachable from From.
1450 | for (MachineBasicBlock *BB : depth_first(G: From)) { |
    // We insert the instruction at the start of block To, so no need to worry
    // about stores inside To.
    // Stores in block From have already been considered on entry to
    // SinkInstruction (via SawStore).
1455 | if (BB == To || BB == From) |
1456 | continue; |
1457 | |
    // We already handled this BB in a previous iteration.
1459 | if (HandledBlocks.count(V: BB)) |
1460 | continue; |
1461 | |
1462 | HandledBlocks.insert(V: BB); |
    // If To post-dominates BB, then BB lies on a path from From to To.
    if (PDT->dominates(A: To, B: BB)) {
      HandledDomBlocks.insert(V: BB);
1467 | |
      // If this BB is too big, or too many blocks lie on the straight line
      // between From and To, stop searching to save compile time.
1470 | if (BB->sizeWithoutDebugLargerThan(Limit: SinkLoadInstsPerBlockThreshold) || |
1471 | HandledDomBlocks.size() > SinkLoadBlocksThreshold) { |
1472 | for (auto *DomBB : HandledDomBlocks) { |
1473 | if (DomBB != BB && DT->dominates(A: DomBB, B: BB)) |
1474 | HasStoreCache[std::make_pair(x&: DomBB, y&: To)] = true; |
          else if (DomBB != BB && DT->dominates(A: BB, B: DomBB))
1476 | HasStoreCache[std::make_pair(x&: From, y&: DomBB)] = true; |
1477 | } |
1478 | HasStoreCache[BlockPair] = true; |
1479 | return true; |
1480 | } |
1481 | |
1482 | for (MachineInstr &I : *BB) { |
1483 | // Treat as alias conservatively for a call or an ordered memory |
1484 | // operation. |
1485 | if (I.isCall() || I.hasOrderedMemoryRef()) { |
1486 | for (auto *DomBB : HandledDomBlocks) { |
1487 | if (DomBB != BB && DT->dominates(A: DomBB, B: BB)) |
1488 | HasStoreCache[std::make_pair(x&: DomBB, y&: To)] = true; |
            else if (DomBB != BB && DT->dominates(A: BB, B: DomBB))
1490 | HasStoreCache[std::make_pair(x&: From, y&: DomBB)] = true; |
1491 | } |
1492 | HasStoreCache[BlockPair] = true; |
1493 | return true; |
1494 | } |
1495 | |
1496 | if (I.mayStore()) { |
1497 | SawStore = true; |
          // We still have a chance to sink MI if none of the stores in
          // between alias it.
          // Cache all store instructions so we don't have to rescan all the
          // blocks reachable from From for the next load instruction.
1502 | if (I.mayAlias(AA, Other: MI, UseTBAA: false)) |
1503 | HasAliasedStore = true; |
1504 | StoreInstrCache[BlockPair].push_back(Elt: &I); |
1505 | } |
1506 | } |
1507 | } |
1508 | } |
1509 | // If there is no store at all, cache the result. |
1510 | if (!SawStore) |
1511 | HasStoreCache[BlockPair] = false; |
1512 | return HasAliasedStore; |
1513 | } |
1514 | |
1515 | /// Sink instructions into cycles if profitable. This especially tries to |
1516 | /// prevent register spills caused by register pressure if there is little to no |
1517 | /// overhead moving instructions into cycles. |
1518 | bool MachineSinking::SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I) { |
1519 | LLVM_DEBUG(dbgs() << "CycleSink: Finding sink block for: " << I); |
  MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
1521 | assert(Preheader && "Cycle sink needs a preheader block" ); |
1522 | MachineBasicBlock *SinkBlock = nullptr; |
1523 | bool CanSink = true; |
1524 | const MachineOperand &MO = I.getOperand(i: 0); |
1525 | |
1526 | for (MachineInstr &MI : MRI->use_instructions(Reg: MO.getReg())) { |
1527 | LLVM_DEBUG(dbgs() << "CycleSink: Analysing use: " << MI); |
1528 | if (!Cycle->contains(Block: MI.getParent())) { |
1529 | LLVM_DEBUG(dbgs() << "CycleSink: Use not in cycle, can't sink.\n" ); |
1530 | CanSink = false; |
1531 | break; |
1532 | } |
1533 | |
    // FIXME: Come up with a proper cost model that estimates whether sinking
    // the instruction (and thus possibly executing it on every cycle
    // iteration) is more expensive than keeping the value in a register.
    // For now, assume that copies are cheap and thus almost always worth it.
1538 | if (!MI.isCopy()) { |
1539 | LLVM_DEBUG(dbgs() << "CycleSink: Use is not a copy\n" ); |
1540 | CanSink = false; |
1541 | break; |
1542 | } |
1543 | if (!SinkBlock) { |
1544 | SinkBlock = MI.getParent(); |
1545 | LLVM_DEBUG(dbgs() << "CycleSink: Setting sink block to: " |
1546 | << printMBBReference(*SinkBlock) << "\n" ); |
1547 | continue; |
1548 | } |
1549 | SinkBlock = DT->findNearestCommonDominator(A: SinkBlock, B: MI.getParent()); |
1550 | if (!SinkBlock) { |
1551 | LLVM_DEBUG(dbgs() << "CycleSink: Can't find nearest dominator\n" ); |
1552 | CanSink = false; |
1553 | break; |
1554 | } |
    LLVM_DEBUG(dbgs() << "CycleSink: Setting nearest common dom block: "
                      << printMBBReference(*SinkBlock) << "\n" );
1557 | } |
1558 | |
1559 | if (!CanSink) { |
1560 | LLVM_DEBUG(dbgs() << "CycleSink: Can't sink instruction.\n" ); |
1561 | return false; |
1562 | } |
1563 | if (!SinkBlock) { |
1564 | LLVM_DEBUG(dbgs() << "CycleSink: Not sinking, can't find sink block.\n" ); |
1565 | return false; |
1566 | } |
1567 | if (SinkBlock == Preheader) { |
1568 | LLVM_DEBUG( |
1569 | dbgs() << "CycleSink: Not sinking, sink block is the preheader\n" ); |
1570 | return false; |
1571 | } |
1572 | if (SinkBlock->sizeWithoutDebugLargerThan(Limit: SinkLoadInstsPerBlockThreshold)) { |
1573 | LLVM_DEBUG( |
1574 | dbgs() << "CycleSink: Not Sinking, block too large to analyse.\n" ); |
1575 | return false; |
1576 | } |
1577 | |
1578 | LLVM_DEBUG(dbgs() << "CycleSink: Sinking instruction!\n" ); |
1579 | SinkBlock->splice(Where: SinkBlock->SkipPHIsAndLabels(I: SinkBlock->begin()), Other: Preheader, |
1580 | From: I); |
1581 | |
  // Conservatively clear any kill flags on uses of the sunk instruction.
1583 | for (MachineOperand &MO : I.operands()) { |
1584 | if (MO.isReg() && MO.readsReg()) |
1585 | RegsToClearKillFlags.insert(V: MO.getReg()); |
1586 | } |
1587 | |
  // The instruction has been moved out of its basic block, so do not retain
  // its debug location.
1590 | assert(!I.isDebugInstr() && "Should not sink debug inst" ); |
1591 | I.setDebugLoc(DebugLoc()); |
1592 | return true; |
1593 | } |
1594 | |
1595 | /// SinkInstruction - Determine whether it is safe to sink the specified machine |
1596 | /// instruction out of its current block into a successor. |
1597 | bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore, |
1598 | AllSuccsCache &AllSuccessors) { |
1599 | // Don't sink instructions that the target prefers not to sink. |
1600 | if (!TII->shouldSink(MI)) |
1601 | return false; |
1602 | |
1603 | // Check if it's safe to move the instruction. |
1604 | if (!MI.isSafeToMove(AA, SawStore)) |
1605 | return false; |
1606 | |
1607 | // Convergent operations may not be made control-dependent on additional |
1608 | // values. |
1609 | if (MI.isConvergent()) |
1610 | return false; |
1611 | |
1612 | // Don't break implicit null checks. This is a performance heuristic, and not |
1613 | // required for correctness. |
1614 | if (SinkingPreventsImplicitNullCheck(MI, TII, TRI)) |
1615 | return false; |
1616 | |
1617 | // FIXME: This should include support for sinking instructions within the |
1618 | // block they are currently in to shorten the live ranges. We often get |
1619 | // instructions sunk into the top of a large block, but it would be better to |
1620 | // also sink them down before their first use in the block. This xform has to |
1621 | // be careful not to *increase* register pressure though, e.g. sinking |
1622 | // "x = y + z" down if it kills y and z would increase the live ranges of y |
1623 | // and z and only shrink the live range of x. |
1624 | |
1625 | bool BreakPHIEdge = false; |
1626 | MachineBasicBlock *ParentBlock = MI.getParent(); |
1627 | MachineBasicBlock *SuccToSinkTo = |
1628 | FindSuccToSinkTo(MI, MBB: ParentBlock, BreakPHIEdge, AllSuccessors); |
1629 | |
  // No legal block to sink to was found (e.g. the instruction defines no
  // registers, in which case it must have side-effects).
1631 | if (!SuccToSinkTo) |
1632 | return false; |
1633 | |
1634 | // If the instruction to move defines a dead physical register which is live |
1635 | // when leaving the basic block, don't move it because it could turn into a |
1636 | // "zombie" define of that preg. E.g., EFLAGS. |
1637 | for (const MachineOperand &MO : MI.all_defs()) { |
1638 | Register Reg = MO.getReg(); |
1639 | if (Reg == 0 || !Reg.isPhysical()) |
1640 | continue; |
1641 | if (SuccToSinkTo->isLiveIn(Reg)) |
1642 | return false; |
1643 | } |
1644 | |
1645 | LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo); |
1646 | |
1647 | // If the block has multiple predecessors, this is a critical edge. |
1648 | // Decide if we can sink along it or need to break the edge. |
1649 | if (SuccToSinkTo->pred_size() > 1) { |
1650 | // We cannot sink a load across a critical edge - there may be stores in |
1651 | // other code paths. |
1652 | bool TryBreak = false; |
1653 | bool Store = |
1654 | MI.mayLoad() ? hasStoreBetween(From: ParentBlock, To: SuccToSinkTo, MI) : true; |
1655 | if (!MI.isSafeToMove(AA, SawStore&: Store)) { |
1656 | LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n" ); |
1657 | TryBreak = true; |
1658 | } |
1659 | |
1660 | // We don't want to sink across a critical edge if we don't dominate the |
1661 | // successor. We could be introducing calculations to new code paths. |
1662 | if (!TryBreak && !DT->dominates(A: ParentBlock, B: SuccToSinkTo)) { |
1663 | LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n" ); |
1664 | TryBreak = true; |
1665 | } |
1666 | |
    // Don't sink into an irreducible cycle, or into a cycle header.
1668 | if (!TryBreak && CI->getCycle(Block: SuccToSinkTo) && |
1669 | (!CI->getCycle(Block: SuccToSinkTo)->isReducible() || |
1670 | CI->getCycle(Block: SuccToSinkTo)->getHeader() == SuccToSinkTo)) { |
1671 | LLVM_DEBUG(dbgs() << " *** NOTE: cycle header found\n" ); |
1672 | TryBreak = true; |
1673 | } |
1674 | |
1675 | // Otherwise we are OK with sinking along a critical edge. |
1676 | if (!TryBreak) |
1677 | LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n" ); |
1678 | else { |
1679 | // Mark this edge as to be split. |
1680 | // If the edge can actually be split, the next iteration of the main loop |
1681 | // will sink MI in the newly created block. |
1682 | bool Status = |
1683 | PostponeSplitCriticalEdge(MI, FromBB: ParentBlock, ToBB: SuccToSinkTo, BreakPHIEdge); |
1684 | if (!Status) |
1685 | LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to " |
1686 | "break critical edge\n" ); |
1687 | // The instruction will not be sunk this time. |
1688 | return false; |
1689 | } |
1690 | } |
1691 | |
1692 | if (BreakPHIEdge) { |
1693 | // BreakPHIEdge is true if all the uses are in the successor MBB being |
    // sunk into and they are all PHI nodes. In this case, machine-sink must
1695 | // break the critical edge first. |
1696 | bool Status = PostponeSplitCriticalEdge(MI, FromBB: ParentBlock, |
1697 | ToBB: SuccToSinkTo, BreakPHIEdge); |
1698 | if (!Status) |
1699 | LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to " |
1700 | "break critical edge\n" ); |
1701 | // The instruction will not be sunk this time. |
1702 | return false; |
1703 | } |
1704 | |
  // Determine where to insert. Skip over PHI nodes and labels.
1706 | MachineBasicBlock::iterator InsertPos = |
1707 | SuccToSinkTo->SkipPHIsAndLabels(I: SuccToSinkTo->begin()); |
1708 | if (blockPrologueInterferes(BB: SuccToSinkTo, End: InsertPos, MI, TRI, TII, MRI)) { |
1709 | LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n" ); |
1710 | return false; |
1711 | } |
1712 | |
1713 | // Collect debug users of any vreg that this inst defines. |
1714 | SmallVector<MIRegs, 4> DbgUsersToSink; |
1715 | for (auto &MO : MI.all_defs()) { |
1716 | if (!MO.getReg().isVirtual()) |
1717 | continue; |
1718 | if (!SeenDbgUsers.count(Val: MO.getReg())) |
1719 | continue; |
1720 | |
1721 | // Sink any users that don't pass any other DBG_VALUEs for this variable. |
1722 | auto &Users = SeenDbgUsers[MO.getReg()]; |
1723 | for (auto &User : Users) { |
1724 | MachineInstr *DbgMI = User.getPointer(); |
1725 | if (User.getInt()) { |
1726 | // This DBG_VALUE would re-order assignments. If we can't copy-propagate |
1727 | // it, it can't be recovered. Set it undef. |
1728 | if (!attemptDebugCopyProp(SinkInst&: MI, DbgMI&: *DbgMI, Reg: MO.getReg())) |
1729 | DbgMI->setDebugValueUndef(); |
1730 | } else { |
1731 | DbgUsersToSink.push_back( |
1732 | Elt: {DbgMI, SmallVector<unsigned, 2>(1, MO.getReg())}); |
1733 | } |
1734 | } |
1735 | } |
1736 | |
  // After sinking, some debug users may no longer be dominated by the def. If
  // possible, copy-propagate their operands. As this is expensive, don't do it
  // if there's no debug info in the program.
1740 | if (MI.getMF()->getFunction().getSubprogram() && MI.isCopy()) |
1741 | SalvageUnsunkDebugUsersOfCopy(MI, TargetBlock: SuccToSinkTo); |
1742 | |
1743 | performSink(MI, SuccToSinkTo&: *SuccToSinkTo, InsertPos, DbgValuesToSink: DbgUsersToSink); |
1744 | |
1745 | // Conservatively, clear any kill flags, since it's possible that they are no |
1746 | // longer correct. |
1747 | // Note that we have to clear the kill flags for any register this instruction |
1748 | // uses as we may sink over another instruction which currently kills the |
1749 | // used registers. |
1750 | for (MachineOperand &MO : MI.all_uses()) |
1751 | RegsToClearKillFlags.insert(V: MO.getReg()); // Remember to clear kill flags. |
1752 | |
1753 | return true; |
1754 | } |
1755 | |
1756 | void MachineSinking::SalvageUnsunkDebugUsersOfCopy( |
1757 | MachineInstr &MI, MachineBasicBlock *TargetBlock) { |
1758 | assert(MI.isCopy()); |
1759 | assert(MI.getOperand(1).isReg()); |
1760 | |
1761 | // Enumerate all users of vreg operands that are def'd. Skip those that will |
1762 | // be sunk. For the rest, if they are not dominated by the block we will sink |
1763 | // MI into, propagate the copy source to them. |
1764 | SmallVector<MachineInstr *, 4> DbgDefUsers; |
1765 | SmallVector<Register, 4> DbgUseRegs; |
1766 | const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo(); |
1767 | for (auto &MO : MI.all_defs()) { |
1768 | if (!MO.getReg().isVirtual()) |
1769 | continue; |
1770 | DbgUseRegs.push_back(Elt: MO.getReg()); |
1771 | for (auto &User : MRI.use_instructions(Reg: MO.getReg())) { |
1772 | if (!User.isDebugValue() || DT->dominates(A: TargetBlock, B: User.getParent())) |
1773 | continue; |
1774 | |
      // If the user is in the same block, it will either be sunk as well or
      // become a use-before-def; either way, skip it.
1776 | if (User.getParent() == MI.getParent()) |
1777 | continue; |
1778 | |
1779 | assert(User.hasDebugOperandForReg(MO.getReg()) && |
1780 | "DBG_VALUE user of vreg, but has no operand for it?" ); |
1781 | DbgDefUsers.push_back(Elt: &User); |
1782 | } |
1783 | } |
1784 | |
1785 | // Point the users of this copy that are no longer dominated, at the source |
1786 | // of the copy. |
1787 | for (auto *User : DbgDefUsers) { |
1788 | for (auto &Reg : DbgUseRegs) { |
1789 | for (auto &DbgOp : User->getDebugOperandsForReg(Reg)) { |
1790 | DbgOp.setReg(MI.getOperand(i: 1).getReg()); |
1791 | DbgOp.setSubReg(MI.getOperand(i: 1).getSubReg()); |
1792 | } |
1793 | } |
1794 | } |
1795 | } |
1796 | |
1797 | //===----------------------------------------------------------------------===// |
1798 | // This pass is not intended to be a replacement or a complete alternative |
1799 | // for the pre-ra machine sink pass. It is only designed to sink COPY |
1800 | // instructions which should be handled after RA. |
1801 | // |
// This pass sinks COPY instructions into a successor block, if the COPY is not
// used in the current block and the COPY is live-in to a single successor
// (i.e., doesn't require the COPY to be duplicated). This avoids executing the
// copy on paths where its result isn't needed, and also exposes additional
// opportunities for dead copy elimination and shrink wrapping.
1807 | // |
// These copies were either not handled by, or were inserted after, the
// MachineSink pass. As an example of the former case, the MachineSink pass
// cannot sink COPY instructions with allocatable source registers; for AArch64
// this type of copy instruction is frequently used to move function parameters
// (physical registers) into virtual registers in the entry block.
1813 | // |
// For the machine IR below, this pass will sink %w19 in the entry block into
// its successor (%bb.1), because %w19 is only live-in to %bb.1.
1816 | // %bb.0: |
1817 | // %wzr = SUBSWri %w1, 1 |
1818 | // %w19 = COPY %w0 |
1819 | // Bcc 11, %bb.2 |
1820 | // %bb.1: |
1821 | // Live Ins: %w19 |
1822 | // BL @fun |
1823 | // %w0 = ADDWrr %w0, %w19 |
1824 | // RET %w0 |
1825 | // %bb.2: |
1826 | // %w0 = COPY %wzr |
1827 | // RET %w0 |
// As we sink %w19 (a callee-saved register in AArch64) into %bb.1, the
// shrink-wrapping pass will be able to see %bb.0 as a candidate.
1830 | //===----------------------------------------------------------------------===// |
1831 | namespace { |
1832 | |
1833 | class PostRAMachineSinking : public MachineFunctionPass { |
1834 | public: |
1835 | bool runOnMachineFunction(MachineFunction &MF) override; |
1836 | |
1837 | static char ID; |
1838 | PostRAMachineSinking() : MachineFunctionPass(ID) {} |
1839 | StringRef getPassName() const override { return "PostRA Machine Sink" ; } |
1840 | |
1841 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
1842 | AU.setPreservesCFG(); |
1843 | MachineFunctionPass::getAnalysisUsage(AU); |
1844 | } |
1845 | |
1846 | MachineFunctionProperties getRequiredProperties() const override { |
1847 | return MachineFunctionProperties().set( |
1848 | MachineFunctionProperties::Property::NoVRegs); |
1849 | } |
1850 | |
1851 | private: |
1852 | /// Track which register units have been modified and used. |
1853 | LiveRegUnits ModifiedRegUnits, UsedRegUnits; |
1854 | |
1855 | /// Track DBG_VALUEs of (unmodified) register units. Each DBG_VALUE has an |
1856 | /// entry in this map for each unit it touches. The DBG_VALUE's entry |
1857 | /// consists of a pointer to the instruction itself, and a vector of registers |
1858 | /// referred to by the instruction that overlap the key register unit. |
1859 | DenseMap<unsigned, SmallVector<MIRegs, 2>> SeenDbgInstrs; |
1860 | |
  /// Sink COPY instructions that are unused in their own block close to their
  /// uses in successors.
1863 | bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF, |
1864 | const TargetRegisterInfo *TRI, const TargetInstrInfo *TII); |
1865 | }; |
1866 | } // namespace |
1867 | |
1868 | char PostRAMachineSinking::ID = 0; |
1869 | char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID; |
1870 | |
1871 | INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink" , |
1872 | "PostRA Machine Sink" , false, false) |
1873 | |
1874 | static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg, |
1875 | const TargetRegisterInfo *TRI) { |
1876 | LiveRegUnits LiveInRegUnits(*TRI); |
1877 | LiveInRegUnits.addLiveIns(MBB); |
1878 | return !LiveInRegUnits.available(Reg); |
1879 | } |
1880 | |
1881 | static MachineBasicBlock * |
1882 | getSingleLiveInSuccBB(MachineBasicBlock &CurBB, |
1883 | const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs, |
1884 | unsigned Reg, const TargetRegisterInfo *TRI) { |
1885 | // Try to find a single sinkable successor in which Reg is live-in. |
1886 | MachineBasicBlock *BB = nullptr; |
1887 | for (auto *SI : SinkableBBs) { |
1888 | if (aliasWithRegsInLiveIn(MBB&: *SI, Reg, TRI)) { |
1889 | // If BB is set here, Reg is live-in to at least two sinkable successors, |
1890 | // so quit. |
1891 | if (BB) |
1892 | return nullptr; |
1893 | BB = SI; |
1894 | } |
1895 | } |
1896 | // Reg is not live-in to any sinkable successors. |
1897 | if (!BB) |
1898 | return nullptr; |
1899 | |
  // Check if any register aliased with Reg is live-in to other successors.
1901 | for (auto *SI : CurBB.successors()) { |
1902 | if (!SinkableBBs.count(Ptr: SI) && aliasWithRegsInLiveIn(MBB&: *SI, Reg, TRI)) |
1903 | return nullptr; |
1904 | } |
1905 | return BB; |
1906 | } |
1907 | |
1908 | static MachineBasicBlock * |
1909 | getSingleLiveInSuccBB(MachineBasicBlock &CurBB, |
1910 | const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs, |
1911 | ArrayRef<unsigned> DefedRegsInCopy, |
1912 | const TargetRegisterInfo *TRI) { |
1913 | MachineBasicBlock *SingleBB = nullptr; |
1914 | for (auto DefReg : DefedRegsInCopy) { |
1915 | MachineBasicBlock *BB = |
1916 | getSingleLiveInSuccBB(CurBB, SinkableBBs, Reg: DefReg, TRI); |
1917 | if (!BB || (SingleBB && SingleBB != BB)) |
1918 | return nullptr; |
1919 | SingleBB = BB; |
1920 | } |
1921 | return SingleBB; |
1922 | } |
1923 | |
1924 | static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB, |
1925 | SmallVectorImpl<unsigned> &UsedOpsInCopy, |
1926 | LiveRegUnits &UsedRegUnits, |
1927 | const TargetRegisterInfo *TRI) { |
1928 | for (auto U : UsedOpsInCopy) { |
1929 | MachineOperand &MO = MI->getOperand(i: U); |
1930 | Register SrcReg = MO.getReg(); |
1931 | if (!UsedRegUnits.available(Reg: SrcReg)) { |
1932 | MachineBasicBlock::iterator NI = std::next(x: MI->getIterator()); |
1933 | for (MachineInstr &UI : make_range(x: NI, y: CurBB.end())) { |
1934 | if (UI.killsRegister(Reg: SrcReg, TRI)) { |
1935 | UI.clearRegisterKills(Reg: SrcReg, RegInfo: TRI); |
1936 | MO.setIsKill(true); |
1937 | break; |
1938 | } |
1939 | } |
1940 | } |
1941 | } |
1942 | } |
1943 | |
1944 | static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB, |
1945 | SmallVectorImpl<unsigned> &UsedOpsInCopy, |
1946 | SmallVectorImpl<unsigned> &DefedRegsInCopy) { |
1947 | MachineFunction &MF = *SuccBB->getParent(); |
1948 | const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); |
1949 | for (unsigned DefReg : DefedRegsInCopy) |
1950 | for (MCPhysReg S : TRI->subregs_inclusive(Reg: DefReg)) |
1951 | SuccBB->removeLiveIn(Reg: S); |
1952 | for (auto U : UsedOpsInCopy) |
1953 | SuccBB->addLiveIn(PhysReg: MI->getOperand(i: U).getReg()); |
1954 | SuccBB->sortUniqueLiveIns(); |
1955 | } |
1956 | |
1957 | static bool hasRegisterDependency(MachineInstr *MI, |
1958 | SmallVectorImpl<unsigned> &UsedOpsInCopy, |
1959 | SmallVectorImpl<unsigned> &DefedRegsInCopy, |
1960 | LiveRegUnits &ModifiedRegUnits, |
1961 | LiveRegUnits &UsedRegUnits) { |
1962 | bool HasRegDependency = false; |
1963 | for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { |
1964 | MachineOperand &MO = MI->getOperand(i); |
1965 | if (!MO.isReg()) |
1966 | continue; |
1967 | Register Reg = MO.getReg(); |
1968 | if (!Reg) |
1969 | continue; |
1970 | if (MO.isDef()) { |
1971 | if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) { |
1972 | HasRegDependency = true; |
1973 | break; |
1974 | } |
1975 | DefedRegsInCopy.push_back(Elt: Reg); |
1976 | |
1977 | // FIXME: instead of isUse(), readsReg() would be a better fix here, |
1978 | // For example, we can ignore modifications in reg with undef. However, |
1979 | // it's not perfectly clear if skipping the internal read is safe in all |
1980 | // other targets. |
1981 | } else if (MO.isUse()) { |
1982 | if (!ModifiedRegUnits.available(Reg)) { |
1983 | HasRegDependency = true; |
1984 | break; |
1985 | } |
1986 | UsedOpsInCopy.push_back(Elt: i); |
1987 | } |
1988 | } |
1989 | return HasRegDependency; |
1990 | } |
1991 | |
1992 | bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB, |
1993 | MachineFunction &MF, |
1994 | const TargetRegisterInfo *TRI, |
1995 | const TargetInstrInfo *TII) { |
1996 | SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs; |
1997 | // FIXME: For now, we sink only to a successor which has a single predecessor |
1998 | // so that we can directly sink COPY instructions to the successor without |
1999 | // adding any new block or branch instruction. |
2000 | for (MachineBasicBlock *SI : CurBB.successors()) |
2001 | if (!SI->livein_empty() && SI->pred_size() == 1) |
2002 | SinkableBBs.insert(Ptr: SI); |
2003 | |
2004 | if (SinkableBBs.empty()) |
2005 | return false; |
2006 | |
2007 | bool Changed = false; |
2008 | |
2009 | // Track which registers have been modified and used between the end of the |
2010 | // block and the current instruction. |
2011 | ModifiedRegUnits.clear(); |
2012 | UsedRegUnits.clear(); |
2013 | SeenDbgInstrs.clear(); |
2014 | |
2015 | for (MachineInstr &MI : llvm::make_early_inc_range(Range: llvm::reverse(C&: CurBB))) { |
    // Track the operand indexes of the COPY's uses.
    SmallVector<unsigned, 2> UsedOpsInCopy;
    // Track the register numbers defined by the COPY.
    SmallVector<unsigned, 2> DefedRegsInCopy;
2020 | |
2021 | // We must sink this DBG_VALUE if its operand is sunk. To avoid searching |
2022 | // for DBG_VALUEs later, record them when they're encountered. |
2023 | if (MI.isDebugValue() && !MI.isDebugRef()) { |
2024 | SmallDenseMap<MCRegister, SmallVector<unsigned, 2>, 4> MIUnits; |
2025 | bool IsValid = true; |
2026 | for (MachineOperand &MO : MI.debug_operands()) { |
2027 | if (MO.isReg() && MO.getReg().isPhysical()) { |
2028 | // Bail if we can already tell the sink would be rejected, rather |
2029 | // than needlessly accumulating lots of DBG_VALUEs. |
2030 | if (hasRegisterDependency(MI: &MI, UsedOpsInCopy, DefedRegsInCopy, |
2031 | ModifiedRegUnits, UsedRegUnits)) { |
2032 | IsValid = false; |
2033 | break; |
2034 | } |
2035 | |
2036 | // Record debug use of each reg unit. |
2037 | for (MCRegUnit Unit : TRI->regunits(Reg: MO.getReg())) |
2038 | MIUnits[Unit].push_back(Elt: MO.getReg()); |
2039 | } |
2040 | } |
2041 | if (IsValid) { |
2042 | for (auto &RegOps : MIUnits) |
2043 | SeenDbgInstrs[RegOps.first].emplace_back(Args: &MI, |
2044 | Args: std::move(RegOps.second)); |
2045 | } |
2046 | continue; |
2047 | } |
2048 | |
2049 | if (MI.isDebugOrPseudoInstr()) |
2050 | continue; |
2051 | |
    // Do not move any instruction across a function call.
2053 | if (MI.isCall()) |
2054 | return false; |
2055 | |
2056 | if (!MI.isCopy() || !MI.getOperand(i: 0).isRenamable()) { |
2057 | LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, |
2058 | TRI); |
2059 | continue; |
2060 | } |
2061 | |
2062 | // Don't sink the COPY if it would violate a register dependency. |
2063 | if (hasRegisterDependency(MI: &MI, UsedOpsInCopy, DefedRegsInCopy, |
2064 | ModifiedRegUnits, UsedRegUnits)) { |
2065 | LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, |
2066 | TRI); |
2067 | continue; |
2068 | } |
    assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
           "Unexpected SrcReg or DefReg" );
2071 | MachineBasicBlock *SuccBB = |
2072 | getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI); |
2073 | // Don't sink if we cannot find a single sinkable successor in which Reg |
2074 | // is live-in. |
2075 | if (!SuccBB) { |
2076 | LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, |
2077 | TRI); |
2078 | continue; |
2079 | } |
2080 | assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) && |
2081 | "Unexpected predecessor" ); |
2082 | |
    // Collect DBG_VALUEs that must sink with this copy. We've previously
    // recorded which reg units DBG_VALUEs read; if this instruction writes
    // any of those units, the corresponding DBG_VALUEs must sink with it.
2086 | MapVector<MachineInstr *, MIRegs::second_type> DbgValsToSinkMap; |
2087 | for (auto &MO : MI.all_defs()) { |
2088 | for (MCRegUnit Unit : TRI->regunits(Reg: MO.getReg())) { |
2089 | for (const auto &MIRegs : SeenDbgInstrs.lookup(Val: Unit)) { |
2090 | auto &Regs = DbgValsToSinkMap[MIRegs.first]; |
2091 | for (unsigned Reg : MIRegs.second) |
2092 | Regs.push_back(Elt: Reg); |
2093 | } |
2094 | } |
2095 | } |
2096 | auto DbgValsToSink = DbgValsToSinkMap.takeVector(); |
2097 | |
2098 | LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccBB); |
2099 | |
2100 | MachineBasicBlock::iterator InsertPos = |
2101 | SuccBB->SkipPHIsAndLabels(I: SuccBB->begin()); |
2102 | if (blockPrologueInterferes(BB: SuccBB, End: InsertPos, MI, TRI, TII, MRI: nullptr)) { |
      LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n" );
2105 | continue; |
2106 | } |
2107 | |
2108 | // Clear the kill flag if SrcReg is killed between MI and the end of the |
2109 | // block. |
2110 | clearKillFlags(MI: &MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI); |
2111 | performSink(MI, SuccToSinkTo&: *SuccBB, InsertPos, DbgValuesToSink: DbgValsToSink); |
2112 | updateLiveIn(MI: &MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy); |
2113 | |
2114 | Changed = true; |
2115 | ++NumPostRACopySink; |
2116 | } |
2117 | return Changed; |
2118 | } |
2119 | |
2120 | bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) { |
2121 | if (skipFunction(F: MF.getFunction())) |
2122 | return false; |
2123 | |
2124 | bool Changed = false; |
2125 | const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); |
2126 | const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); |
2127 | |
2128 | ModifiedRegUnits.init(TRI: *TRI); |
2129 | UsedRegUnits.init(TRI: *TRI); |
2130 | for (auto &BB : MF) |
2131 | Changed |= tryToSinkCopy(CurBB&: BB, MF, TRI, TII); |
2132 | |
2133 | return Changed; |
2134 | } |
2135 | |