//====- X86FlagsCopyLowering.cpp - Lowers COPY nodes of EFLAGS ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Lowers COPY nodes of EFLAGS by directly extracting and preserving individual
/// flag bits.
///
/// We have to do this by carefully analyzing and rewriting the usage of the
/// copied EFLAGS register because there is no general way to rematerialize the
/// entire EFLAGS register safely and efficiently. Using `popf` both forces
/// dynamic stack adjustment and can create correctness issues due to IF, TF,
/// and other non-status flags being overwritten. Sequences involving SAHF
/// don't work on all x86 processors and are often quite slow compared to
/// directly testing a single status flag preserved in its own GPR.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define PASS_KEY "x86-flags-copy-lowering"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCopiesEliminated, "Number of copies of EFLAGS eliminated");
STATISTIC(NumSetCCsInserted, "Number of setCC instructions inserted");
STATISTIC(NumTestsInserted, "Number of test instructions inserted");
STATISTIC(NumAddsInserted, "Number of add instructions inserted");

namespace {

// Convenient array type for storing registers associated with each condition.
using CondRegArray = std::array<unsigned, X86::LAST_VALID_COND + 1>;

class X86FlagsCopyLoweringPass : public MachineFunctionPass {
public:
  X86FlagsCopyLoweringPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86Subtarget *Subtarget = nullptr;
  const X86InstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetRegisterClass *PromoteRC = nullptr;
  MachineDominatorTree *MDT = nullptr;

  CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator CopyDefI);

  Register promoteCondToReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator TestPos,
                            const DebugLoc &TestLoc, X86::CondCode Cond);
  std::pair<unsigned, bool> getCondOrInverseInReg(
      MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
      const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs);
  void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                  const DebugLoc &Loc, unsigned Reg);

  void rewriteArithmetic(MachineBasicBlock &TestMBB,
                         MachineBasicBlock::iterator TestPos,
                         const DebugLoc &TestLoc, MachineInstr &MI,
                         MachineOperand &FlagUse, CondRegArray &CondRegs);
  void rewriteCMov(MachineBasicBlock &TestMBB,
                   MachineBasicBlock::iterator TestPos, const DebugLoc &TestLoc,
                   MachineInstr &CMovI, MachineOperand &FlagUse,
                   CondRegArray &CondRegs);
  void rewriteFCMov(MachineBasicBlock &TestMBB,
                    MachineBasicBlock::iterator TestPos,
                    const DebugLoc &TestLoc, MachineInstr &CMovI,
                    MachineOperand &FlagUse, CondRegArray &CondRegs);
  void rewriteCondJmp(MachineBasicBlock &TestMBB,
                      MachineBasicBlock::iterator TestPos,
                      const DebugLoc &TestLoc, MachineInstr &JmpI,
                      CondRegArray &CondRegs);
  void rewriteCopy(MachineInstr &MI, MachineOperand &FlagUse,
                   MachineInstr &CopyDefI);
  void rewriteSetCC(MachineBasicBlock &TestMBB,
                    MachineBasicBlock::iterator TestPos,
                    const DebugLoc &TestLoc, MachineInstr &SetCCI,
                    MachineOperand &FlagUse, CondRegArray &CondRegs);
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                      "X86 EFLAGS copy lowering", false, false)
INITIALIZE_PASS_END(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                    "X86 EFLAGS copy lowering", false, false)

FunctionPass *llvm::createX86FlagsCopyLoweringPass() {
  return new X86FlagsCopyLoweringPass();
}

char X86FlagsCopyLoweringPass::ID = 0;

void X86FlagsCopyLoweringPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

namespace {
/// An enumeration of the arithmetic instruction mnemonics which have
/// interesting flag semantics.
///
/// We can map instruction opcodes into these mnemonics to make it easy to
/// dispatch with specific functionality.
enum class FlagArithMnemonic {
  ADC,
  RCL,
  RCR,
  SBB,
  SETB,
};
} // namespace

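/// Map an instruction opcode to the FlagArithMnemonic of its family, issuing
/// a fatal error for any opcode this lowering does not know how to handle.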
static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    report_fatal_error("No support for lowering a copy into EFLAGS when used "
                       "by this instruction!");

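// Match both the base opcode and its `_ND` variant (the new-data-destination
// form emitted when APX NDD instructions are available; see the use of
// Subtarget->hasNDD() further down).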
#define CASE_ND(OP) \
  case X86::OP: \
  case X86::OP##_ND:

#define LLVM_EXPAND_INSTR_SIZES(MNEMONIC, SUFFIX) \
  CASE_ND(MNEMONIC##8##SUFFIX) \
  CASE_ND(MNEMONIC##16##SUFFIX) \
  CASE_ND(MNEMONIC##32##SUFFIX) \
  CASE_ND(MNEMONIC##64##SUFFIX)

#define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC) \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr) \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rm) \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, mr) \
  CASE_ND(MNEMONIC##8ri) \
  CASE_ND(MNEMONIC##16ri8) \
  CASE_ND(MNEMONIC##32ri8) \
  CASE_ND(MNEMONIC##64ri8) \
  CASE_ND(MNEMONIC##16ri) \
  CASE_ND(MNEMONIC##32ri) \
  CASE_ND(MNEMONIC##64ri32) \
  CASE_ND(MNEMONIC##8mi) \
  CASE_ND(MNEMONIC##16mi8) \
  CASE_ND(MNEMONIC##32mi8) \
  CASE_ND(MNEMONIC##64mi8) \
  CASE_ND(MNEMONIC##16mi) \
  CASE_ND(MNEMONIC##32mi) \
  CASE_ND(MNEMONIC##64mi32) \
  case X86::MNEMONIC##8i8: \
  case X86::MNEMONIC##16i16: \
  case X86::MNEMONIC##32i32: \
  case X86::MNEMONIC##64i32:

  LLVM_EXPAND_ADC_SBB_INSTR(ADC)
    return FlagArithMnemonic::ADC;

  LLVM_EXPAND_ADC_SBB_INSTR(SBB)
    return FlagArithMnemonic::SBB;

#undef LLVM_EXPAND_ADC_SBB_INSTR

  LLVM_EXPAND_INSTR_SIZES(RCL, rCL)
  LLVM_EXPAND_INSTR_SIZES(RCL, r1)
  LLVM_EXPAND_INSTR_SIZES(RCL, ri)
    return FlagArithMnemonic::RCL;

  LLVM_EXPAND_INSTR_SIZES(RCR, rCL)
  LLVM_EXPAND_INSTR_SIZES(RCR, r1)
  LLVM_EXPAND_INSTR_SIZES(RCR, ri)
    return FlagArithMnemonic::RCR;

#undef LLVM_EXPAND_INSTR_SIZES
#undef CASE_ND

  case X86::SETB_C32r:
  case X86::SETB_C64r:
    return FlagArithMnemonic::SETB;
  }
}

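/// Split the given block at \p SplitI, one of a run of trailing conditional
/// branch instructions, moving the tail into a new block and fixing up
/// successors, probabilities, and PHIs. This gives each jCC past the first
/// its own block in which to insert a flag test.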
static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
                                     MachineInstr &SplitI,
                                     const X86InstrInfo &TII) {
  MachineFunction &MF = *MBB.getParent();

  assert(SplitI.getParent() == &MBB &&
         "Split instruction must be in the split block!");
  assert(SplitI.isBranch() &&
         "Only designed to split a tail of branch instructions!");
  assert(X86::getCondFromBranch(SplitI) != X86::COND_INVALID &&
         "Must split on an actual jCC instruction!");

  // Dig out the previous instruction to the split point.
  MachineInstr &PrevI = *std::prev(SplitI.getIterator());
  assert(PrevI.isBranch() && "Must split after a branch!");
  assert(X86::getCondFromBranch(PrevI) != X86::COND_INVALID &&
         "Must split after an actual jCC instruction!");
  assert(!std::prev(PrevI.getIterator())->isTerminator() &&
         "Must only have this one terminator prior to the split!");

  // Grab the one successor edge that will stay in `MBB`.
  MachineBasicBlock &UnsplitSucc = *PrevI.getOperand(0).getMBB();

  // Analyze the original block to see if we are actually splitting an edge
  // into two edges. This can happen when we have multiple conditional jumps to
  // the same successor.
  bool IsEdgeSplit =
      std::any_of(SplitI.getIterator(), MBB.instr_end(),
                  [&](MachineInstr &MI) {
                    assert(MI.isTerminator() &&
                           "Should only have spliced terminators!");
                    return llvm::any_of(
                        MI.operands(), [&](MachineOperand &MOp) {
                          return MOp.isMBB() && MOp.getMBB() == &UnsplitSucc;
                        });
                  }) ||
      MBB.getFallThrough() == &UnsplitSucc;

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // Insert the new block immediately after the current one. Any existing
  // fallthrough will be sunk into this new block anyways.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Splice the tail of instructions into the new block.
  NewMBB.splice(NewMBB.end(), &MBB, SplitI.getIterator(), MBB.end());

  // Copy the necessary successors (and their probability info) into the new
  // block.
  for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
    if (IsEdgeSplit || *SI != &UnsplitSucc)
      NewMBB.copySuccessor(&MBB, SI);
  // Normalize the probabilities if we didn't end up splitting the edge.
  if (!IsEdgeSplit)
    NewMBB.normalizeSuccProbs();

  // Now replace all of the moved successors in the original block with the new
  // block. This will merge their probabilities.
  for (MachineBasicBlock *Succ : NewMBB.successors())
    if (Succ != &UnsplitSucc)
      MBB.replaceSuccessor(Succ, &NewMBB);

  // We should always end up replacing at least one successor.
  assert(MBB.isSuccessor(&NewMBB) &&
         "Failed to make the new block a successor!");

  // Now update all the PHIs.
  for (MachineBasicBlock *Succ : NewMBB.successors()) {
    for (MachineInstr &MI : *Succ) {
      if (!MI.isPHI())
        break;

      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2) {
        MachineOperand &OpV = MI.getOperand(OpIdx);
        MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
        assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
        if (OpMBB.getMBB() != &MBB)
          continue;

        // Replace the operand for unsplit successors.
        if (!IsEdgeSplit || Succ != &UnsplitSucc) {
          OpMBB.setMBB(&NewMBB);

          // We have to continue scanning as there may be multiple entries in
          // the PHI.
          continue;
        }

        // When we have split the edge, append a new successor entry.
        MI.addOperand(MF, OpV);
        MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
        break;
      }
    }
  }

  return NewMBB;
}

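/// Return the condition tested by an x87 FCMOV pseudo opcode, or
/// X86::COND_INVALID if the opcode is not an FCMOV.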
static X86::CondCode getCondFromFCMOV(unsigned Opcode) {
  switch (Opcode) {
  default: return X86::COND_INVALID;
  case X86::CMOVBE_Fp32: case X86::CMOVBE_Fp64: case X86::CMOVBE_Fp80:
    return X86::COND_BE;
  case X86::CMOVB_Fp32: case X86::CMOVB_Fp64: case X86::CMOVB_Fp80:
    return X86::COND_B;
  case X86::CMOVE_Fp32: case X86::CMOVE_Fp64: case X86::CMOVE_Fp80:
    return X86::COND_E;
  case X86::CMOVNBE_Fp32: case X86::CMOVNBE_Fp64: case X86::CMOVNBE_Fp80:
    return X86::COND_A;
  case X86::CMOVNB_Fp32: case X86::CMOVNB_Fp64: case X86::CMOVNB_Fp80:
    return X86::COND_AE;
  case X86::CMOVNE_Fp32: case X86::CMOVNE_Fp64: case X86::CMOVNE_Fp80:
    return X86::COND_NE;
  case X86::CMOVNP_Fp32: case X86::CMOVNP_Fp64: case X86::CMOVNP_Fp80:
    return X86::COND_NP;
  case X86::CMOVP_Fp32: case X86::CMOVP_Fp64: case X86::CMOVP_Fp80:
    return X86::COND_P;
  }
}

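// The main driver: for each COPY of EFLAGS, find the highest viable test
// position in the dominator tree, save the needed conditions into GR8
// registers with SETcc, and rewrite every use of the copied flags in terms of
// those registers.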
bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << " **********\n");

  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();
  MDT = &getAnalysis<MachineDominatorTree>();
  PromoteRC = &X86::GR8RegClass;

  if (MF.begin() == MF.end())
    // Nothing to do for a degenerate empty function...
    return false;

  // Collect the copies in RPO so that when there are chains where a copy is in
  // turn copied again we visit the first one first. This ensures we can find
  // viable locations for testing the original EFLAGS that dominate all the
  // uses across complex CFGs.
  SmallVector<MachineInstr *, 4> Copies;
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT)
    for (MachineInstr &MI : *MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          MI.getOperand(0).getReg() == X86::EFLAGS)
        Copies.push_back(&MI);

  for (MachineInstr *CopyI : Copies) {
    MachineBasicBlock &MBB = *CopyI->getParent();

    MachineOperand &VOp = CopyI->getOperand(1);
    assert(VOp.isReg() &&
           "The input to the copy for EFLAGS should always be a register!");
    MachineInstr &CopyDefI = *MRI->getVRegDef(VOp.getReg());
    if (CopyDefI.getOpcode() != TargetOpcode::COPY) {
      // FIXME: The big likely candidates here are PHI nodes. We could in
      // theory handle PHI nodes, but it gets really, really hard. Insanely
      // hard. Hard enough that it is probably better to change every other
      // part of LLVM to avoid creating them. The issue is that once we have
      // PHIs we won't know which original EFLAGS value we need to capture with
      // our setCCs below. The end result will be computing a complete set of
      // setCCs that we *might* want, computing them in every place where we
      // copy *out* of EFLAGS and then doing SSA formation on all of them to
      // insert necessary PHI nodes and consume those here. Then hoping that
      // somehow we DCE the unnecessary ones. This DCE seems very unlikely to
      // be successful and so we will almost certainly end up with a glut of
      // dead setCC instructions. Until we have a motivating test case and fail
      // to avoid it by changing other parts of LLVM's lowering, we refuse to
      // handle this complex case here.
      LLVM_DEBUG(
          dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
          CopyDefI.dump());
      report_fatal_error(
          "Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
    }

    auto Cleanup = make_scope_exit([&] {
      // All uses of the EFLAGS copy are now rewritten; erase the copy into
      // EFLAGS and, if it is now dead, the copy out of EFLAGS as well.
      CopyI->eraseFromParent();
      if (MRI->use_empty(CopyDefI.getOperand(0).getReg()))
        CopyDefI.eraseFromParent();
      ++NumCopiesEliminated;
    });

    MachineOperand &DOp = CopyI->getOperand(0);
    assert(DOp.isDef() && "Expected register def!");
    assert(DOp.getReg() == X86::EFLAGS && "Unexpected copy def register!");
    if (DOp.isDead())
      continue;

    MachineBasicBlock *TestMBB = CopyDefI.getParent();
    auto TestPos = CopyDefI.getIterator();
    DebugLoc TestLoc = CopyDefI.getDebugLoc();

    LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());

    // Walk up across live-in EFLAGS to find where they were actually def'ed.
    //
    // This copy's def may just be part of a region of blocks covered by
    // a single def of EFLAGS and we want to find the top of that region where
    // possible.
    //
    // This is essentially a search for a *candidate* reaching definition
    // location. We don't need to ever find the actual reaching definition
    // here, but we want to walk up the dominator tree to find the highest
    // point which would be viable for such a definition.
    auto HasEFLAGSClobber = [&](MachineBasicBlock::iterator Begin,
                                MachineBasicBlock::iterator End) {
      // Scan backwards as we expect these to be relatively short and often
      // find a clobber near the end.
      return llvm::any_of(
          llvm::reverse(llvm::make_range(Begin, End)), [&](MachineInstr &MI) {
            // Flag any instruction (other than the copy we are
            // currently rewriting) that defs EFLAGS.
            return &MI != CopyI &&
                   MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
    };
    auto HasEFLAGSClobberPath = [&](MachineBasicBlock *BeginMBB,
                                    MachineBasicBlock *EndMBB) {
      assert(MDT->dominates(BeginMBB, EndMBB) &&
             "Only support paths down the dominator tree!");
      SmallPtrSet<MachineBasicBlock *, 4> Visited;
      SmallVector<MachineBasicBlock *, 4> Worklist;
      // We terminate at the beginning. No need to scan it.
      Visited.insert(BeginMBB);
      Worklist.push_back(EndMBB);
      do {
        auto *MBB = Worklist.pop_back_val();
        for (auto *PredMBB : MBB->predecessors()) {
          if (!Visited.insert(PredMBB).second)
            continue;
          if (HasEFLAGSClobber(PredMBB->begin(), PredMBB->end()))
            return true;
          // Enqueue this block to walk its predecessors.
          Worklist.push_back(PredMBB);
        }
      } while (!Worklist.empty());
      // No clobber found along a path from the begin to end.
      return false;
    };
    while (TestMBB->isLiveIn(X86::EFLAGS) && !TestMBB->pred_empty() &&
           !HasEFLAGSClobber(TestMBB->begin(), TestPos)) {
      // Find the nearest common dominator of the predecessors, as
      // that will be the best candidate to hoist into.
      MachineBasicBlock *HoistMBB =
          std::accumulate(std::next(TestMBB->pred_begin()),
                          TestMBB->pred_end(), *TestMBB->pred_begin(),
                          [&](MachineBasicBlock *LHS, MachineBasicBlock *RHS) {
                            return MDT->findNearestCommonDominator(LHS, RHS);
                          });

      // Now we need to scan all predecessors that may be reached along paths
      // to the hoist block. A clobber anywhere in any of these blocks prevents
      // the hoist. Note that this even handles loops because we require *no*
      // clobbers.
      if (HasEFLAGSClobberPath(HoistMBB, TestMBB))
        break;

      // We also need the terminators to not sneakily clobber flags.
      if (HasEFLAGSClobber(HoistMBB->getFirstTerminator()->getIterator(),
                           HoistMBB->instr_end()))
        break;

      // We found a viable location, hoist our test position to it.
      TestMBB = HoistMBB;
      TestPos = TestMBB->getFirstTerminator()->getIterator();
      // Clear the debug location as it would just be confusing after hoisting.
      TestLoc = DebugLoc();
    }
    LLVM_DEBUG({
      auto DefIt = llvm::find_if(
          llvm::reverse(llvm::make_range(TestMBB->instr_begin(), TestPos)),
          [&](MachineInstr &MI) {
            return MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
      if (DefIt.base() != TestMBB->instr_begin()) {
        dbgs() << " Using EFLAGS defined by: ";
        DefIt->dump();
      } else {
        dbgs() << " Using live-in flags for BB:\n";
        TestMBB->dump();
      }
    });

    // While rewriting uses, we buffer jumps and rewrite them in a second pass
    // because doing so will perturb the CFG that we are walking to find the
    // uses in the first place.
    SmallVector<MachineInstr *, 4> JmpIs;

    // Gather the condition flags that have already been preserved in
    // registers. We do this from scratch each time as we expect there to be
    // very few of them and we expect to not revisit the same copy definition
    // many times. If either of those change sufficiently we could build a map
    // of these up front instead.
    CondRegArray CondRegs = collectCondsInRegs(*TestMBB, TestPos);

    // Collect the basic blocks we need to scan. Typically this will just be
    // a single basic block but we may have to scan multiple blocks if the
    // EFLAGS copy lives into successors.
    SmallVector<MachineBasicBlock *, 2> Blocks;
    SmallPtrSet<MachineBasicBlock *, 2> VisitedBlocks;
    Blocks.push_back(&MBB);

    do {
      MachineBasicBlock &UseMBB = *Blocks.pop_back_val();

      // Track if/when we find a kill of the flags in this block.
      bool FlagsKilled = false;

      // In most cases, we walk from the beginning to the end of the block. But
      // when the block is the same block as the copy is from, we will visit it
      // twice. The first time we start from the copy and go to the end. The
      // second time we start from the beginning and go to the copy. This lets
      // us handle copies inside of cycles.
      // FIXME: This loop is *super* confusing. This is at least in part
      // a symptom of all of this routine needing to be refactored into
      // documentable components. Once done, there may be a better way to write
      // this loop.
      for (auto MII = (&UseMBB == &MBB && !VisitedBlocks.count(&UseMBB))
                          ? std::next(CopyI->getIterator())
                          : UseMBB.instr_begin(),
                MIE = UseMBB.instr_end();
           MII != MIE;) {
        MachineInstr &MI = *MII++;
        // If we are in the original copy block and encounter either the copy
        // def or the copy itself, break so that we don't re-process any part
        // of the block or process the instructions in the range that was
        // copied over.
        if (&MI == CopyI || &MI == &CopyDefI) {
          assert(&UseMBB == &MBB && VisitedBlocks.count(&MBB) &&
                 "Should only encounter these on the second pass over the "
                 "original block.");
          break;
        }

        MachineOperand *FlagUse =
            MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr);
        if (!FlagUse) {
          if (MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)) {
            // If EFLAGS are defined, it's as-if they were killed. We can stop
            // scanning here.
            //
            // NB!!! Many instructions only modify some flags. LLVM currently
            // models this as clobbering all flags, but if that ever changes
            // this will need to be carefully updated to handle that more
            // complex logic.
            FlagsKilled = true;
            break;
          }
          continue;
        }

        LLVM_DEBUG(dbgs() << " Rewriting use: "; MI.dump());

        // Check the kill flag before we rewrite as that may change it.
        if (FlagUse->isKill())
          FlagsKilled = true;

        // Once we encounter a branch, the rest of the instructions must also
        // be branches. We can't rewrite in place here, so we handle them
        // below.
        //
        // Note that we don't have to handle tail calls here, even conditional
        // tail calls, as those are not introduced into the X86 MI until
        // post-RA branch folding or block placement. As a consequence, we get
        // to deal with the simpler formulation of conditional branches
        // followed by tail calls.
        if (X86::getCondFromBranch(MI) != X86::COND_INVALID) {
          auto JmpIt = MI.getIterator();
          do {
            JmpIs.push_back(&*JmpIt);
            ++JmpIt;
          } while (JmpIt != UseMBB.instr_end() &&
                   X86::getCondFromBranch(*JmpIt) != X86::COND_INVALID);
          break;
        }

        // Otherwise we can just rewrite in-place.
        if (X86::getCondFromCMov(MI) != X86::COND_INVALID ||
            X86::getCondFromCFCMov(MI) != X86::COND_INVALID) {
          rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (getCondFromFCMOV(MI.getOpcode()) != X86::COND_INVALID) {
          rewriteFCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (X86::getCondFromSETCC(MI) != X86::COND_INVALID) {
          rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (MI.getOpcode() == TargetOpcode::COPY) {
          rewriteCopy(MI, *FlagUse, CopyDefI);
        } else {
          // We assume all other instructions that use flags also def them.
          assert(MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr) &&
                 "Expected a def of EFLAGS for this instruction!");

          // NB!!! Several arithmetic instructions only *partially* update
          // flags. Theoretically, we could generate MI code sequences that
          // would rely on this fact and observe different flags independently.
          // But currently LLVM models all of these instructions as clobbering
          // all the flags in an undef way. We rely on that to simplify the
          // logic.
          FlagsKilled = true;

          // Generically handle remaining uses as arithmetic instructions.
          rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, *FlagUse,
                            CondRegs);
        }

        // If this was the last use of the flags, we're done.
        if (FlagsKilled)
          break;
      }

      // If the flags were killed, we're done with this block.
      if (FlagsKilled)
        continue;

      // Otherwise we need to scan successors for ones where the flags live-in
      // and queue those up for processing.
      for (MachineBasicBlock *SuccMBB : UseMBB.successors())
        if (SuccMBB->isLiveIn(X86::EFLAGS) &&
            VisitedBlocks.insert(SuccMBB).second) {
          // We currently don't do any PHI insertion and so we require that the
          // test basic block dominates all of the use basic blocks. Further,
          // we can't have a cycle from the test block back to itself as that
          // would create a cycle requiring a PHI to break it.
          //
          // We could in theory do PHI insertion here if it becomes useful by
          // just taking undef values in along every edge that we don't trace
          // this EFLAGS copy along. This isn't as bad as fully general PHI
          // insertion, but still seems like a great deal of complexity.
          //
          // Because it is theoretically possible that some earlier MI pass or
          // other lowering transformation could induce this to happen, we do
          // a hard check even in non-debug builds here.
          if (SuccMBB == TestMBB || !MDT->dominates(TestMBB, SuccMBB)) {
            LLVM_DEBUG({
              dbgs()
                  << "ERROR: Encountered use that is not dominated by our test "
                     "basic block! Rewriting this would require inserting PHI "
                     "nodes to track the flag state across the CFG.\n\nTest "
                     "block:\n";
              TestMBB->dump();
              dbgs() << "Use block:\n";
              SuccMBB->dump();
            });
            report_fatal_error(
                "Cannot lower EFLAGS copy when original copy def "
                "does not dominate all uses.");
          }

          Blocks.push_back(SuccMBB);

          // After this, EFLAGS will be recreated before each use.
          SuccMBB->removeLiveIn(X86::EFLAGS);
        }
    } while (!Blocks.empty());

    // Now rewrite the jumps that use the flags. These we handle specially
    // because if there are multiple jumps in a single basic block we'll have
    // to do surgery on the CFG.
    MachineBasicBlock *LastJmpMBB = nullptr;
    for (MachineInstr *JmpI : JmpIs) {
      // Past the first jump within a basic block we need to split the blocks
      // apart.
      if (JmpI->getParent() == LastJmpMBB)
        splitBlock(*JmpI->getParent(), *JmpI, *TII);
      else
        LastJmpMBB = JmpI->getParent();

      rewriteCondJmp(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
    }

    // FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
    // the copy's def operand is itself a kill.
  }

#ifndef NDEBUG
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          (MI.getOperand(0).getReg() == X86::EFLAGS ||
           MI.getOperand(1).getReg() == X86::EFLAGS)) {
        LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
                   MI.dump());
        llvm_unreachable("Unlowered EFLAGS copy!");
      }
#endif

  return true;
}

/// Collect any conditions that have already been set in registers so that we
/// can re-use them rather than adding duplicates.
CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator TestPos) {
  CondRegArray CondRegs = {};

  // Scan backwards across the range of instructions with live EFLAGS.
  for (MachineInstr &MI :
       llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
    X86::CondCode Cond = X86::getCondFromSETCC(MI);
    if (Cond != X86::COND_INVALID && !MI.mayStore() &&
        MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual()) {
      assert(MI.getOperand(0).isDef() &&
             "A non-storing SETcc should always define a register!");
      CondRegs[Cond] = MI.getOperand(0).getReg();
    }

    // Stop scanning when we see the first definition of the EFLAGS as prior to
    // this we would potentially capture the wrong flag state.
    if (MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr))
      break;
  }
  return CondRegs;
}

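/// Save the given condition into a fresh GR8 virtual register by inserting a
/// SETcc at the test position.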
Register X86FlagsCopyLoweringPass::promoteCondToReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond) {
  Register Reg = MRI->createVirtualRegister(PromoteRC);
  auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
                      TII->get(X86::SETCCr), Reg).addImm(Cond);
  (void)SetI;
  LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump());
  ++NumSetCCsInserted;
  return Reg;
}

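/// Get a register containing the given condition or its inverse, inserting a
/// SETcc only if neither is already available. The boolean result indicates
/// whether the returned register holds the inverse condition.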
std::pair<unsigned, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
  unsigned &CondReg = CondRegs[Cond];
  unsigned &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
  if (!CondReg && !InvCondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  if (CondReg)
    return {CondReg, false};
  else
    return {InvCondReg, true};
}

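/// Insert a TEST of a saved condition register against itself. The register
/// holds 1 exactly when the condition held, so the test sets ZF to its
/// negation and users can then branch or select on COND_NE / COND_E.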
void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator Pos,
                                          const DebugLoc &Loc, unsigned Reg) {
  auto TestI =
      BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
  (void)TestI;
  LLVM_DEBUG(dbgs() << " test cond: "; TestI->dump());
  ++NumTestsInserted;
}

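/// Rewrite an arithmetic instruction that reads the copied CF (ADC, SBB, RCL,
/// RCR, or SETB) by rematerializing CF just before it: adding 255 to the saved
/// condition byte carries out of the 8-bit register exactly when the byte is
/// 1, restoring CF to the saved value.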
void X86FlagsCopyLoweringPass::rewriteArithmetic(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, MachineInstr &MI, MachineOperand &FlagUse,
    CondRegArray &CondRegs) {
  // Arithmetic is either reading CF or OF. Figure out which condition we need
  // to preserve in a register.
  X86::CondCode Cond = X86::COND_INVALID;

  // The addend that, when added to the saved flag value, recreates CF or OF.
  int Addend = 0;

  switch (getMnemonicFromOpcode(MI.getOpcode())) {
  case FlagArithMnemonic::ADC:
  case FlagArithMnemonic::RCL:
  case FlagArithMnemonic::RCR:
  case FlagArithMnemonic::SBB:
  case FlagArithMnemonic::SETB:
    Cond = X86::COND_B; // CF == 1
    // Set up an addend such that adding one more to it produces a carry, as
    // there is no higher bit available in the 8-bit register.
    Addend = 255;
    break;
  }

  // Now get a register that contains the value of the flag input to the
  // arithmetic. We require exactly this flag to simplify the arithmetic
  // required to materialize it back into the flag.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  MachineBasicBlock &MBB = *MI.getParent();

  // Insert an instruction that will set the flag back to the desired value.
  Register TmpReg = MRI->createVirtualRegister(PromoteRC);
  auto AddI =
      BuildMI(MBB, MI.getIterator(), MI.getDebugLoc(),
              TII->get(Subtarget->hasNDD() ? X86::ADD8ri_ND : X86::ADD8ri))
          .addDef(TmpReg, RegState::Dead)
          .addReg(CondReg)
          .addImm(Addend);
  (void)AddI;
  LLVM_DEBUG(dbgs() << " add cond: "; AddI->dump());
  ++NumAddsInserted;
  FlagUse.setIsKill(true);
}

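/// Rewrite a CMov (or CFCMov) user of the copied flags to test the saved
/// condition register and select on the resulting ZF value.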
void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
                                           MachineBasicBlock::iterator TestPos,
                                           const DebugLoc &TestLoc,
                                           MachineInstr &CMovI,
                                           MachineOperand &FlagUse,
                                           CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromCMov(CMovI) == X86::COND_INVALID
                           ? X86::getCondFromCFCMov(CMovI)
                           : X86::getCondFromCMov(CMovI);
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &MBB = *CMovI.getParent();

  // Insert a direct test of the saved register.
  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);

  // Rewrite the CMov to use the !ZF flag from the test, and then kill its use
  // of the flags afterward.
  CMovI.getOperand(CMovI.getDesc().getNumOperands() - 1)
      .setImm(Inverted ? X86::COND_E : X86::COND_NE);
  FlagUse.setIsKill(true);
  LLVM_DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
}

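/// Rewrite an x87 FCMOV user of the copied flags: test the saved condition
/// register and switch the FCMOV to the E/NE form matching that test.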
void X86FlagsCopyLoweringPass::rewriteFCMov(MachineBasicBlock &TestMBB,
                                            MachineBasicBlock::iterator TestPos,
                                            const DebugLoc &TestLoc,
                                            MachineInstr &CMovI,
                                            MachineOperand &FlagUse,
                                            CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = getCondFromFCMOV(CMovI.getOpcode());
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &MBB = *CMovI.getParent();

  // Insert a direct test of the saved register.
  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);

  auto getFCMOVOpcode = [](unsigned Opcode, bool Inverted) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode!");
    case X86::CMOVBE_Fp32: case X86::CMOVNBE_Fp32:
    case X86::CMOVB_Fp32: case X86::CMOVNB_Fp32:
    case X86::CMOVE_Fp32: case X86::CMOVNE_Fp32:
    case X86::CMOVP_Fp32: case X86::CMOVNP_Fp32:
      return Inverted ? X86::CMOVE_Fp32 : X86::CMOVNE_Fp32;
    case X86::CMOVBE_Fp64: case X86::CMOVNBE_Fp64:
    case X86::CMOVB_Fp64: case X86::CMOVNB_Fp64:
    case X86::CMOVE_Fp64: case X86::CMOVNE_Fp64:
    case X86::CMOVP_Fp64: case X86::CMOVNP_Fp64:
      return Inverted ? X86::CMOVE_Fp64 : X86::CMOVNE_Fp64;
    case X86::CMOVBE_Fp80: case X86::CMOVNBE_Fp80:
    case X86::CMOVB_Fp80: case X86::CMOVNB_Fp80:
    case X86::CMOVE_Fp80: case X86::CMOVNE_Fp80:
    case X86::CMOVP_Fp80: case X86::CMOVNP_Fp80:
      return Inverted ? X86::CMOVE_Fp80 : X86::CMOVNE_Fp80;
    }
  };

  // Rewrite the CMov to use the !ZF flag from the test.
  CMovI.setDesc(TII->get(getFCMOVOpcode(CMovI.getOpcode(), Inverted)));
  FlagUse.setIsKill(true);
  LLVM_DEBUG(dbgs() << " fixed fcmov: "; CMovI.dump());
}

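/// Rewrite a conditional jump that consumed the copied flags to instead test
/// the saved condition register and branch on the result.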
void X86FlagsCopyLoweringPass::rewriteCondJmp(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, MachineInstr &JmpI, CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromBranch(JmpI);
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &JmpMBB = *JmpI.getParent();

  // Insert a direct test of the saved register.
  insertTest(JmpMBB, JmpI.getIterator(), JmpI.getDebugLoc(), CondReg);

  // Rewrite the jump to use the !ZF flag from the test, and kill its use of
  // flags afterward.
  JmpI.getOperand(1).setImm(Inverted ? X86::COND_E : X86::COND_NE);
  JmpI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
  LLVM_DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
}

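/// Rewrite a nested copy out of the copied EFLAGS by forwarding the register
/// the flags were originally copied from and erasing the now-redundant copy.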
void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
                                           MachineOperand &FlagUse,
                                           MachineInstr &CopyDefI) {
  // Just replace this copy with the original copy def.
  MRI->replaceRegWith(MI.getOperand(0).getReg(),
                      CopyDefI.getOperand(0).getReg());
  MI.eraseFromParent();
}

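/// Rewrite a SETcc that reads the copied flags to reuse the condition byte we
/// already saved: replace the defined register outright, or emit a byte store
/// of the saved register for the memory form.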
void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
                                            MachineBasicBlock::iterator TestPos,
                                            const DebugLoc &TestLoc,
                                            MachineInstr &SetCCI,
                                            MachineOperand &FlagUse,
                                            CondRegArray &CondRegs) {
  X86::CondCode Cond = X86::getCondFromSETCC(SetCCI);
  // Note that we can't usefully rewrite this to the inverse without complex
  // analysis of the users of the setCC. Largely we rely on duplicates which
  // could have been avoided already being avoided here.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  // Rewriting a register def is trivial: we just replace the register and
  // remove the setcc.
  if (!SetCCI.mayStore()) {
    assert(SetCCI.getOperand(0).isReg() &&
           "Cannot have a non-register defined operand to SETcc!");
    Register OldReg = SetCCI.getOperand(0).getReg();
    // Drop Kill flags on the old register before replacing. CondReg may have
    // a longer live range.
    MRI->clearKillFlags(OldReg);
    MRI->replaceRegWith(OldReg, CondReg);
    SetCCI.eraseFromParent();
    return;
  }

  // Otherwise, we need to emit a store.
  auto MIB = BuildMI(*SetCCI.getParent(), SetCCI.getIterator(),
                     SetCCI.getDebugLoc(), TII->get(X86::MOV8mr));
  // Copy the address operands.
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.add(SetCCI.getOperand(i));

  MIB.addReg(CondReg);

  MIB.setMemRefs(SetCCI.memoperands());

  SetCCI.eraseFromParent();
}