1 | //===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the SystemZ implementation of the TargetInstrInfo class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "SystemZInstrInfo.h" |
14 | #include "MCTargetDesc/SystemZMCTargetDesc.h" |
15 | #include "SystemZ.h" |
16 | #include "SystemZInstrBuilder.h" |
17 | #include "SystemZSubtarget.h" |
18 | #include "llvm/ADT/Statistic.h" |
19 | #include "llvm/CodeGen/LiveInterval.h" |
20 | #include "llvm/CodeGen/LiveIntervals.h" |
21 | #include "llvm/CodeGen/LiveRegUnits.h" |
22 | #include "llvm/CodeGen/LiveVariables.h" |
23 | #include "llvm/CodeGen/MachineBasicBlock.h" |
24 | #include "llvm/CodeGen/MachineFrameInfo.h" |
25 | #include "llvm/CodeGen/MachineFunction.h" |
26 | #include "llvm/CodeGen/MachineInstr.h" |
27 | #include "llvm/CodeGen/MachineMemOperand.h" |
28 | #include "llvm/CodeGen/MachineOperand.h" |
29 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
30 | #include "llvm/CodeGen/SlotIndexes.h" |
31 | #include "llvm/CodeGen/StackMaps.h" |
32 | #include "llvm/CodeGen/TargetInstrInfo.h" |
33 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
34 | #include "llvm/CodeGen/VirtRegMap.h" |
35 | #include "llvm/MC/MCInstrDesc.h" |
36 | #include "llvm/MC/MCRegisterInfo.h" |
37 | #include "llvm/Support/BranchProbability.h" |
38 | #include "llvm/Support/ErrorHandling.h" |
39 | #include "llvm/Support/MathExtras.h" |
40 | #include "llvm/Target/TargetMachine.h" |
41 | #include <cassert> |
42 | #include <cstdint> |
43 | #include <iterator> |
44 | |
45 | using namespace llvm; |
46 | |
47 | #define GET_INSTRINFO_CTOR_DTOR |
48 | #define GET_INSTRMAP_INFO |
49 | #include "SystemZGenInstrInfo.inc" |
50 | |
51 | #define DEBUG_TYPE "systemz-II" |
52 | |
// Return a mask whose Count lowest bits are set (Count may be 0..64).
static uint64_t allOnes(unsigned int Count) {
  if (Count == 0)
    return 0;
  // Shift in two steps so that Count == 64 does not perform a 64-bit
  // shift by 64, which would be undefined behavior.
  uint64_t TopBit = uint64_t(1) << (Count - 1);
  return (TopBit << 1) - 1;
}
57 | |
// Out-of-line virtual method whose only purpose is to pin the vtable
// (and associated debug info) to this translation unit.
void SystemZInstrInfo::anchor() {}
60 | |
// Construct the SystemZ TargetInstrInfo.  The register info is seeded with
// the return-address register of the subtarget's calling convention.
// The (-1, -1) arguments are passed through to the generated base class
// (presumably "no call-frame setup/destroy opcodes" — TODO confirm against
// the tablegen'd SystemZGenInstrInfo constructor).
SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
    : SystemZGenInstrInfo(-1, -1),
      RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
      STI(sti) {}
65 | |
// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for
  // one of them (arbitrarily the second here) and create a clone for the
  // other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  Register Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
  // The first (earlier) instruction works on the high 64-bit subreg, the
  // second on the low subreg.
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  if (MI->mayStore()) {
    // Add implicit uses of the super register in case one of the subregs is
    // undefined.  We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
    MachineInstrBuilder(MF, MI).addReg(Reg128,
                                       (Reg128UndefImpl | Reg128Killed));
  }

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction by 8 bytes.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Clear the kill flags on the registers in the first instruction:
  // operand 0 is the data register (for stores it is a use), operands 1
  // and 3 are the base and index registers of the address; all of them
  // remain live into the second instruction.
  if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
    EarlierMI->getOperand(0).setIsKill(false);
  EarlierMI->getOperand(1).setIsKill(false);
  EarlierMI->getOperand(3).setIsKill(false);

  // Set the opcodes.  getOpcodeForOffset picks the form whose displacement
  // field can encode each offset (and fails with 0 if out of range).
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}
117 | |
// Split ADJDYNALLOC instruction MI: fold its frame-related components into
// a single absolute offset and rewrite it as an LA-style address
// computation.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);
  SystemZCallingConventionRegisters *Regs = STI.getSpecialRegisters();

  // Total displacement = outgoing-argument area + fixed call frame size
  // + the ABI's stack pointer bias + the pseudo's own immediate.
  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     Regs->getCallFrameSize() +
                     Regs->getStackPointerBias() +
                     OffsetMO.getImm());
  // Pick the LA variant whose displacement field can encode Offset.
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}
135 | |
// MI is an RI-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.  ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand.  In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  Register Reg = MI.getOperand(0).getReg();
  bool IsHigh = SystemZ::isHighReg(Reg);
  MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  // Reinterpret the signed immediate as an unsigned 32-bit value for the
  // high-register variant.
  if (IsHigh && ConvertHigh)
    MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}
151 | |
// MI is a three-operand RIE-style pseudo instruction.  Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  Register DestReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    // Both low: the distinct-operands (K) form can be used directly.
    MI.setDesc(get(LowOpcodeK));
  else {
    // Otherwise fall back to the two-operand form: copy the source into
    // the destination first (unless they already coincide), then tie the
    // destination to the first source operand as that form requires.
    if (DestReg != SrcReg) {
      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
                    SystemZ::LR, 32, MI.getOperand(1).isKill(),
                    MI.getOperand(1).isUndef());
      MI.getOperand(1).setReg(DestReg);
    }
    MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI.tieOperands(0, 1);
  }
}
176 | |
// MI is an RXY-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  Register Reg = MI.getOperand(0).getReg();
  // Also pick the variant whose displacement field can encode the offset
  // in operand 2.
  unsigned Opcode = getOpcodeForOffset(
      SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
      MI.getOperand(2).getImm());
  MI.setDesc(get(Opcode));
}
188 | |
189 | // MI is a load-on-condition pseudo instruction with a single register |
190 | // (source or destination) operand. Replace it with LowOpcode if the |
191 | // register is a low GR32 and HighOpcode if the register is a high GR32. |
192 | void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode, |
193 | unsigned HighOpcode) const { |
194 | Register Reg = MI.getOperand(i: 0).getReg(); |
195 | unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode; |
196 | MI.setDesc(get(Opcode)); |
197 | } |
198 | |
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another.  Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  // Emit the real move/extend in front of the pseudo.
  MachineInstrBuilder MIB =
      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                    LowOpcode, Size, MI.getOperand(1).isKill(),
                    MI.getOperand(1).isUndef());

  // Keep the remaining operands as-is (any extra operands attached to the
  // pseudo beyond dest and src are carried over to the new instruction).
  for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
    MIB.add(MO);

  MI.eraseFromParent();
}
215 | |
// Expand the LOAD_STACK_GUARD pseudo: materialize the thread pointer from
// access registers %a0/%a1 into Reg64, then load the stack guard value
// from a fixed offset off that pointer.
void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const Register Reg64 = MI->getOperand(0).getReg();
  const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);

  // EAR can only load the low subregister so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  // The implicit def of Reg64 keeps the 64-bit register live even though
  // only its low 32 bits are written here.
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A0)
      .addReg(Reg64, RegState::ImplicitDefine);

  // sllg <reg>, <reg>, 32
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
      .addReg(Reg64)
      .addReg(0)
      .addImm(32);

  // ear <reg>, %a1
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A1);

  // lg <reg>, 40(<reg>)
  // NOTE(review): 40 is presumably the stack-guard slot's offset from the
  // thread pointer — confirm against the s390x TLS/ABI layout.
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}
244 | |
// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB.  Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G.  Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.  UndefSrc marks
// the source as undef.  Returns the builder for the emitted instruction.
MachineInstrBuilder
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, unsigned LowLowOpcode,
                                unsigned Size, bool KillSrc,
                                bool UndefSrc) const {
  unsigned Opcode;
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  // Pick the rotate-then-insert variant by which 32-bit halves are
  // involved: high<-high, high<-low or low<-high.
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    // Both low: a plain 32-bit move/extend opcode suffices.
    return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
  }
  // Rotate by 32 when crossing halves.  The insert range is
  // [32 - Size, 31]; the +128 on the end position presumably sets the
  // "zero remaining bits" flag of RISBG — confirm in the ISA reference.
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}
276 | |
// Commute operands OpIdx1 and OpIdx2 of MI, cloning it first if NewMI is
// set.  For the select/load-on-condition instructions, swapping the two
// value operands must be compensated by inverting the condition mask.
MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {
  // Clone only when the caller asked for a fresh instruction.
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case SystemZ::SELRMux:
  case SystemZ::SELFHR:
  case SystemZ::SELR:
  case SystemZ::SELGR:
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition: XOR-ing the mask (operand 4) with the valid bits
    // (operand 3) selects the complementary set of condition codes.
    unsigned CCValid = WorkingMI.getOperand(3).getImm();
    unsigned CCMask = WorkingMI.getOperand(4).getImm();
    WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
    // The actual operand swap is delegated to the base implementation;
    // pass NewMI=false since any requested clone was made above.
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}
308 | |
// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI.getDesc();
  // Require the right opcode class (via TSFlags) plus a bare frame-index
  // address: operand 1 = frame index, operand 2 = zero displacement,
  // operand 3 = no index register.
  if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
      MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  return 0;
}
324 | |
// If MI is a simple load from a stack slot, return the loaded register and
// set FrameIndex to the slot; return 0 otherwise.
Register SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}
329 | |
// If MI is a simple store to a stack slot, return the stored register and
// set FrameIndex to the slot; return 0 otherwise.
Register SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
334 | |
// Return true if MI is an MVC that copies one entire stack slot to another,
// setting DestFrameIndex and SrcFrameIndex to the destination and source
// slots.
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2): both addresses must be frame
  // indices with zero displacement.
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI.getObjectSize(FI1) != Length ||
      MFI.getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}
357 | |
// Analyze the branching code at the end of MBB, following the generic
// TargetInstrInfo::analyzeBranch contract: on success (returning false),
// TBB/FBB describe the targets and Cond holds the (CCValid, CCMask) pair
// of the conditional branch, if any.  Returning true means "cannot
// analyze".  With AllowModify set, dead branches may be deleted.
bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(*I));
    if (!Branch.hasMBBTarget())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.getMBBTarget();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      MBB.erase(std::next(I), MBB.end());

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.getMBBTarget();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.getMBBTarget();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.getMBBTarget())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
    // NOTE(review): this reports success without recording the differing
    // condition of the earlier branch in Cond — confirm callers tolerate
    // that (returning true, i.e. "cannot analyze", may be safer).
    return false;
  }

  return false;
}
448 | |
// Remove the branch instructions at the end of MBB, returning how many
// were deleted.  Computing BytesRemoved is not supported.
unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    // Leave branches without a direct MBB target (e.g. indirect branches)
    // in place.
    if (!getBranchInfo(*I).hasMBBTarget())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}
473 | |
474 | bool SystemZInstrInfo:: |
475 | reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
476 | assert(Cond.size() == 2 && "Invalid condition" ); |
477 | Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm()); |
478 | return false; |
479 | } |
480 | |
// Insert branch code at the end of MBB: an unconditional branch to TBB
// when Cond is empty, otherwise a conditional branch to TBB followed, if
// FBB is set, by an unconditional branch to FBB.  Returns the number of
// instructions inserted.  Computing BytesAdded is not supported.
unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch.  Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}
519 | |
520 | bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, |
521 | Register &SrcReg2, int64_t &Mask, |
522 | int64_t &Value) const { |
523 | assert(MI.isCompare() && "Caller should have checked for a comparison" ); |
524 | |
525 | if (MI.getNumExplicitOperands() == 2 && MI.getOperand(i: 0).isReg() && |
526 | MI.getOperand(i: 1).isImm()) { |
527 | SrcReg = MI.getOperand(i: 0).getReg(); |
528 | SrcReg2 = 0; |
529 | Value = MI.getOperand(i: 1).getImm(); |
530 | Mask = ~0; |
531 | return true; |
532 | } |
533 | |
534 | return false; |
535 | } |
536 | |
// Return true if a select of TrueReg/FalseReg under condition Pred can be
// lowered to a conditional move, filling in the estimated latencies.
bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Pred,
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  // SystemZ predicates are (CCValid, CCMask) pairs.
  if (Pred.size() != 2)
    return false;

  // Check register classes: both values must share a common subclass.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  // (GRX32 additionally needs the load/store-on-condition-2 facility for
  // the high-register variants.)
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}
570 | |
// Insert a conditional move of TrueReg/FalseReg into DstReg before I,
// using the (CCValid, CCMask) pair in Pred.  canInsertSelect must have
// approved this combination.
void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, Register DstReg,
                                    ArrayRef<MachineOperand> Pred,
                                    Register TrueReg,
                                    Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);

  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    // Prefer SELRMux, then LOCRMux, depending on the available facilities.
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELRMux;
    else if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      // Fall back to LOCR, which only takes plain (low) GR32s: constrain
      // the destination and copy both inputs into fresh GR32 vregs first.
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
      Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELGR;
    else
      Opc = SystemZ::LOCGR;
  } else
    llvm_unreachable("Invalid register class");

  // Operand order: false value first, then true value, then the condition
  // immediates (presumably so the result is TrueReg when CCMask matches —
  // confirm against the SELR/LOCR instruction definitions).
  BuildMI(MBB, I, DL, get(Opc), DstReg)
    .addReg(FalseReg).addReg(TrueReg)
    .addImm(CCValid).addImm(CCMask);
}
612 | |
// Try to fold the immediate defined by DefMI (an LHI-style 16-bit
// immediate load into Reg) into UseMI, turning a register-based
// select/load-on-condition into its load-immediate form.  On success,
// rewrites UseMI, deletes DefMI when Reg has no other non-debug use, and
// returns true.
bool SystemZInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                     Register Reg,
                                     MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();
  // Only plain 16-bit immediate loads are candidates.
  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  bool TieOps = false;
  switch (UseOpc) {
  case SystemZ::SELRMux:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    // The immediate form only replaces the second source operand; if Reg
    // is the first source, arrange to commute the operands below.
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::SELGR:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  // Commuting also inverts the condition mask (see
  // commuteInstructionImpl), keeping the select's semantics intact.
  if (CommuteIdx != -1)
    if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
      return false;

  // Erase DefMI only if this was its sole non-debug use.
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (TieOps)
    UseMI.tieOperands(0, 1);
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}
676 | |
677 | bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const { |
678 | unsigned Opcode = MI.getOpcode(); |
679 | if (Opcode == SystemZ::Return || |
680 | Opcode == SystemZ::Return_XPLINK || |
681 | Opcode == SystemZ::Trap || |
682 | Opcode == SystemZ::CallJG || |
683 | Opcode == SystemZ::CallBR) |
684 | return true; |
685 | return false; |
686 | } |
687 | |
// Decide whether if-converting the single block MBB (predicating its
// NumCycles instructions) is profitable.
bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned /*ExtraPredCycles*/,
                    BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer).  This doesn't apply for low-probability
  // loops (eg. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}
706 | |
// Diamond (true block + false block) variant of the profitability check.
bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned /*ExtraPredCyclesT*/,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned /*ExtraPredCyclesF*/,
                    BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}
716 | |
717 | bool SystemZInstrInfo:: |
718 | isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, |
719 | BranchProbability Probability) const { |
720 | // For now only duplicate single instructions. |
721 | return NumCycles == 1; |
722 | } |
723 | |
// Convert MI into its predicated (conditional) form using the
// (CCValid, CCMask) pair in Pred.  Handles Trap, Return/Return_XPLINK,
// CallJG and CallBR; returns false for anything else.
bool SystemZInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  // Masks 0 ("never") and 15 ("always") make no sense as predicates.
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
    MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
                                             : SystemZ::CondReturn_XPLINK));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid)
      .addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallJG) {
    // Rebuild the operand list so the condition immediates come first:
    // save the callee and register mask, strip them, then re-add them in
    // the order CallBRCL expects.
    MachineOperand FirstOp = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid)
      .addImm(CCMask)
      .add(FirstOp)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallBR) {
    // Same operand reordering for the indirect call.
    MachineOperand Target = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .add(Target)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}
776 | |
// Emit a physical register-to-register copy from SrcReg to DestReg, handling
// the SystemZ-specific cases (128-bit GPR pairs, 32-bit high/low GPR halves,
// VR128<->FP128 moves, and copies into CC) before falling back to a single
// move instruction chosen by register class.
void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    // std::prev(MBBI) is the 64-bit copy just emitted by the recursive call.
    MachineInstrBuilder(*MBB.getParent(), std::prev(x: MBBI))
      .addReg(RegNo: SrcReg, flags: RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    // Only the second (final) copy kills the 128-bit source.
    MachineInstrBuilder(*MBB.getParent(), std::prev(x: MBBI))
      .addReg(RegNo: SrcReg, flags: (getKillRegState(B: KillSrc) | RegState::Implicit));
    return;
  }

  // GRX32 copies may cross the low/high halves of a 64-bit GPR, so delegate
  // to the helper that picks the correct half-to-half move.
  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
                  false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::FP128BitRegClass.contains(SrcReg)) {
    // Map each 64-bit FP half of the source to its containing vector
    // register, then merge the two high doublewords into DestReg.
    MCRegister SrcRegHi =
        RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister SrcRegLo =
        RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
      .addReg(SrcRegHi, getKillRegState(KillSrc))
      .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    MCRegister DestRegHi =
        RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister DestRegLo =
        RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    // Copy the high half first (skipped when source and destination-high
    // alias), then replicate element 1 of the source into the low half.
    if (DestRegHi != SrcReg)
      copyPhysReg(MBB, MBBI, DL, DestReg: DestRegHi, SrcReg, KillSrc: false);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
      .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
    return;
  }

  // Move CC value from a GR32.
  // TMLH/TMHH test the bits of the IPM CC field in the low/high half of the
  // 64-bit GPR, which sets CC from the tested value.
  if (DestReg == SystemZ::CC) {
    unsigned Opcode =
      SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
    BuildMI(MBB, MBBI, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(B: KillSrc))
      .addImm(3 << (SystemZ::IPM_CC - 16));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::CPYA;
  else
    llvm_unreachable("Impossible reg-to-reg copy" );

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(B: KillSrc));
}
868 | |
869 | void SystemZInstrInfo::storeRegToStackSlot( |
870 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, |
871 | bool isKill, int FrameIdx, const TargetRegisterClass *RC, |
872 | const TargetRegisterInfo *TRI, Register VReg) const { |
873 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
874 | |
875 | // Callers may expect a single instruction, so keep 128-bit moves |
876 | // together for now and lower them after register allocation. |
877 | unsigned LoadOpcode, StoreOpcode; |
878 | getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode); |
879 | addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode)) |
880 | .addReg(SrcReg, getKillRegState(B: isKill)), |
881 | FrameIdx); |
882 | } |
883 | |
884 | void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, |
885 | MachineBasicBlock::iterator MBBI, |
886 | Register DestReg, int FrameIdx, |
887 | const TargetRegisterClass *RC, |
888 | const TargetRegisterInfo *TRI, |
889 | Register VReg) const { |
890 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
891 | |
892 | // Callers may expect a single instruction, so keep 128-bit moves |
893 | // together for now and lower them after register allocation. |
894 | unsigned LoadOpcode, StoreOpcode; |
895 | getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode); |
896 | addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg), |
897 | FrameIdx); |
898 | } |
899 | |
900 | // Return true if MI is a simple load or store with a 12-bit displacement |
901 | // and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores. |
902 | static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) { |
903 | const MCInstrDesc &MCID = MI->getDesc(); |
904 | return ((MCID.TSFlags & Flag) && |
905 | isUInt<12>(x: MI->getOperand(i: 2).getImm()) && |
906 | MI->getOperand(i: 3).getReg() == 0); |
907 | } |
908 | |
namespace {

// Describes an AND-with-immediate operation: the width of the register being
// operated on and the position/width of the immediate field within it.
// A default-constructed LogicOp (RegSize == 0) means "not a recognized AND
// immediate" and converts to false.
struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
      : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  // True iff this describes a recognized operation.
  explicit operator bool() const { return RegSize; }

  unsigned RegSize = 0;  // Register width in bits (32 or 64).
  unsigned ImmLSB = 0;   // Bit position of the immediate's LSB.
  unsigned ImmSize = 0;  // Immediate width in bits (16 or 32).
};

} // end anonymous namespace
924 | |
// Map an AND-with-immediate opcode to a LogicOp describing the register
// width and which bits of it the immediate masks. Returns a false LogicOp
// for opcodes that are not AND immediates handled here.
static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32, 0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64, 0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32, 0, 32);
  case SystemZ::NILF64: return LogicOp(64, 0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default: return LogicOp();
  }
}
939 | |
940 | static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) { |
941 | if (OldMI->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr)) { |
942 | MachineOperand *CCDef = |
943 | NewMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr); |
944 | if (CCDef != nullptr) |
945 | CCDef->setIsDead(true); |
946 | } |
947 | } |
948 | |
949 | static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI, |
950 | MachineInstr::MIFlag Flag) { |
951 | if (OldMI->getFlag(Flag)) |
952 | NewMI->setFlag(Flag); |
953 | } |
954 | |
// Convert a two-address AND-with-immediate into a three-address RISBG-type
// rotate-and-insert when the mask is representable, updating LiveVariables /
// LiveIntervals to track the replacement. Returns the new instruction or
// nullptr if no conversion applies.
MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                        LiveIntervals *LIS) const {
  MachineBasicBlock *MBB = MI.getParent();

  // Try to convert an AND into an RISBG-type instruction.
  // TODO: It might be beneficial to select RISBG and shorten to AND instead.
  if (LogicOp And = interpretAndImmediate(Opcode: MI.getOpcode())) {
    uint64_t Imm = MI.getOperand(i: 2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(Count: And.RegSize) & ~(allOnes(Count: And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    if (isRxSBGMask(Mask: Imm, BitSize: And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64) {
        NewOpcode = SystemZ::RISBG;
        // Prefer RISBGN if available, since it does not clobber CC.
        if (STI.hasMiscellaneousExtensions())
          NewOpcode = SystemZ::RISBGN;
      } else {
        NewOpcode = SystemZ::RISBMux;
        // RISBMux works on 32-bit halves, so reduce bit positions mod 32.
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI.getOperand(i: 0);
      MachineOperand &Src = MI.getOperand(i: 1);
      MachineInstrBuilder MIB =
          BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
              .add(Dest)
              .addReg(0)
              .addReg(Src.getReg(), getKillRegState(B: Src.isKill()),
                      Src.getSubReg())
              .addImm(Start)
              // End + 128 sets the high bit of the end-index field (the
              // "zero remaining bits" flag of RISBG-type instructions).
              .addImm(End + 128)
              .addImm(0)/*rotate amount*/;
      if (LV) {
        // Transfer kill flags from the old instruction to the new one.
        unsigned NumOps = MI.getNumOperands();
        for (unsigned I = 1; I < NumOps; ++I) {
          MachineOperand &Op = MI.getOperand(i: I);
          if (Op.isReg() && Op.isKill())
            LV->replaceKillInstruction(Reg: Op.getReg(), OldMI&: MI, NewMI&: *MIB);
        }
      }
      if (LIS)
        LIS->ReplaceMachineInstrInMaps(MI, NewMI&: *MIB);
      transferDeadCC(OldMI: &MI, NewMI: MIB);
      return MIB;
    }
  }
  return nullptr;
}
1006 | |
// Try to fold the register operand(s) listed in Ops of MI into an access to
// stack slot FrameIndex, building the folded instruction at InsertPt.
// Returns the new instruction, or nullptr if no fold is possible. Keeps the
// LiveIntervals CC live range consistent when a dead CC def is introduced.
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS, VirtRegMap *VRM) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Size = MFI.getObjectSize(ObjectIdx: FrameIndex);
  unsigned Opcode = MI.getOpcode();

  // Check CC liveness if new instruction introduces a dead def of CC.
  SlotIndex MISlot = SlotIndex();
  LiveRange *CCLiveRange = nullptr;
  bool CCLiveAtMI = true;
  if (LIS) {
    MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
    auto CCUnits = TRI->regunits(MCRegister::from(SystemZ::CC));
    assert(range_size(CCUnits) == 1 && "CC only has one reg unit." );
    CCLiveRange = &LIS->getRegUnit(Unit: *CCUnits.begin());
    CCLiveAtMI = CCLiveRange->liveAt(index: MISlot);
  }

  // Both the result and the address register are spilled to the same slot.
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
      // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
      MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
                                      MI.getDebugLoc(), get(SystemZ::AGSI))
                                  .addFrameIndex(FrameIndex)
                                  .addImm(0)
                                  .addImm(MI.getOperand(2).getImm());
      // AGSI defines CC, but CC is not live here; mark the def dead and
      // record it in the CC live range.
      BuiltMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr)
          ->setIsDead(true);
      CCLiveRange->createDeadDef(Def: MISlot, VNIAlloc&: LIS->getVNInfoAllocator());
      return BuiltMI;
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size * 8 ==
         TRI->getRegSizeInBits(*MF.getRegInfo()
                               .getRegClass(MI.getOperand(OpNum).getReg())) &&
         "Invalid size combination" );

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
      isInt<8>(MI.getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(MI.getOperand(i: 2).getImm());
    transferDeadCC(OldMI: &MI, NewMI: BuiltMI);
    transferMIFlag(OldMI: &MI, NewMI: BuiltMI, Flag: MachineInstr::NoSWrap);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
       isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::ALGFI && OpNum == 0 &&
       isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
    // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)MI.getOperand(i: 2).getImm())/*sign-extend to 8 bits*/;
    transferDeadCC(OldMI: &MI, NewMI: BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
       isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::SLGFI && OpNum == 0 &&
       isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
    // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
    Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)-MI.getOperand(i: 2).getImm())/*negated constant*/;
    transferDeadCC(OldMI: &MI, NewMI: BuiltMI);
    return BuiltMI;
  }

  // Load-immediate / compare-immediate of a spilled register can use the
  // corresponding memory+immediate instruction directly.
  unsigned MemImmOpc = 0;
  switch (Opcode) {
  case SystemZ::LHIMux:
  case SystemZ::LHI:  MemImmOpc = SystemZ::MVHI;  break;
  case SystemZ::LGHI: MemImmOpc = SystemZ::MVGHI; break;
  case SystemZ::CHIMux:
  case SystemZ::CHI:  MemImmOpc = SystemZ::CHSI;  break;
  case SystemZ::CGHI: MemImmOpc = SystemZ::CGHSI; break;
  case SystemZ::CLFIMux:
  case SystemZ::CLFI:
    if (isUInt<16>(MI.getOperand(1).getImm()))
      MemImmOpc = SystemZ::CLFHSI;
    break;
  case SystemZ::CLGFI:
    if (isUInt<16>(MI.getOperand(1).getImm()))
      MemImmOpc = SystemZ::CLGHSI;
    break;
  default: break;
  }
  if (MemImmOpc)
    return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                   get(MemImmOpc))
               .addFrameIndex(FrameIndex)
               .addImm(0)
               .addImm(MI.getOperand(i: 1).getImm());

  // GPR<->FPR moves: fold the spill by storing/loading the other register
  // straight from/to the slot.
  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(StoreOpcode))
          .add(MI.getOperand(i: 1))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(LoadOpcode))
          .add(MI.getOperand(i: 0))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal.  We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI.hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI.memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI: &MI, Flag: SystemZII::SimpleBDXLoad)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(Size)
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI: &MI, Flag: SystemZII::SimpleBDXStore)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addImm(Size)
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one or the instruction is
  // commutable, try to change <INSN>R into <INSN>. Don't introduce a def of
  // CC if it is live and MI does not define it.
  unsigned NumOps = MI.getNumExplicitOperands();
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode == -1 ||
      (CCLiveAtMI && !MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
       get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
    return nullptr;

  // Check if all other vregs have a usable allocation in the case of vector
  // to FP conversion.
  const MCInstrDesc &MCID = MI.getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
    const MCOperandInfo &MCOI = MCID.operands()[I];
    if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
      continue;
    const TargetRegisterClass *RC = TRI->getRegClass(i: MCOI.RegClass);
    if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
      Register Reg = MI.getOperand(i: I).getReg();
      Register PhysReg = Reg.isVirtual()
                             ? (VRM ? Register(VRM->getPhys(virtReg: Reg)) : Register())
                             : Reg;
      // The memory form only accepts FP-class registers, so each vector
      // operand must have (or be assigned) an FP-overlapping register.
      if (!PhysReg ||
          !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
            SystemZ::FP64BitRegClass.contains(PhysReg) ||
            SystemZ::VF128BitRegClass.contains(PhysReg)))
        return nullptr;
    }
  }
  // Fused multiply and add/sub need to have the same dst and accumulator reg.
  bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
                    Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
  if (FusedFPOp) {
    Register DstReg = VRM->getPhys(virtReg: MI.getOperand(i: 0).getReg());
    Register AccReg = VRM->getPhys(virtReg: MI.getOperand(i: 3).getReg());
    if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
      return nullptr;
  }

  // Try to swap compare operands if possible.
  bool NeedsCommute = false;
  if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
       MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
       MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
       MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
      OpNum == 0 && prepareCompareSwapOperands(MI))
    NeedsCommute = true;

  // Conditional loads/selects carry trailing CCValid/CCMask operands that
  // are handled separately below.
  bool CCOperands = false;
  if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
      MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
    assert(MI.getNumOperands() == 6 && NumOps == 5 &&
           "LOCR/SELR instruction operands corrupt?" );
    NumOps -= 2;
    CCOperands = true;
  }

  // See if this is a 3-address instruction that is convertible to 2-address
  // and suitable for folding below. Only try this with virtual registers
  // and a provided VRM (during regalloc).
  if (NumOps == 3 && SystemZ::getTargetMemOpcode(Opcode: MemOpcode) != -1) {
    if (VRM == nullptr)
      return nullptr;
    else {
      Register DstReg = MI.getOperand(i: 0).getReg();
      Register DstPhys =
          (DstReg.isVirtual() ? Register(VRM->getPhys(virtReg: DstReg)) : DstReg);
      Register SrcReg = (OpNum == 2 ? MI.getOperand(i: 1).getReg()
                                    : ((OpNum == 1 && MI.isCommutable())
                                           ? MI.getOperand(i: 2).getReg()
                                           : Register()));
      if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
          SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
        NeedsCommute = (OpNum == 1);
      else
        return nullptr;
    }
  }

  if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
    const MCInstrDesc &MemDesc = get(MemOpcode);
    uint64_t AccessBytes = SystemZII::getAccessSize(Flags: MemDesc.TSFlags);
    assert(AccessBytes != 0 && "Size of access should be known" );
    assert(AccessBytes <= Size && "Access outside the frame index" );
    // Access the low AccessBytes of the (possibly larger) slot.
    uint64_t Offset = Size - AccessBytes;
    MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                      MI.getDebugLoc(), get(MemOpcode));
    if (MI.isCompare()) {
      assert(NumOps == 2 && "Expected 2 register operands for a compare." );
      MIB.add(MO: MI.getOperand(i: NeedsCommute ? 1 : 0));
    }
    else if (FusedFPOp) {
      MIB.add(MO: MI.getOperand(i: 0));
      MIB.add(MO: MI.getOperand(i: 3));
      MIB.add(MO: MI.getOperand(i: OpNum == 1 ? 2 : 1));
    }
    else {
      MIB.add(MO: MI.getOperand(i: 0));
      if (NeedsCommute)
        MIB.add(MO: MI.getOperand(i: 2));
      else
        for (unsigned I = 1; I < OpNum; ++I)
          MIB.add(MO: MI.getOperand(i: I));
    }
    MIB.addFrameIndex(Idx: FrameIndex).addImm(Val: Offset);
    if (MemDesc.TSFlags & SystemZII::HasIndex)
      MIB.addReg(RegNo: 0);
    if (CCOperands) {
      unsigned CCValid = MI.getOperand(i: NumOps).getImm();
      unsigned CCMask = MI.getOperand(i: NumOps + 1).getImm();
      MIB.addImm(Val: CCValid);
      // A commuted compare needs the inverse condition mask.
      MIB.addImm(Val: NeedsCommute ? CCMask ^ CCValid : CCMask);
    }
    if (MIB->definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
        (!MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) ||
         MI.registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))) {
      MIB->addRegisterDead(SystemZ::CC, TRI);
      if (CCLiveRange)
        CCLiveRange->createDeadDef(Def: MISlot, VNIAlloc&: LIS->getVNInfoAllocator());
    }
    // Constrain the register classes if converted from a vector opcode. The
    // allocated regs are in an FP reg-class per previous check above.
    for (const MachineOperand &MO : MIB->operands())
      if (MO.isReg() && MO.getReg().isVirtual()) {
        Register Reg = MO.getReg();
        if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
          MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
        else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
          MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
        else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
          MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
      }

    transferDeadCC(OldMI: &MI, NewMI: MIB);
    transferMIFlag(OldMI: &MI, NewMI: MIB, Flag: MachineInstr::NoSWrap);
    transferMIFlag(OldMI: &MI, NewMI: MIB, Flag: MachineInstr::NoFPExcept);
    return MIB;
  }

  return nullptr;
}
1336 | |
// Folding a load instruction (LoadMI) into MI is not implemented for
// SystemZ; decline by returning nullptr so the generic code keeps both
// instructions.
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {
  return nullptr;
}
1343 | |
// Expand SystemZ pseudo instructions after register allocation into real
// machine instructions. Returns true if MI was a pseudo handled here
// (expanded in place or replaced), false otherwise.
bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  // 128-bit moves are split into two 64-bit moves.
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  // "Mux" memory pseudos: expandRXYPseudo picks the low- or high-half
  // opcode depending on the allocated half of the 64-bit GPR.
  case SystemZ::LBMux:
    expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
    return true;

  case SystemZ::LHMux:
    expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
    return true;

  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, SystemZ::LLCR, 8);
    return true;

  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, SystemZ::LLHR, 16);
    return true;

  case SystemZ::LLCMux:
    expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
    return true;

  case SystemZ::LLHMux:
    expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
    return true;

  case SystemZ::LMux:
    expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
    return true;

  // Conditional load/store pseudos.
  case SystemZ::LOCMux:
    expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
    return true;

  case SystemZ::LOCHIMux:
    expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
    return true;

  case SystemZ::STCMux:
    expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
    return true;

  case SystemZ::STHMux:
    expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
    return true;

  case SystemZ::STMux:
    expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
    return true;

  case SystemZ::STOCMux:
    expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
    return true;

  // "Mux" register-immediate pseudos: expandRIPseudo picks the low- or
  // high-half opcode (the bool selects the conversion strategy).
  case SystemZ::LHIMux:
    expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
    return true;

  case SystemZ::IIFMux:
    expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
    return true;

  case SystemZ::IILMux:
    expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
    return true;

  case SystemZ::IIHMux:
    expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
    return true;

  case SystemZ::NIFMux:
    expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
    return true;

  case SystemZ::NILMux:
    expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
    return true;

  case SystemZ::NIHMux:
    expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
    return true;

  case SystemZ::OIFMux:
    expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
    return true;

  case SystemZ::OILMux:
    expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
    return true;

  case SystemZ::OIHMux:
    expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
    return true;

  case SystemZ::XIFMux:
    expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
    return true;

  case SystemZ::TMLMux:
    expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
    return true;

  case SystemZ::TMHMux:
    expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
    return true;

  case SystemZ::AHIMux:
    expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
    return true;

  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
    return true;

  case SystemZ::AFIMux:
    expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
    return true;

  case SystemZ::CHIMux:
    expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
    return true;

  case SystemZ::CFIMux:
    expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
    return true;

  case SystemZ::CLFIMux:
    expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
    return true;

  case SystemZ::CMux:
    expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
    return true;

  case SystemZ::CLMux:
    expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
    return true;

  // Rotate-and-insert across 32-bit halves: choose the HH/LL/HL/LH variant
  // based on which halves the source and destination were allocated to.
  case SystemZ::RISBMux: {
    bool DestIsHigh = SystemZ::isHighReg(Reg: MI.getOperand(i: 0).getReg());
    bool SrcIsHigh = SystemZ::isHighReg(Reg: MI.getOperand(i: 2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      // Cross-half variants need the rotate amount adjusted by 32.
      MI.getOperand(i: 5).setImm(MI.getOperand(i: 5).getImm() ^ 32);
    }
    return true;
  }

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(MI: &MI);
    return true;

  default:
    return false;
  }
}
1522 | |
1523 | unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { |
1524 | if (MI.isInlineAsm()) { |
1525 | const MachineFunction *MF = MI.getParent()->getParent(); |
1526 | const char *AsmStr = MI.getOperand(i: 0).getSymbolName(); |
1527 | return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); |
1528 | } |
1529 | else if (MI.getOpcode() == SystemZ::PATCHPOINT) |
1530 | return PatchPointOpers(&MI).getNumPatchBytes(); |
1531 | else if (MI.getOpcode() == SystemZ::STACKMAP) |
1532 | return MI.getOperand(i: 1).getImm(); |
1533 | else if (MI.getOpcode() == SystemZ::FENTRY_CALL) |
1534 | return 6; |
1535 | |
1536 | return MI.getDesc().getSize(); |
1537 | } |
1538 | |
// Decompose a recognized branch instruction into its SystemZII::Branch
// components: branch type, the CC values the instruction can produce
// (CCValid), the subset that takes the branch (CCMask), and the target
// operand.
SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  // Unconditional branches.
  case SystemZ::BR:
  case SystemZ::BI:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI.getOperand(i: 0));

  // Conditional branches carry explicit CCValid/CCMask operands.
  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(i: 0).getImm(),
                             MI.getOperand(i: 1).getImm(), &MI.getOperand(i: 2));

  // Branch-on-count.
  case SystemZ::BRCT:
  case SystemZ::BRCTH:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(i: 2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(i: 2));

  // Compare-and-branch; operand 2 is the condition mask, operand 3 the target.
  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::INLINEASM_BR:
    // Don't try to analyze asm goto, so pass nullptr as branch target argument.
    return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);

  default:
    llvm_unreachable("Unrecognized branch opcode" );
  }
}
1591 | |
1592 | void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, |
1593 | unsigned &LoadOpcode, |
1594 | unsigned &StoreOpcode) const { |
1595 | if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) { |
1596 | LoadOpcode = SystemZ::L; |
1597 | StoreOpcode = SystemZ::ST; |
1598 | } else if (RC == &SystemZ::GRH32BitRegClass) { |
1599 | LoadOpcode = SystemZ::LFH; |
1600 | StoreOpcode = SystemZ::STFH; |
1601 | } else if (RC == &SystemZ::GRX32BitRegClass) { |
1602 | LoadOpcode = SystemZ::LMux; |
1603 | StoreOpcode = SystemZ::STMux; |
1604 | } else if (RC == &SystemZ::GR64BitRegClass || |
1605 | RC == &SystemZ::ADDR64BitRegClass) { |
1606 | LoadOpcode = SystemZ::LG; |
1607 | StoreOpcode = SystemZ::STG; |
1608 | } else if (RC == &SystemZ::GR128BitRegClass || |
1609 | RC == &SystemZ::ADDR128BitRegClass) { |
1610 | LoadOpcode = SystemZ::L128; |
1611 | StoreOpcode = SystemZ::ST128; |
1612 | } else if (RC == &SystemZ::FP32BitRegClass) { |
1613 | LoadOpcode = SystemZ::LE; |
1614 | StoreOpcode = SystemZ::STE; |
1615 | } else if (RC == &SystemZ::FP64BitRegClass) { |
1616 | LoadOpcode = SystemZ::LD; |
1617 | StoreOpcode = SystemZ::STD; |
1618 | } else if (RC == &SystemZ::FP128BitRegClass) { |
1619 | LoadOpcode = SystemZ::LX; |
1620 | StoreOpcode = SystemZ::STX; |
1621 | } else if (RC == &SystemZ::VR32BitRegClass) { |
1622 | LoadOpcode = SystemZ::VL32; |
1623 | StoreOpcode = SystemZ::VST32; |
1624 | } else if (RC == &SystemZ::VR64BitRegClass) { |
1625 | LoadOpcode = SystemZ::VL64; |
1626 | StoreOpcode = SystemZ::VST64; |
1627 | } else if (RC == &SystemZ::VF128BitRegClass || |
1628 | RC == &SystemZ::VR128BitRegClass) { |
1629 | LoadOpcode = SystemZ::VL; |
1630 | StoreOpcode = SystemZ::VST; |
1631 | } else |
1632 | llvm_unreachable("Unsupported regclass to load or store" ); |
1633 | } |
1634 | |
1635 | unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode, |
1636 | int64_t Offset, |
1637 | const MachineInstr *MI) const { |
1638 | const MCInstrDesc &MCID = get(Opcode); |
1639 | int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset); |
1640 | if (isUInt<12>(x: Offset) && isUInt<12>(x: Offset2)) { |
1641 | // Get the instruction to use for unsigned 12-bit displacements. |
1642 | int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode); |
1643 | if (Disp12Opcode >= 0) |
1644 | return Disp12Opcode; |
1645 | |
1646 | // All address-related instructions can use unsigned 12-bit |
1647 | // displacements. |
1648 | return Opcode; |
1649 | } |
1650 | if (isInt<20>(x: Offset) && isInt<20>(x: Offset2)) { |
1651 | // Get the instruction to use for signed 20-bit displacements. |
1652 | int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode); |
1653 | if (Disp20Opcode >= 0) |
1654 | return Disp20Opcode; |
1655 | |
1656 | // Check whether Opcode allows signed 20-bit displacements. |
1657 | if (MCID.TSFlags & SystemZII::Has20BitOffset) |
1658 | return Opcode; |
1659 | |
1660 | // If a VR32/VR64 reg ended up in an FP register, use the FP opcode. |
1661 | if (MI && MI->getOperand(i: 0).isReg()) { |
1662 | Register Reg = MI->getOperand(i: 0).getReg(); |
1663 | if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) { |
1664 | switch (Opcode) { |
1665 | case SystemZ::VL32: |
1666 | return SystemZ::LEY; |
1667 | case SystemZ::VST32: |
1668 | return SystemZ::STEY; |
1669 | case SystemZ::VL64: |
1670 | return SystemZ::LDY; |
1671 | case SystemZ::VST64: |
1672 | return SystemZ::STDY; |
1673 | default: break; |
1674 | } |
1675 | } |
1676 | } |
1677 | } |
1678 | return 0; |
1679 | } |
1680 | |
1681 | bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const { |
1682 | const MCInstrDesc &MCID = get(Opcode); |
1683 | if (MCID.TSFlags & SystemZII::Has20BitOffset) |
1684 | return SystemZ::getDisp12Opcode(Opcode) >= 0; |
1685 | return SystemZ::getDisp20Opcode(Opcode) >= 0; |
1686 | } |
1687 | |
1688 | unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const { |
1689 | switch (Opcode) { |
1690 | case SystemZ::L: return SystemZ::LT; |
1691 | case SystemZ::LY: return SystemZ::LT; |
1692 | case SystemZ::LG: return SystemZ::LTG; |
1693 | case SystemZ::LGF: return SystemZ::LTGF; |
1694 | case SystemZ::LR: return SystemZ::LTR; |
1695 | case SystemZ::LGFR: return SystemZ::LTGFR; |
1696 | case SystemZ::LGR: return SystemZ::LTGR; |
1697 | case SystemZ::LCDFR: return SystemZ::LCDBR; |
1698 | case SystemZ::LPDFR: return SystemZ::LPDBR; |
1699 | case SystemZ::LNDFR: return SystemZ::LNDBR; |
1700 | case SystemZ::LCDFR_32: return SystemZ::LCEBR; |
1701 | case SystemZ::LPDFR_32: return SystemZ::LPEBR; |
1702 | case SystemZ::LNDFR_32: return SystemZ::LNEBR; |
1703 | // On zEC12 we prefer to use RISBGN. But if there is a chance to |
1704 | // actually use the condition code, we may turn it back into RISGB. |
1705 | // Note that RISBG is not really a "load-and-test" instruction, |
1706 | // but sets the same condition code values, so is OK to use here. |
1707 | case SystemZ::RISBGN: return SystemZ::RISBG; |
1708 | default: return 0; |
1709 | } |
1710 | } |
1711 | |
1712 | bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize, |
1713 | unsigned &Start, unsigned &End) const { |
1714 | // Reject trivial all-zero masks. |
1715 | Mask &= allOnes(Count: BitSize); |
1716 | if (Mask == 0) |
1717 | return false; |
1718 | |
1719 | // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of |
1720 | // the msb and End specifies the index of the lsb. |
1721 | unsigned LSB, Length; |
1722 | if (isShiftedMask_64(Value: Mask, MaskIdx&: LSB, MaskLen&: Length)) { |
1723 | Start = 63 - (LSB + Length - 1); |
1724 | End = 63 - LSB; |
1725 | return true; |
1726 | } |
1727 | |
1728 | // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb |
1729 | // of the low 1s and End specifies the lsb of the high 1s. |
1730 | if (isShiftedMask_64(Value: Mask ^ allOnes(Count: BitSize), MaskIdx&: LSB, MaskLen&: Length)) { |
1731 | assert(LSB > 0 && "Bottom bit must be set" ); |
1732 | assert(LSB + Length < BitSize && "Top bit must be set" ); |
1733 | Start = 63 - (LSB - 1); |
1734 | End = 63 - (LSB + Length); |
1735 | return true; |
1736 | } |
1737 | |
1738 | return false; |
1739 | } |
1740 | |
1741 | unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode, |
1742 | SystemZII::FusedCompareType Type, |
1743 | const MachineInstr *MI) const { |
1744 | switch (Opcode) { |
1745 | case SystemZ::CHI: |
1746 | case SystemZ::CGHI: |
1747 | if (!(MI && isInt<8>(x: MI->getOperand(i: 1).getImm()))) |
1748 | return 0; |
1749 | break; |
1750 | case SystemZ::CLFI: |
1751 | case SystemZ::CLGFI: |
1752 | if (!(MI && isUInt<8>(x: MI->getOperand(i: 1).getImm()))) |
1753 | return 0; |
1754 | break; |
1755 | case SystemZ::CL: |
1756 | case SystemZ::CLG: |
1757 | if (!STI.hasMiscellaneousExtensions()) |
1758 | return 0; |
1759 | if (!(MI && MI->getOperand(i: 3).getReg() == 0)) |
1760 | return 0; |
1761 | break; |
1762 | } |
1763 | switch (Type) { |
1764 | case SystemZII::CompareAndBranch: |
1765 | switch (Opcode) { |
1766 | case SystemZ::CR: |
1767 | return SystemZ::CRJ; |
1768 | case SystemZ::CGR: |
1769 | return SystemZ::CGRJ; |
1770 | case SystemZ::CHI: |
1771 | return SystemZ::CIJ; |
1772 | case SystemZ::CGHI: |
1773 | return SystemZ::CGIJ; |
1774 | case SystemZ::CLR: |
1775 | return SystemZ::CLRJ; |
1776 | case SystemZ::CLGR: |
1777 | return SystemZ::CLGRJ; |
1778 | case SystemZ::CLFI: |
1779 | return SystemZ::CLIJ; |
1780 | case SystemZ::CLGFI: |
1781 | return SystemZ::CLGIJ; |
1782 | default: |
1783 | return 0; |
1784 | } |
1785 | case SystemZII::CompareAndReturn: |
1786 | switch (Opcode) { |
1787 | case SystemZ::CR: |
1788 | return SystemZ::CRBReturn; |
1789 | case SystemZ::CGR: |
1790 | return SystemZ::CGRBReturn; |
1791 | case SystemZ::CHI: |
1792 | return SystemZ::CIBReturn; |
1793 | case SystemZ::CGHI: |
1794 | return SystemZ::CGIBReturn; |
1795 | case SystemZ::CLR: |
1796 | return SystemZ::CLRBReturn; |
1797 | case SystemZ::CLGR: |
1798 | return SystemZ::CLGRBReturn; |
1799 | case SystemZ::CLFI: |
1800 | return SystemZ::CLIBReturn; |
1801 | case SystemZ::CLGFI: |
1802 | return SystemZ::CLGIBReturn; |
1803 | default: |
1804 | return 0; |
1805 | } |
1806 | case SystemZII::CompareAndSibcall: |
1807 | switch (Opcode) { |
1808 | case SystemZ::CR: |
1809 | return SystemZ::CRBCall; |
1810 | case SystemZ::CGR: |
1811 | return SystemZ::CGRBCall; |
1812 | case SystemZ::CHI: |
1813 | return SystemZ::CIBCall; |
1814 | case SystemZ::CGHI: |
1815 | return SystemZ::CGIBCall; |
1816 | case SystemZ::CLR: |
1817 | return SystemZ::CLRBCall; |
1818 | case SystemZ::CLGR: |
1819 | return SystemZ::CLGRBCall; |
1820 | case SystemZ::CLFI: |
1821 | return SystemZ::CLIBCall; |
1822 | case SystemZ::CLGFI: |
1823 | return SystemZ::CLGIBCall; |
1824 | default: |
1825 | return 0; |
1826 | } |
1827 | case SystemZII::CompareAndTrap: |
1828 | switch (Opcode) { |
1829 | case SystemZ::CR: |
1830 | return SystemZ::CRT; |
1831 | case SystemZ::CGR: |
1832 | return SystemZ::CGRT; |
1833 | case SystemZ::CHI: |
1834 | return SystemZ::CIT; |
1835 | case SystemZ::CGHI: |
1836 | return SystemZ::CGIT; |
1837 | case SystemZ::CLR: |
1838 | return SystemZ::CLRT; |
1839 | case SystemZ::CLGR: |
1840 | return SystemZ::CLGRT; |
1841 | case SystemZ::CLFI: |
1842 | return SystemZ::CLFIT; |
1843 | case SystemZ::CLGFI: |
1844 | return SystemZ::CLGIT; |
1845 | case SystemZ::CL: |
1846 | return SystemZ::CLT; |
1847 | case SystemZ::CLG: |
1848 | return SystemZ::CLGT; |
1849 | default: |
1850 | return 0; |
1851 | } |
1852 | } |
1853 | return 0; |
1854 | } |
1855 | |
1856 | bool SystemZInstrInfo:: |
1857 | prepareCompareSwapOperands(MachineBasicBlock::iterator const MBBI) const { |
1858 | assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() && |
1859 | MBBI->getOperand(1).isReg() && !MBBI->mayLoad() && |
1860 | "Not a compare reg/reg." ); |
1861 | |
1862 | MachineBasicBlock *MBB = MBBI->getParent(); |
1863 | bool CCLive = true; |
1864 | SmallVector<MachineInstr *, 4> CCUsers; |
1865 | for (MachineInstr &MI : llvm::make_range(x: std::next(x: MBBI), y: MBB->end())) { |
1866 | if (MI.readsRegister(SystemZ::CC, /*TRI=*/nullptr)) { |
1867 | unsigned Flags = MI.getDesc().TSFlags; |
1868 | if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast)) |
1869 | CCUsers.push_back(Elt: &MI); |
1870 | else |
1871 | return false; |
1872 | } |
1873 | if (MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr)) { |
1874 | CCLive = false; |
1875 | break; |
1876 | } |
1877 | } |
1878 | if (CCLive) { |
1879 | LiveRegUnits LiveRegs(*MBB->getParent()->getSubtarget().getRegisterInfo()); |
1880 | LiveRegs.addLiveOuts(MBB: *MBB); |
1881 | if (!LiveRegs.available(SystemZ::CC)) |
1882 | return false; |
1883 | } |
1884 | |
1885 | // Update all CC users. |
1886 | for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) { |
1887 | unsigned Flags = CCUsers[Idx]->getDesc().TSFlags; |
1888 | unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ? |
1889 | 0 : CCUsers[Idx]->getNumExplicitOperands() - 2); |
1890 | MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(i: FirstOpNum + 1); |
1891 | unsigned NewCCMask = SystemZ::reverseCCMask(CCMask: CCMaskMO.getImm()); |
1892 | CCMaskMO.setImm(NewCCMask); |
1893 | } |
1894 | |
1895 | return true; |
1896 | } |
1897 | |
1898 | unsigned SystemZ::reverseCCMask(unsigned CCMask) { |
1899 | return ((CCMask & SystemZ::CCMASK_CMP_EQ) | |
1900 | (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) | |
1901 | (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) | |
1902 | (CCMask & SystemZ::CCMASK_CMP_UO)); |
1903 | } |
1904 | |
1905 | MachineBasicBlock *SystemZ::emitBlockAfter(MachineBasicBlock *MBB) { |
1906 | MachineFunction &MF = *MBB->getParent(); |
1907 | MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(BB: MBB->getBasicBlock()); |
1908 | MF.insert(MBBI: std::next(x: MachineFunction::iterator(MBB)), MBB: NewMBB); |
1909 | return NewMBB; |
1910 | } |
1911 | |
1912 | MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI, |
1913 | MachineBasicBlock *MBB) { |
1914 | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); |
1915 | NewMBB->splice(Where: NewMBB->begin(), Other: MBB, |
1916 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: MBB->end()); |
1917 | NewMBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
1918 | return NewMBB; |
1919 | } |
1920 | |
1921 | MachineBasicBlock *SystemZ::splitBlockBefore(MachineBasicBlock::iterator MI, |
1922 | MachineBasicBlock *MBB) { |
1923 | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); |
1924 | NewMBB->splice(Where: NewMBB->begin(), Other: MBB, From: MI, To: MBB->end()); |
1925 | NewMBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
1926 | return NewMBB; |
1927 | } |
1928 | |
1929 | unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const { |
1930 | if (!STI.hasLoadAndTrap()) |
1931 | return 0; |
1932 | switch (Opcode) { |
1933 | case SystemZ::L: |
1934 | case SystemZ::LY: |
1935 | return SystemZ::LAT; |
1936 | case SystemZ::LG: |
1937 | return SystemZ::LGAT; |
1938 | case SystemZ::LFH: |
1939 | return SystemZ::LFHAT; |
1940 | case SystemZ::LLGF: |
1941 | return SystemZ::LLGFAT; |
1942 | case SystemZ::LLGT: |
1943 | return SystemZ::LLGTAT; |
1944 | } |
1945 | return 0; |
1946 | } |
1947 | |
1948 | void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB, |
1949 | MachineBasicBlock::iterator MBBI, |
1950 | unsigned Reg, uint64_t Value) const { |
1951 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
1952 | unsigned Opcode = 0; |
1953 | if (isInt<16>(x: Value)) |
1954 | Opcode = SystemZ::LGHI; |
1955 | else if (SystemZ::isImmLL(Val: Value)) |
1956 | Opcode = SystemZ::LLILL; |
1957 | else if (SystemZ::isImmLH(Val: Value)) { |
1958 | Opcode = SystemZ::LLILH; |
1959 | Value >>= 16; |
1960 | } |
1961 | else if (isInt<32>(Value)) |
1962 | Opcode = SystemZ::LGFI; |
1963 | if (Opcode) { |
1964 | BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value); |
1965 | return; |
1966 | } |
1967 | |
1968 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
1969 | assert (MRI.isSSA() && "Huge values only handled before reg-alloc ." ); |
1970 | Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); |
1971 | Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); |
1972 | BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0); |
1973 | BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1) |
1974 | .addReg(Reg0).addImm(Value >> 32); |
1975 | BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg) |
1976 | .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1)); |
1977 | } |
1978 | |
1979 | bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI, |
1980 | StringRef &ErrInfo) const { |
1981 | const MCInstrDesc &MCID = MI.getDesc(); |
1982 | for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { |
1983 | if (I >= MCID.getNumOperands()) |
1984 | break; |
1985 | const MachineOperand &Op = MI.getOperand(i: I); |
1986 | const MCOperandInfo &MCOI = MCID.operands()[I]; |
1987 | // Addressing modes have register and immediate operands. Op should be a |
1988 | // register (or frame index) operand if MCOI.RegClass contains a valid |
1989 | // register class, or an immediate otherwise. |
1990 | if (MCOI.OperandType == MCOI::OPERAND_MEMORY && |
1991 | ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) || |
1992 | (MCOI.RegClass == -1 && !Op.isImm()))) { |
1993 | ErrInfo = "Addressing mode operands corrupt!" ; |
1994 | return false; |
1995 | } |
1996 | } |
1997 | |
1998 | return true; |
1999 | } |
2000 | |
2001 | bool SystemZInstrInfo:: |
2002 | areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, |
2003 | const MachineInstr &MIb) const { |
2004 | |
2005 | if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) |
2006 | return false; |
2007 | |
2008 | // If mem-operands show that the same address Value is used by both |
2009 | // instructions, check for non-overlapping offsets and widths. Not |
2010 | // sure if a register based analysis would be an improvement... |
2011 | |
2012 | MachineMemOperand *MMOa = *MIa.memoperands_begin(); |
2013 | MachineMemOperand *MMOb = *MIb.memoperands_begin(); |
2014 | const Value *VALa = MMOa->getValue(); |
2015 | const Value *VALb = MMOb->getValue(); |
2016 | bool SameVal = (VALa && VALb && (VALa == VALb)); |
2017 | if (!SameVal) { |
2018 | const PseudoSourceValue *PSVa = MMOa->getPseudoValue(); |
2019 | const PseudoSourceValue *PSVb = MMOb->getPseudoValue(); |
2020 | if (PSVa && PSVb && (PSVa == PSVb)) |
2021 | SameVal = true; |
2022 | } |
2023 | if (SameVal) { |
2024 | int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset(); |
2025 | LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize(); |
2026 | int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; |
2027 | int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; |
2028 | LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; |
2029 | if (LowWidth.hasValue() && |
2030 | LowOffset + (int)LowWidth.getValue() <= HighOffset) |
2031 | return true; |
2032 | } |
2033 | |
2034 | return false; |
2035 | } |
2036 | |