//===-- SystemZFrameLowering.cpp - Frame lowering for SystemZ -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZFrameLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZInstrBuilder.h"
#include "SystemZInstrInfo.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZRegisterInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

namespace {
// The ABI-defined register save slots, relative to the CFA (i.e.
// incoming stack pointer + SystemZMC::ELFCallFrameSize).
static const TargetFrameLowering::SpillSlot ELFSpillOffsetTable[] = {
  { SystemZ::R2D,  0x10 },
  { SystemZ::R3D,  0x18 },
  { SystemZ::R4D,  0x20 },
  { SystemZ::R5D,  0x28 },
  { SystemZ::R6D,  0x30 },
  { SystemZ::R7D,  0x38 },
  { SystemZ::R8D,  0x40 },
  { SystemZ::R9D,  0x48 },
  { SystemZ::R10D, 0x50 },
  { SystemZ::R11D, 0x58 },
  { SystemZ::R12D, 0x60 },
  { SystemZ::R13D, 0x68 },
  { SystemZ::R14D, 0x70 },
  { SystemZ::R15D, 0x78 },
  { SystemZ::F0D,  0x80 },
  { SystemZ::F2D,  0x88 },
  { SystemZ::F4D,  0x90 },
  { SystemZ::F6D,  0x98 }
};

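// Register save slot offsets for the XPLINK64 ABI register save area.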
static const TargetFrameLowering::SpillSlot XPLINKSpillOffsetTable[] = {
    {SystemZ::R4D, 0x00},  {SystemZ::R5D, 0x08},  {SystemZ::R6D, 0x10},
    {SystemZ::R7D, 0x18},  {SystemZ::R8D, 0x20},  {SystemZ::R9D, 0x28},
    {SystemZ::R10D, 0x30}, {SystemZ::R11D, 0x38}, {SystemZ::R12D, 0x40},
    {SystemZ::R13D, 0x48}, {SystemZ::R14D, 0x50}, {SystemZ::R15D, 0x58}};
} // end anonymous namespace

SystemZFrameLowering::SystemZFrameLowering(StackDirection D, Align StackAl,
                                           int LAO, Align TransAl,
                                           bool StackReal, unsigned PointerSize)
    : TargetFrameLowering(D, StackAl, LAO, TransAl, StackReal),
      PointerSize(PointerSize) {}

std::unique_ptr<SystemZFrameLowering>
SystemZFrameLowering::create(const SystemZSubtarget &STI) {
  unsigned PtrSz =
      STI.getTargetLowering()->getTargetMachine().getPointerSize(0);
  if (STI.isTargetXPLINK64())
    return std::make_unique<SystemZXPLINKFrameLowering>(PtrSz);
  return std::make_unique<SystemZELFFrameLowering>(PtrSz);
}

namespace {
struct SZFrameSortingObj {
  bool IsValid = false;     // True if we care about this Object.
  uint32_t ObjectIndex = 0; // Index of Object into MFI list.
  uint64_t ObjectSize = 0;  // Size of Object in bytes.
  uint32_t D12Count = 0;    // 12-bit displacement only.
  uint32_t DPairCount = 0;  // 12 or 20 bit displacement.
};
typedef std::vector<SZFrameSortingObj> SZFrameObjVec;
} // namespace

// TODO: Move to base class.
void SystemZELFFrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *TII = MF.getSubtarget<SystemZSubtarget>().getInstrInfo();

  // Make a vector of sorting objects to track all MFI objects and mark those
  // to be sorted as valid.
  if (ObjectsToAllocate.size() <= 1)
    return;
  SZFrameObjVec SortingObjects(MFI.getObjectIndexEnd());
  for (auto &Obj : ObjectsToAllocate) {
    SortingObjects[Obj].IsValid = true;
    SortingObjects[Obj].ObjectIndex = Obj;
    SortingObjects[Obj].ObjectSize = MFI.getObjectSize(Obj);
  }

  // Examine uses for each object and record short (12-bit) and "pair"
  // displacement types.
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &MO = MI.getOperand(I);
        if (!MO.isFI())
          continue;
        int Index = MO.getIndex();
        if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
            SortingObjects[Index].IsValid) {
          if (TII->hasDisplacementPairInsn(MI.getOpcode()))
            SortingObjects[Index].DPairCount++;
          else if (!(MI.getDesc().TSFlags & SystemZII::Has20BitOffset))
            SortingObjects[Index].D12Count++;
        }
      }
    }

  // Sort all objects for short/paired displacements, which should be
  // sufficient as it seems like all frame objects typically are within the
  // long displacement range. Sorting works by computing the "density" as
  // Count / ObjectSize. The comparisons of two such fractions are refactored
  // by multiplying both sides with A.ObjectSize * B.ObjectSize, in order to
  // eliminate the (fp) divisions. A higher density object needs to go after
  // in the list in order for it to end up lower on the stack.
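  // For example, an 8-byte object with two 12-bit-only uses sorts after a
  // 16-byte object with a single such use (2 * 16 > 1 * 8), so the denser
  // object ends up closer to the stack pointer where short displacements
  // can still reach it.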
  auto CmpD12 = [](const SZFrameSortingObj &A, const SZFrameSortingObj &B) {
    // Put all invalid and variable sized objects at the end.
    if (!A.IsValid || !B.IsValid)
      return A.IsValid;
    if (!A.ObjectSize || !B.ObjectSize)
      return A.ObjectSize > 0;
    uint64_t ADensityCmp = A.D12Count * B.ObjectSize;
    uint64_t BDensityCmp = B.D12Count * A.ObjectSize;
    if (ADensityCmp != BDensityCmp)
      return ADensityCmp < BDensityCmp;
    return A.DPairCount * B.ObjectSize < B.DPairCount * A.ObjectSize;
  };
  std::stable_sort(SortingObjects.begin(), SortingObjects.end(), CmpD12);

  // Now modify the original list to represent the final order that
  // we want.
  unsigned Idx = 0;
  for (auto &Obj : SortingObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[Idx++] = Obj.ObjectIndex;
  }
}

bool SystemZFrameLowering::hasReservedCallFrame(
    const MachineFunction &MF) const {
  // The ELF ABI requires us to allocate 160 bytes of stack space for the
  // callee, with any outgoing stack arguments being placed above that. It
  // seems better to make that area a permanent feature of the frame even if
  // we're using a frame pointer. Similarly, 64-bit XPLINK requires 96 bytes
  // of stack space for the register save area.
  return true;
}

bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  bool IsVarArg = MF.getFunction().isVarArg();
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  unsigned LowGPR = 0;
  unsigned HighGPR = SystemZ::R15D;
  int StartSPOffset = SystemZMC::ELFCallFrameSize;
  for (auto &CS : CSI) {
    Register Reg = CS.getReg();
    int Offset = getRegSpillOffset(MF, Reg);
    if (Offset) {
      if (SystemZ::GR64BitRegClass.contains(Reg) && StartSPOffset > Offset) {
        LowGPR = Reg;
        StartSPOffset = Offset;
      }
      Offset -= SystemZMC::ELFCallFrameSize;
      int FrameIdx =
          MFFrame.CreateFixedSpillStackObject(getPointerSize(), Offset);
      CS.setFrameIdx(FrameIdx);
    } else
      CS.setFrameIdx(INT32_MAX);
  }

  // Save the range of call-saved registers, for use by the
  // prologue/epilogue inserters.
  ZFI->setRestoreGPRRegs(LowGPR, HighGPR, StartSPOffset);
  if (IsVarArg) {
    // Also save the GPR varargs, if any. R6D is call-saved, so would
    // already be included, but we also need to handle the call-clobbered
    // argument registers.
    Register FirstGPR = ZFI->getVarArgsFirstGPR();
    if (FirstGPR < SystemZ::ELFNumArgGPRs) {
      unsigned Reg = SystemZ::ELFArgGPRs[FirstGPR];
      int Offset = getRegSpillOffset(MF, Reg);
      if (StartSPOffset > Offset) {
        LowGPR = Reg; StartSPOffset = Offset;
      }
    }
  }
  ZFI->setSpillGPRRegs(LowGPR, HighGPR, StartSPOffset);

  // Create fixed stack objects for the remaining registers.
  int CurrOffset = -SystemZMC::ELFCallFrameSize;
  if (usePackedStack(MF))
    CurrOffset += StartSPOffset;

  for (auto &CS : CSI) {
    if (CS.getFrameIdx() != INT32_MAX)
      continue;
    Register Reg = CS.getReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    unsigned Size = TRI->getSpillSize(*RC);
    CurrOffset -= Size;
    assert(CurrOffset % 8 == 0 &&
           "8-byte alignment required for all register save slots");
    int FrameIdx = MFFrame.CreateFixedSpillStackObject(Size, CurrOffset);
    CS.setFrameIdx(FrameIdx);
  }

  return true;
}

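// Decide which physical registers must be treated as callee-saved for this
// function, beyond what the generic target-independent logic already marks.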
void SystemZELFFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                   BitVector &SavedRegs,
                                                   RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  bool HasFP = hasFP(MF);
  SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool IsVarArg = MF.getFunction().isVarArg();

  // va_start stores incoming FPR varargs in the normal way, but delegates
  // the saving of incoming GPR varargs to spillCalleeSavedRegisters().
  // Record these pending uses, which typically include the call-saved
  // argument register R6D.
  if (IsVarArg)
    for (unsigned I = MFI->getVarArgsFirstGPR(); I < SystemZ::ELFNumArgGPRs; ++I)
      SavedRegs.set(SystemZ::ELFArgGPRs[I]);

  // If there are any landing pads, entering them will modify r6/r7.
  if (!MF.getLandingPads().empty()) {
    SavedRegs.set(SystemZ::R6D);
    SavedRegs.set(SystemZ::R7D);
  }

  // If the function requires a frame pointer, record that the hard
  // frame pointer will be clobbered.
  if (HasFP)
    SavedRegs.set(SystemZ::R11D);

  // If the function calls other functions, record that the return
  // address register will be clobbered.
  if (MFFrame.hasCalls())
    SavedRegs.set(SystemZ::R14D);

  // If we are saving GPRs other than the stack pointer, we might as well
  // save and restore the stack pointer at the same time, via STMG and LMG.
  // This allows the deallocation to be done by the LMG, rather than needing
  // a separate %r15 addition.
  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
  for (unsigned I = 0; CSRegs[I]; ++I) {
    unsigned Reg = CSRegs[I];
    if (SystemZ::GR64BitRegClass.contains(Reg) && SavedRegs.test(Reg)) {
      SavedRegs.set(SystemZ::R15D);
      break;
    }
  }
}

SystemZELFFrameLowering::SystemZELFFrameLowering(unsigned PointerSize)
    : SystemZFrameLowering(TargetFrameLowering::StackGrowsDown, Align(8), 0,
                           Align(8), /* StackRealignable */ false, PointerSize),
      RegSpillOffsets(0) {

  // Due to the SystemZ ABI, the DWARF CFA (Canonical Frame Address) is not
  // equal to the incoming stack pointer, but to incoming stack pointer plus
  // 160. Instead of using a Local Area Offset, the Register save area will
  // be occupied by fixed frame objects, and all offsets are actually
  // relative to CFA.

  // Create a mapping from register number to save slot offset.
  // These offsets are relative to the start of the register save area.
  RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
  for (const auto &Entry : ELFSpillOffsetTable)
    RegSpillOffsets[Entry.Reg] = Entry.Offset;
}

// Add GPR64 to the save instruction being built by MIB, which is in basic
// block MBB. IsImplicit says whether GPR64 is an implicit operand that comes
// between the explicit start and end registers, rather than an explicit
// operand of the instruction.
static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,
                        unsigned GPR64, bool IsImplicit) {
  const TargetRegisterInfo *RI =
      MBB.getParent()->getSubtarget().getRegisterInfo();
  Register GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);
  bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);
  if (!IsLive || !IsImplicit) {
    MIB.addReg(GPR64, getImplRegState(IsImplicit) | getKillRegState(!IsLive));
    if (!IsLive)
      MBB.addLiveIn(GPR64);
  }
}

bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool IsVarArg = MF.getFunction().isVarArg();
  DebugLoc DL;

  // Save GPRs
  SystemZ::GPRRegs SpillGPRs = ZFI->getSpillGPRRegs();
  if (SpillGPRs.LowGPR) {
    assert(SpillGPRs.LowGPR != SpillGPRs.HighGPR &&
           "Should be saving %r15 and something else");

    // Build an STMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));

    // Add the explicit register operands.
    addSavedGPR(MBB, MIB, SpillGPRs.LowGPR, false);
    addSavedGPR(MBB, MIB, SpillGPRs.HighGPR, false);

    // Add the address.
    MIB.addReg(SystemZ::R15D).addImm(SpillGPRs.GPROffset);

    // Make sure all call-saved GPRs are included as operands and are
    // marked as live on entry.
    for (const CalleeSavedInfo &I : CSI) {
      Register Reg = I.getReg();
      if (SystemZ::GR64BitRegClass.contains(Reg))
        addSavedGPR(MBB, MIB, Reg, true);
    }

    // ...likewise GPR varargs.
    if (IsVarArg)
      for (unsigned I = ZFI->getVarArgsFirstGPR(); I < SystemZ::ELFNumArgGPRs; ++I)
        addSavedGPR(MBB, MIB, SystemZ::ELFArgGPRs[I], true);
  }

  // Save FPRs/VRs in the normal TargetInstrInfo way.
  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
                               &SystemZ::FP64BitRegClass, TRI, Register());
    }
    if (SystemZ::VR128BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
                               &SystemZ::VR128BitRegClass, TRI, Register());
    }
  }

  return true;
}

bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool HasFP = hasFP(MF);
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Restore FPRs/VRs in the normal TargetInstrInfo way.
  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
                                &SystemZ::FP64BitRegClass, TRI, Register());
    if (SystemZ::VR128BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
                                &SystemZ::VR128BitRegClass, TRI, Register());
  }

  // Restore call-saved GPRs (but not call-clobbered varargs, which at
  // this point might hold return values).
  SystemZ::GPRRegs RestoreGPRs = ZFI->getRestoreGPRRegs();
  if (RestoreGPRs.LowGPR) {
    // If we saved any of %r2-%r5 as varargs, we should also be saving
    // and restoring %r6. If we're saving %r6 or above, we should be
    // restoring it too.
    assert(RestoreGPRs.LowGPR != RestoreGPRs.HighGPR &&
           "Should be loading %r15 and something else");

    // Build an LMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::LMG));

    // Add the explicit register operands.
    MIB.addReg(RestoreGPRs.LowGPR, RegState::Define);
    MIB.addReg(RestoreGPRs.HighGPR, RegState::Define);

    // Add the address.
    MIB.addReg(HasFP ? SystemZ::R11D : SystemZ::R15D);
    MIB.addImm(RestoreGPRs.GPROffset);

    // Do a second scan adding regs as being defined by instruction
    for (const CalleeSavedInfo &I : CSI) {
      Register Reg = I.getReg();
      if (Reg != RestoreGPRs.LowGPR && Reg != RestoreGPRs.HighGPR &&
          SystemZ::GR64BitRegClass.contains(Reg))
        MIB.addReg(Reg, RegState::ImplicitDefine);
    }
  }

  return true;
}

void SystemZELFFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  bool BackChain = MF.getSubtarget<SystemZSubtarget>().hasBackChain();

  if (!usePackedStack(MF) || BackChain)
    // Create the incoming register save area.
    getOrCreateFramePointerSaveIndex(MF);

  // Get the size of our stack frame to be allocated ...
  uint64_t StackSize = (MFFrame.estimateStackSize(MF) +
                        SystemZMC::ELFCallFrameSize);
  // ... and the maximum offset we may need to reach into the
  // caller's frame to access the save area or stack arguments.
  int64_t MaxArgOffset = 0;
  for (int I = MFFrame.getObjectIndexBegin(); I != 0; ++I)
    if (MFFrame.getObjectOffset(I) >= 0) {
      int64_t ArgOffset = MFFrame.getObjectOffset(I) +
                          MFFrame.getObjectSize(I);
      MaxArgOffset = std::max(MaxArgOffset, ArgOffset);
    }

  uint64_t MaxReach = StackSize + MaxArgOffset;
  if (!isUInt<12>(MaxReach)) {
    // We may need register scavenging slots if some parts of the frame
    // are outside the reach of an unsigned 12-bit displacement.
    // Create 2 for the case where both addresses in an MVC are
    // out of range.
    RS->addScavengingFrameIndex(
        MFFrame.CreateStackObject(getPointerSize(), Align(8), false));
    RS->addScavengingFrameIndex(
        MFFrame.CreateStackObject(getPointerSize(), Align(8), false));
  }

  // R6 remains callee-saved even when it is used as an argument register.
  // If in that case it is not clobbered (and restored), it should never be
  // marked as killed.
  if (MF.front().isLiveIn(SystemZ::R6D) &&
      ZFI->getRestoreGPRRegs().LowGPR != SystemZ::R6D)
    for (auto &MO : MRI->use_nodbg_operands(SystemZ::R6D))
      MO.setIsKill(false);
}

// Emit instructions before MBBI (in MBB) to add NumBytes to Reg.
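// If NumBytes does not fit a single immediate, the increment is split across
// multiple AGHI/AGFI instructions, with each AGFI value clamped to a multiple
// of 8 so that 8-byte stack alignment is maintained at every step.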
static void emitIncrement(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI, const DebugLoc &DL,
                          Register Reg, int64_t NumBytes,
                          const TargetInstrInfo *TII) {
  while (NumBytes) {
    unsigned Opcode;
    int64_t ThisVal = NumBytes;
    if (isInt<16>(NumBytes))
      Opcode = SystemZ::AGHI;
    else {
      Opcode = SystemZ::AGFI;
      // Make sure we maintain 8-byte stack alignment.
      int64_t MinVal = -uint64_t(1) << 31;
      int64_t MaxVal = (int64_t(1) << 31) - 8;
      if (ThisVal < MinVal)
        ThisVal = MinVal;
      else if (ThisVal > MaxVal)
        ThisVal = MaxVal;
    }
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII->get(Opcode), Reg)
                           .addReg(Reg).addImm(ThisVal);
    // The CC implicit def is dead.
    MI->getOperand(3).setIsDead();
    NumBytes -= ThisVal;
  }
}

// Add CFI for the new CFA offset.
static void buildCFAOffs(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         const DebugLoc &DL, int Offset,
                         const SystemZInstrInfo *ZII) {
  unsigned CFIIndex = MBB.getParent()->addFrameInst(
      MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
  BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

// Add CFI for the new frame location.
static void buildDefCFAReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI,
                           const DebugLoc &DL, unsigned Reg,
                           const SystemZInstrInfo *ZII) {
  MachineFunction &MF = *MBB.getParent();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createDefCfaRegister(nullptr, RegNum));
  BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

void SystemZELFFrameLowering::emitPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  const SystemZSubtarget &STI = MF.getSubtarget<SystemZSubtarget>();
  const SystemZTargetLowering &TLI = *STI.getTargetLowering();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  auto *ZII = static_cast<const SystemZInstrInfo *>(STI.getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFFrame.getCalleeSavedInfo();
  bool HasFP = hasFP(MF);

  // In the GHC calling convention, C stack space, including the ABI-defined
  // 160-byte base area, is (de)allocated by GHC itself. This stack space may
  // be used by LLVM as spill slots for the tail-recursive GHC functions, so
  // do not allocate any stack space here.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC) {
    if (MFFrame.getStackSize() > 2048 * sizeof(long)) {
      report_fatal_error(
          "Pre allocated stack space for GHC function is too small");
    }
    if (HasFP) {
      report_fatal_error(
          "In GHC calling convention a frame pointer is not supported");
    }
    MFFrame.setStackSize(MFFrame.getStackSize() + SystemZMC::ELFCallFrameSize);
    return;
  }

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // The current offset of the stack pointer from the CFA.
  int64_t SPOffsetFromCFA = -SystemZMC::ELFCFAOffsetFromInitialSP;

  if (ZFI->getSpillGPRRegs().LowGPR) {
    // Skip over the GPR saves.
    if (MBBI != MBB.end() && MBBI->getOpcode() == SystemZ::STMG)
      ++MBBI;
    else
      llvm_unreachable("Couldn't skip over GPR saves");

    // Add CFI for the GPR saves.
    for (auto &Save : CSI) {
      Register Reg = Save.getReg();
      if (SystemZ::GR64BitRegClass.contains(Reg)) {
        int FI = Save.getFrameIdx();
        int64_t Offset = MFFrame.getObjectOffset(FI);
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  uint64_t StackSize = MFFrame.getStackSize();
  // We need to allocate the ABI-defined 160-byte base area whenever
  // we allocate stack space for our own use and whenever we call another
  // function.
  bool HasStackObject = false;
  for (unsigned i = 0, e = MFFrame.getObjectIndexEnd(); i != e; ++i)
    if (!MFFrame.isDeadObjectIndex(i)) {
      HasStackObject = true;
      break;
    }
  if (HasStackObject || MFFrame.hasCalls())
    StackSize += SystemZMC::ELFCallFrameSize;
  // Don't allocate the incoming reg save area.
  StackSize = StackSize > SystemZMC::ELFCallFrameSize
                  ? StackSize - SystemZMC::ELFCallFrameSize
                  : 0;
  MFFrame.setStackSize(StackSize);

  if (StackSize) {
    // Allocate StackSize bytes.
    int64_t Delta = -int64_t(StackSize);
    const unsigned ProbeSize = TLI.getStackProbeSize(MF);
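    // The STMG that saves the GPRs already touches the stack within one
    // probe interval of the new stack pointer when the spill offset plus
    // the whole allocation is below ProbeSize, so it acts as a free probe
    // and no explicit probing is needed.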
    bool FreeProbe = (ZFI->getSpillGPRRegs().GPROffset &&
           (ZFI->getSpillGPRRegs().GPROffset + StackSize) < ProbeSize);
    if (!FreeProbe &&
        MF.getSubtarget().getTargetLowering()->hasInlineStackProbe(MF)) {
      // Stack probing may involve looping, but splitting the prologue block
      // is not possible at this point since it would invalidate the
      // SaveBlocks / RestoreBlocks sets of PEI in the single block function
      // case. Build a pseudo to be handled later by inlineStackProbe().
      BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::PROBED_STACKALLOC))
          .addImm(StackSize);
    }
    else {
      bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain();
      // If we need backchain, save current stack pointer. R1 is free at
      // this point.
      if (StoreBackchain)
        BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR))
          .addReg(SystemZ::R1D, RegState::Define).addReg(SystemZ::R15D);
      emitIncrement(MBB, MBBI, DL, SystemZ::R15D, Delta, ZII);
      buildCFAOffs(MBB, MBBI, DL, SPOffsetFromCFA + Delta, ZII);
      if (StoreBackchain)
        BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::STG))
          .addReg(SystemZ::R1D, RegState::Kill).addReg(SystemZ::R15D)
          .addImm(getBackchainOffset(MF)).addReg(0);
    }
    SPOffsetFromCFA += Delta;
  }

  if (HasFP) {
    // Copy the base of the frame to R11.
    BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR), SystemZ::R11D)
        .addReg(SystemZ::R15D);

    // Add CFI for the new frame location.
    buildDefCFAReg(MBB, MBBI, DL, SystemZ::R11D, ZII);

    // Mark the FramePtr as live at the beginning of every block except
    // the entry block. (We'll have marked R11 as live on entry when
    // saving the GPRs.)
    for (MachineBasicBlock &MBBJ : llvm::drop_begin(MF))
      MBBJ.addLiveIn(SystemZ::R11D);
  }

  // Skip over the FPR/VR saves.
  SmallVector<unsigned, 8> CFIIndexes;
  for (auto &Save : CSI) {
    Register Reg = Save.getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      if (MBBI != MBB.end() &&
          (MBBI->getOpcode() == SystemZ::STD ||
           MBBI->getOpcode() == SystemZ::STDY))
        ++MBBI;
      else
        llvm_unreachable("Couldn't skip over FPR save");
    } else if (SystemZ::VR128BitRegClass.contains(Reg)) {
      if (MBBI != MBB.end() &&
          MBBI->getOpcode() == SystemZ::VST)
        ++MBBI;
      else
        llvm_unreachable("Couldn't skip over VR save");
    } else
      continue;

    // Add CFI for this save.
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    Register IgnoredFrameReg;
    int64_t Offset =
        getFrameIndexReference(MF, Save.getFrameIdx(), IgnoredFrameReg)
            .getFixed();

    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
        nullptr, DwarfReg, SPOffsetFromCFA + Offset));
    CFIIndexes.push_back(CFIIndex);
  }
  // Complete the CFI for the FPR/VR saves, modelling them as taking effect
  // after the last save.
  for (auto CFIIndex : CFIIndexes) {
    BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

void SystemZELFFrameLowering::emitEpilogue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  auto *ZII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();

  // See SystemZELFFrameLowering::emitPrologue
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Skip the return instruction.
  assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks");

  uint64_t StackSize = MFFrame.getStackSize();
  if (ZFI->getRestoreGPRRegs().LowGPR) {
    --MBBI;
    unsigned Opcode = MBBI->getOpcode();
    if (Opcode != SystemZ::LMG)
      llvm_unreachable("Expected to see callee-save register restore code");

    unsigned AddrOpNo = 2;
    DebugLoc DL = MBBI->getDebugLoc();
    uint64_t Offset = StackSize + MBBI->getOperand(AddrOpNo + 1).getImm();
    unsigned NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);

    // If the offset is too large, use the largest stack-aligned offset
    // and add the rest to the base register (the stack or frame pointer).
    if (!NewOpcode) {
      uint64_t NumBytes = Offset - 0x7fff8;
      emitIncrement(MBB, MBBI, DL, MBBI->getOperand(AddrOpNo).getReg(),
                    NumBytes, ZII);
      Offset -= NumBytes;
      NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);
      assert(NewOpcode && "No restore instruction available");
    }

    MBBI->setDesc(ZII->get(NewOpcode));
    MBBI->getOperand(AddrOpNo + 1).ChangeToImmediate(Offset);
  } else if (StackSize) {
    DebugLoc DL = MBBI->getDebugLoc();
    emitIncrement(MBB, MBBI, DL, SystemZ::R15D, StackSize, ZII);
  }
}

void SystemZELFFrameLowering::inlineStackProbe(
    MachineFunction &MF, MachineBasicBlock &PrologMBB) const {
  auto *ZII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZSubtarget &STI = MF.getSubtarget<SystemZSubtarget>();
  const SystemZTargetLowering &TLI = *STI.getTargetLowering();

  MachineInstr *StackAllocMI = nullptr;
  for (MachineInstr &MI : PrologMBB)
    if (MI.getOpcode() == SystemZ::PROBED_STACKALLOC) {
      StackAllocMI = &MI;
      break;
    }
  if (StackAllocMI == nullptr)
    return;
  uint64_t StackSize = StackAllocMI->getOperand(0).getImm();
  const unsigned ProbeSize = TLI.getStackProbeSize(MF);
  uint64_t NumFullBlocks = StackSize / ProbeSize;
  uint64_t Residual = StackSize % ProbeSize;
  int64_t SPOffsetFromCFA = -SystemZMC::ELFCFAOffsetFromInitialSP;
  MachineBasicBlock *MBB = &PrologMBB;
  MachineBasicBlock::iterator MBBI = StackAllocMI;
  const DebugLoc DL = StackAllocMI->getDebugLoc();

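  // The allocation is probed in ProbeSize chunks: fewer than three full
  // chunks are emitted inline, otherwise a compare-and-branch loop walks the
  // pages, and any residual allocation is probed last.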
  // Allocate a block of Size bytes on the stack and probe it.
  auto allocateAndProbe = [&](MachineBasicBlock &InsMBB,
                              MachineBasicBlock::iterator InsPt, unsigned Size,
                              bool EmitCFI) -> void {
    emitIncrement(InsMBB, InsPt, DL, SystemZ::R15D, -int64_t(Size), ZII);
    if (EmitCFI) {
      SPOffsetFromCFA -= Size;
      buildCFAOffs(InsMBB, InsPt, DL, SPOffsetFromCFA, ZII);
    }
    // Probe by means of a volatile compare.
    MachineMemOperand *MMO = MF.getMachineMemOperand(MachinePointerInfo(),
        MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad, 8, Align(1));
    BuildMI(InsMBB, InsPt, DL, ZII->get(SystemZ::CG))
        .addReg(SystemZ::R0D, RegState::Undef)
        .addReg(SystemZ::R15D).addImm(Size - 8).addReg(0)
        .addMemOperand(MMO);
  };

  bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain();
  if (StoreBackchain)
    BuildMI(*MBB, MBBI, DL, ZII->get(SystemZ::LGR))
      .addReg(SystemZ::R1D, RegState::Define).addReg(SystemZ::R15D);

  MachineBasicBlock *DoneMBB = nullptr;
  MachineBasicBlock *LoopMBB = nullptr;
  if (NumFullBlocks < 3) {
    // Emit unrolled probe statements.
    for (unsigned int i = 0; i < NumFullBlocks; i++)
      allocateAndProbe(*MBB, MBBI, ProbeSize, true/*EmitCFI*/);
  } else {
    // Emit a loop probing the pages.
    uint64_t LoopAlloc = ProbeSize * NumFullBlocks;
    SPOffsetFromCFA -= LoopAlloc;

    // Use R0D to hold the exit value.
    BuildMI(*MBB, MBBI, DL, ZII->get(SystemZ::LGR), SystemZ::R0D)
        .addReg(SystemZ::R15D);
    buildDefCFAReg(*MBB, MBBI, DL, SystemZ::R0D, ZII);
    emitIncrement(*MBB, MBBI, DL, SystemZ::R0D, -int64_t(LoopAlloc), ZII);
    buildCFAOffs(*MBB, MBBI, DL, -int64_t(SystemZMC::ELFCallFrameSize + LoopAlloc),
                 ZII);

    DoneMBB = SystemZ::splitBlockBefore(MBBI, MBB);
    LoopMBB = SystemZ::emitBlockAfter(MBB);
    MBB->addSuccessor(LoopMBB);
    LoopMBB->addSuccessor(LoopMBB);
    LoopMBB->addSuccessor(DoneMBB);

    MBB = LoopMBB;
    allocateAndProbe(*MBB, MBB->end(), ProbeSize, false/*EmitCFI*/);
    BuildMI(*MBB, MBB->end(), DL, ZII->get(SystemZ::CLGR))
        .addReg(SystemZ::R15D).addReg(SystemZ::R0D);
    BuildMI(*MBB, MBB->end(), DL, ZII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_GT).addMBB(MBB);

    MBB = DoneMBB;
    MBBI = DoneMBB->begin();
    buildDefCFAReg(*MBB, MBBI, DL, SystemZ::R15D, ZII);
  }

  if (Residual)
    allocateAndProbe(*MBB, MBBI, Residual, true/*EmitCFI*/);

  if (StoreBackchain)
    BuildMI(*MBB, MBBI, DL, ZII->get(SystemZ::STG))
      .addReg(SystemZ::R1D, RegState::Kill).addReg(SystemZ::R15D)
      .addImm(getBackchainOffset(MF)).addReg(0);

  StackAllocMI->eraseFromParent();
  if (DoneMBB != nullptr) {
    // Compute the live-in lists for the new blocks.
    fullyRecomputeLiveIns({DoneMBB, LoopMBB});
  }
}

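// The frame pointer (%r11) is required when frame-pointer elimination is
// disabled or when the function has variable-sized stack objects.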
bool SystemZELFFrameLowering::hasFP(const MachineFunction &MF) const {
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          MF.getFrameInfo().hasVarSizedObjects());
}

StackOffset SystemZELFFrameLowering::getFrameIndexReference(
    const MachineFunction &MF, int FI, Register &FrameReg) const {
  // Our incoming SP is actually SystemZMC::ELFCallFrameSize below the CFA, so
  // add that difference here.
  StackOffset Offset =
      TargetFrameLowering::getFrameIndexReference(MF, FI, FrameReg);
  return Offset + StackOffset::getFixed(SystemZMC::ELFCallFrameSize);
}

unsigned SystemZELFFrameLowering::getRegSpillOffset(MachineFunction &MF,
                                                    Register Reg) const {
  bool IsVarArg = MF.getFunction().isVarArg();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  bool BackChain = Subtarget.hasBackChain();
  bool SoftFloat = Subtarget.hasSoftFloat();
  unsigned Offset = RegSpillOffsets[Reg];
  if (usePackedStack(MF) && !(IsVarArg && !SoftFloat)) {
    if (SystemZ::GR64BitRegClass.contains(Reg))
      // Put all GPRs at the top of the Register save area with packed
      // stack. Make room for the backchain if needed.
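      // (For example, %r15 then lands at offset 152 from the incoming SP,
      // or at 144 when the topmost 8 bytes hold the backchain.)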
      Offset += BackChain ? 24 : 32;
    else
      Offset = 0;
  }
  return Offset;
}

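// Return the frame index of the fixed slot at the backchain offset,
// creating the object on first use.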
int SystemZELFFrameLowering::getOrCreateFramePointerSaveIndex(
    MachineFunction &MF) const {
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  int FI = ZFI->getFramePointerSaveIndex();
  if (!FI) {
    MachineFrameInfo &MFFrame = MF.getFrameInfo();
    int Offset = getBackchainOffset(MF) - SystemZMC::ELFCallFrameSize;
    FI = MFFrame.CreateFixedObject(getPointerSize(), Offset, false);
    ZFI->setFramePointerSaveIndex(FI);
  }
  return FI;
}

bool SystemZELFFrameLowering::usePackedStack(MachineFunction &MF) const {
  bool HasPackedStackAttr = MF.getFunction().hasFnAttribute("packed-stack");
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  bool BackChain = Subtarget.hasBackChain();
  bool SoftFloat = Subtarget.hasSoftFloat();
  if (HasPackedStackAttr && BackChain && !SoftFloat)
    report_fatal_error("packed-stack + backchain + hard-float is unsupported.");
  bool CallConv = MF.getFunction().getCallingConv() != CallingConv::GHC;
  return HasPackedStackAttr && CallConv;
}

SystemZXPLINKFrameLowering::SystemZXPLINKFrameLowering(unsigned PointerSize)
    : SystemZFrameLowering(TargetFrameLowering::StackGrowsDown, Align(32), 0,
                           Align(32), /* StackRealignable */ false,
                           PointerSize),
      RegSpillOffsets(-1) {

  // Create a mapping from register number to save slot offset.
  // These offsets are relative to the start of the local area.
  RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
  for (const auto &Entry : XPLINKSpillOffsetTable)
    RegSpillOffsets[Entry.Reg] = Entry.Offset;
}

int SystemZXPLINKFrameLowering::getOrCreateFramePointerSaveIndex(
    MachineFunction &MF) const {
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  int FI = ZFI->getFramePointerSaveIndex();
  if (!FI) {
    MachineFrameInfo &MFFrame = MF.getFrameInfo();
    FI = MFFrame.CreateFixedObject(getPointerSize(), 0, false);
    MFFrame.setStackID(FI, TargetStackID::NoAlloc);
    ZFI->setFramePointerSaveIndex(FI);
  }
  return FI;
}

// Checks if the function is a potential candidate for being an XPLeaf routine.
static bool isXPLeafCandidate(const MachineFunction &MF) {
  const MachineFrameInfo &MFFrame = MF.getFrameInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto *Regs =
      static_cast<SystemZXPLINK64Registers *>(Subtarget.getSpecialRegisters());

  // If the function calls other functions (including alloca), then it is not
  // an XPLeaf routine.
  if (MFFrame.hasCalls())
    return false;

  // If the function has variable-sized objects, then it is not an XPLeaf
  // routine.
  if (MFFrame.hasVarSizedObjects())
    return false;

  // If the function adjusts the stack, then it is not an XPLeaf routine.
  if (MFFrame.adjustsStack())
    return false;

  // If the function modifies the stack pointer register, then it is not an
  // XPLeaf routine.
  if (MRI.isPhysRegModified(Regs->getStackPointerRegister()))
    return false;

  // If the function modifies the ADA register, then it is not an XPLeaf
  // routine.
  if (MRI.isPhysRegModified(Regs->getAddressOfCalleeRegister()))
    return false;

  // If the function modifies the return address register, then it is not an
  // XPLeaf routine.
  if (MRI.isPhysRegModified(Regs->getReturnFunctionAddressRegister()))
    return false;

  // If the backchain pointer should be stored, then it is not an XPLeaf
  // routine.
  if (MF.getSubtarget<SystemZSubtarget>().hasBackChain())
    return false;

  // If the function acquires its own stack frame, then it is not an XPLeaf
  // routine. At the time this function is called, only slots for local
  // variables are allocated, so this is a very rough estimate.
  if (MFFrame.estimateStackSize(MF) > 0)
    return false;

  return true;
}

bool SystemZXPLINKFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();
  auto &GRRegClass = SystemZ::GR64BitRegClass;

  // At this point, the result of isXPLeafCandidate() is not accurate because
  // the size of the save area has not yet been determined. If
  // isXPLeafCandidate() indicates a potential leaf function, and there are no
  // callee-save registers, then it is indeed a leaf function, and we can early
  // exit.
  // TODO: It is possible for leaf functions to use callee-saved registers.
  // It can use the 0-2k range between R4 and the caller's stack frame without
  // acquiring its own stack frame.
  bool IsLeaf = CSI.empty() && isXPLeafCandidate(MF);
  if (IsLeaf)
    return true;

  // For non-leaf functions:
  // - the address of callee (entry point) register R6 must be saved
  CSI.push_back(CalleeSavedInfo(Regs.getAddressOfCalleeRegister()));
  CSI.back().setRestored(false);

  // The return address register R7 must be saved and restored.
  CSI.push_back(CalleeSavedInfo(Regs.getReturnFunctionAddressRegister()));

  // If the function needs a frame pointer, or if the backchain pointer should
  // be stored, then save the stack pointer register R4.
  if (hasFP(MF) || Subtarget.hasBackChain())
    CSI.push_back(CalleeSavedInfo(Regs.getStackPointerRegister()));

  // If this function has an associated personality function then the
  // environment register R5 must be saved in the DSA.
  if (!MF.getLandingPads().empty())
    CSI.push_back(CalleeSavedInfo(Regs.getADARegister()));

  // Scan the call-saved GPRs and find the bounds of the register spill area.
  Register LowRestoreGPR = 0;
  int LowRestoreOffset = INT32_MAX;
  Register LowSpillGPR = 0;
  int LowSpillOffset = INT32_MAX;
  Register HighGPR = 0;
  int HighOffset = -1;

  // Query index of the saved frame pointer.
  int FPSI = MFI->getFramePointerSaveIndex();

  for (auto &CS : CSI) {
    Register Reg = CS.getReg();
    int Offset = RegSpillOffsets[Reg];
    if (Offset >= 0) {
      if (GRRegClass.contains(Reg)) {
        if (LowSpillOffset > Offset) {
          LowSpillOffset = Offset;
          LowSpillGPR = Reg;
        }
        if (CS.isRestored() && LowRestoreOffset > Offset) {
          LowRestoreOffset = Offset;
          LowRestoreGPR = Reg;
        }

        if (Offset > HighOffset) {
          HighOffset = Offset;
          HighGPR = Reg;
        }
        // Non-volatile GPRs are saved in the dedicated register save area at
        // the bottom of the stack and are not truly part of the "normal" stack
        // frame. Mark the frame index as NoAlloc to indicate it as such.
        unsigned RegSize = getPointerSize();
        int FrameIdx =
            (FPSI && Offset == 0)
                ? FPSI
                : MFFrame.CreateFixedSpillStackObject(RegSize, Offset);
        CS.setFrameIdx(FrameIdx);
        MFFrame.setStackID(FrameIdx, TargetStackID::NoAlloc);
      }
    } else {
      Register Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      Align Alignment = TRI->getSpillAlign(*RC);
      unsigned Size = TRI->getSpillSize(*RC);
      Alignment = std::min(Alignment, getStackAlign());
      int FrameIdx = MFFrame.CreateStackObject(Size, Alignment, true);
      CS.setFrameIdx(FrameIdx);
    }
  }

  // Save the range of call-saved registers, for use by the
  // prologue/epilogue inserters.
  if (LowRestoreGPR)
    MFI->setRestoreGPRRegs(LowRestoreGPR, HighGPR, LowRestoreOffset);

  // Save the range of call-saved registers, for use by the epilogue inserter.
  assert(LowSpillGPR && "Expected registers to spill");
  MFI->setSpillGPRRegs(LowSpillGPR, HighGPR, LowSpillOffset);

  return true;
}

void SystemZXPLINKFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                      BitVector &SavedRegs,
                                                      RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  bool HasFP = hasFP(MF);
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();

  // If the function requires a frame pointer, record that the hard
  // frame pointer will be clobbered.
  if (HasFP)
    SavedRegs.set(Regs.getFramePointerRegister());
}

bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction &MF = *MBB.getParent();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();
  SystemZ::GPRRegs SpillGPRs = ZFI->getSpillGPRRegs();
  DebugLoc DL;

  // Save GPRs
  if (SpillGPRs.LowGPR) {
    assert(SpillGPRs.LowGPR != SpillGPRs.HighGPR &&
           "Should be saving multiple registers");

    // Build an STM/STMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));

    // Add the explicit register operands.
    addSavedGPR(MBB, MIB, SpillGPRs.LowGPR, false);
    addSavedGPR(MBB, MIB, SpillGPRs.HighGPR, false);

    // Add the address r4
    MIB.addReg(Regs.getStackPointerRegister());

    // Add the partial offset.
    // We cannot add the actual offset yet, as the stack frame has not been
    // finalized at this point.
    MIB.addImm(SpillGPRs.GPROffset);

    // Make sure all call-saved GPRs are included as operands and are
    // marked as live on entry.
    auto &GRRegClass = SystemZ::GR64BitRegClass;
    for (const CalleeSavedInfo &I : CSI) {
      Register Reg = I.getReg();
      if (GRRegClass.contains(Reg))
        addSavedGPR(MBB, MIB, Reg, true);
    }
  }

  // Spill FPRs to the stack in the normal TargetInstrInfo way
  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
                               &SystemZ::FP64BitRegClass, TRI, Register());
    }
    if (SystemZ::VR128BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
                               &SystemZ::VR128BitRegClass, TRI, Register());
    }
  }

  return true;
}

bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {

  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();

  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Restore FPRs in the normal TargetInstrInfo way.
  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
                                &SystemZ::FP64BitRegClass, TRI, Register());
    if (SystemZ::VR128BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
                                &SystemZ::VR128BitRegClass, TRI, Register());
  }

  // Restore call-saved GPRs (but not call-clobbered varargs, which at
  // this point might hold return values).
  SystemZ::GPRRegs RestoreGPRs = ZFI->getRestoreGPRRegs();
  if (RestoreGPRs.LowGPR) {
    assert(isInt<20>(Regs.getStackPointerBias() + RestoreGPRs.GPROffset));
    if (RestoreGPRs.LowGPR == RestoreGPRs.HighGPR)
      // Build an LG/L instruction.
      BuildMI(MBB, MBBI, DL, TII->get(SystemZ::LG), RestoreGPRs.LowGPR)
          .addReg(Regs.getStackPointerRegister())
          .addImm(Regs.getStackPointerBias() + RestoreGPRs.GPROffset)
          .addReg(0);
    else {
      // Build an LMG/LM instruction.
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::LMG));

      // Add the explicit register operands.
      MIB.addReg(RestoreGPRs.LowGPR, RegState::Define);
      MIB.addReg(RestoreGPRs.HighGPR, RegState::Define);

      // Add the address.
      MIB.addReg(Regs.getStackPointerRegister());
      MIB.addImm(Regs.getStackPointerBias() + RestoreGPRs.GPROffset);

      // Do a second scan adding regs as being defined by instruction
      for (const CalleeSavedInfo &I : CSI) {
        Register Reg = I.getReg();
        if (Reg > RestoreGPRs.LowGPR && Reg < RestoreGPRs.HighGPR)
          MIB.addReg(Reg, RegState::ImplicitDefine);
      }
    }
  }

  return true;
}

void SystemZXPLINKFrameLowering::emitPrologue(MachineFunction &MF,
                                              MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  auto *ZII = static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineInstr *StoreInstr = nullptr;

  determineFrameLayout(MF);

  bool HasFP = hasFP(MF);
  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;
  uint64_t Offset = 0;

  const uint64_t StackSize = MFFrame.getStackSize();

  if (ZFI->getSpillGPRRegs().LowGPR) {
    // Skip over the GPR saves.
    if ((MBBI != MBB.end()) && ((MBBI->getOpcode() == SystemZ::STMG))) {
      const int Operand = 3;
      // Now we can set the offset for the operation, since now the Stack
      // has been finalized.
      Offset = Regs.getStackPointerBias() + MBBI->getOperand(Operand).getImm();
      // Maximum displacement for STMG instruction.
      if (isInt<20>(Offset - StackSize))
        Offset -= StackSize;
      else
        StoreInstr = &*MBBI;
      MBBI->getOperand(Operand).setImm(Offset);
      ++MBBI;
    } else
      llvm_unreachable("Couldn't skip over GPR saves");
  }

  if (StackSize) {
    MachineBasicBlock::iterator InsertPt = StoreInstr ? StoreInstr : MBBI;
    // Allocate StackSize bytes.
    int64_t Delta = -int64_t(StackSize);

    // If the STM(G) instruction also stores SP (R4) but the displacement is
    // too large, the SP register would be adjusted before the store and the
    // wrong value would be stored and retrieved later. In this case we need
    // to temporarily save the value of SP and store it to memory afterwards.
    if (StoreInstr && HasFP) {
      // Insert LR r0,r4 before STMG instruction.
      BuildMI(MBB, InsertPt, DL, ZII->get(SystemZ::LGR))
          .addReg(SystemZ::R0D, RegState::Define)
          .addReg(SystemZ::R4D);
      // Insert ST r0,xxx(,r4) after STMG instruction.
      BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::STG))
          .addReg(SystemZ::R0D, RegState::Kill)
          .addReg(SystemZ::R4D)
          .addImm(Offset)
          .addReg(0);
    }

    emitIncrement(MBB, InsertPt, DL, Regs.getStackPointerRegister(), Delta,
                  ZII);

    // If the requested stack size is larger than the guard page, then we need
    // to check if we need to call the stack extender. This requires adding a
    // conditional branch, but splitting the prologue block is not possible at
    // this point since it would invalidate the SaveBlocks / RestoreBlocks sets
    // of PEI in the single block function case. Build a pseudo to be handled
    // later by inlineStackProbe().
    const uint64_t GuardPageSize = 1024 * 1024;
    if (StackSize > GuardPageSize) {
      assert(StoreInstr && "Wrong insertion point");
      BuildMI(MBB, InsertPt, DL, ZII->get(SystemZ::XPLINK_STACKALLOC));
    }
  }

  if (HasFP) {
    // Copy the base of the frame to the Frame Pointer Register.
    BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR),
            Regs.getFramePointerRegister())
        .addReg(Regs.getStackPointerRegister());

    // Mark the FramePtr as live at the beginning of every block except
    // the entry block. (We'll have marked R8 as live on entry when
    // saving the GPRs.)
    for (MachineBasicBlock &B : llvm::drop_begin(MF))
      B.addLiveIn(Regs.getFramePointerRegister());
  }

  // Save GPRs used for varargs, if any.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  bool IsVarArg = MF.getFunction().isVarArg();

  if (IsVarArg) {
    // FixedRegs is the number of used registers, accounting for shadow
    // registers.
    unsigned FixedRegs = ZFI->getVarArgsFirstGPR() + ZFI->getVarArgsFirstFPR();
    auto &GPRs = SystemZ::XPLINK64ArgGPRs;
    for (unsigned I = FixedRegs; I < SystemZ::XPLINK64NumArgGPRs; I++) {
      uint64_t StartOffset = MFFrame.getOffsetAdjustment() +
                             MFFrame.getStackSize() + Regs.getCallFrameSize() +
                             getOffsetOfLocalArea() + I * getPointerSize();
      unsigned Reg = GPRs[I];
      BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STG))
          .addReg(Reg)
          .addReg(Regs.getStackPointerRegister())
          .addImm(StartOffset)
          .addReg(0);
      if (!MBB.isLiveIn(Reg))
        MBB.addLiveIn(Reg);
    }
  }
}

void SystemZXPLINKFrameLowering::emitEpilogue(MachineFunction &MF,
                                              MachineBasicBlock &MBB) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  auto *ZII = static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();

  // Skip the return instruction.
  assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks");

  uint64_t StackSize = MFFrame.getStackSize();
  if (StackSize) {
    unsigned SPReg = Regs.getStackPointerRegister();
    if (ZFI->getRestoreGPRRegs().LowGPR != SPReg) {
      DebugLoc DL = MBBI->getDebugLoc();
      emitIncrement(MBB, MBBI, DL, SPReg, StackSize, ZII);
    }
  }
}

// Emit a compare of the stack pointer against the stack floor, and a call to
// the LE stack extender if needed.
void SystemZXPLINKFrameLowering::inlineStackProbe(
    MachineFunction &MF, MachineBasicBlock &PrologMBB) const {
  auto *ZII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());

  MachineInstr *StackAllocMI = nullptr;
  for (MachineInstr &MI : PrologMBB)
    if (MI.getOpcode() == SystemZ::XPLINK_STACKALLOC) {
      StackAllocMI = &MI;
      break;
    }
  if (StackAllocMI == nullptr)
    return;

  bool NeedSaveSP = hasFP(MF);
  bool NeedSaveArg = PrologMBB.isLiveIn(SystemZ::R3D);
  const int64_t SaveSlotR3 = 2192;

  MachineBasicBlock &MBB = PrologMBB;
  const DebugLoc DL = StackAllocMI->getDebugLoc();

  // The 2nd half of block MBB after split.
  MachineBasicBlock *NextMBB;

  // Add new basic block for the call to the stack overflow function.
  MachineBasicBlock *StackExtMBB =
      MF.CreateMachineBasicBlock(MBB.getBasicBlock());
  MF.push_back(StackExtMBB);

  // LG r3,72(,r3)
  BuildMI(StackExtMBB, DL, ZII->get(SystemZ::LG), SystemZ::R3D)
      .addReg(SystemZ::R3D)
      .addImm(72)
      .addReg(0);
  // BASR r3,r3
  BuildMI(StackExtMBB, DL, ZII->get(SystemZ::CallBASR_STACKEXT))
      .addReg(SystemZ::R3D);
  if (NeedSaveArg) {
    if (!NeedSaveSP) {
      // LGR r0,r3
      BuildMI(MBB, StackAllocMI, DL, ZII->get(SystemZ::LGR))
          .addReg(SystemZ::R0D, RegState::Define)
          .addReg(SystemZ::R3D);
    } else {
      // In this case, the incoming value of r4 is saved in r0 so the
      // latter register is unavailable. Store r3 in its corresponding
      // slot in the parameter list instead. Do this at the start of
      // the prolog before r4 is manipulated by anything else.
      // STG r3, 2192(r4)
      BuildMI(MBB, MBB.begin(), DL, ZII->get(SystemZ::STG))
          .addReg(SystemZ::R3D)
          .addReg(SystemZ::R4D)
          .addImm(SaveSlotR3)
          .addReg(0);
    }
  }
  // LLGT r3,1208
  BuildMI(MBB, StackAllocMI, DL, ZII->get(SystemZ::LLGT), SystemZ::R3D)
      .addReg(0)
      .addImm(1208)
      .addReg(0);
  // CG r4,64(,r3)
  BuildMI(MBB, StackAllocMI, DL, ZII->get(SystemZ::CG))
      .addReg(SystemZ::R4D)
      .addReg(SystemZ::R3D)
      .addImm(64)
      .addReg(0);
  // JLL b'0100',F'37'
  BuildMI(MBB, StackAllocMI, DL, ZII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP)
      .addImm(SystemZ::CCMASK_CMP_LT)
      .addMBB(StackExtMBB);

  NextMBB = SystemZ::splitBlockBefore(StackAllocMI, &MBB);
  MBB.addSuccessor(NextMBB);
  MBB.addSuccessor(StackExtMBB);
  if (NeedSaveArg) {
    if (!NeedSaveSP) {
      // LGR r3, r0
      BuildMI(*NextMBB, StackAllocMI, DL, ZII->get(SystemZ::LGR))
          .addReg(SystemZ::R3D, RegState::Define)
          .addReg(SystemZ::R0D, RegState::Kill);
    } else {
      // In this case, the incoming value of r4 is saved in r0 so the
      // latter register is unavailable. We stored r3 in its corresponding
      // slot in the parameter list instead and we now restore it from there.
      // LGR r3, r0
      BuildMI(*NextMBB, StackAllocMI, DL, ZII->get(SystemZ::LGR))
          .addReg(SystemZ::R3D, RegState::Define)
          .addReg(SystemZ::R0D);
      // LG r3, 2192(r3)
      BuildMI(*NextMBB, StackAllocMI, DL, ZII->get(SystemZ::LG))
          .addReg(SystemZ::R3D, RegState::Define)
          .addReg(SystemZ::R3D)
          .addImm(SaveSlotR3)
          .addReg(0);
    }
  }

  // Add jump back from stack extension BB.
  BuildMI(StackExtMBB, DL, ZII->get(SystemZ::J)).addMBB(NextMBB);
  StackExtMBB->addSuccessor(NextMBB);

  StackAllocMI->eraseFromParent();

  // Compute the live-in lists for the new blocks.
  fullyRecomputeLiveIns({StackExtMBB, NextMBB});
}

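// XPLINK needs a frame pointer (%r8) only when the function has
// variable-sized stack objects, since the stack pointer then no longer
// addresses locals at fixed offsets.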
bool SystemZXPLINKFrameLowering::hasFP(const MachineFunction &MF) const {
  return (MF.getFrameInfo().hasVarSizedObjects());
}

void SystemZXPLINKFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();

  // Setup stack frame offset
  MFFrame.setOffsetAdjustment(Regs.getStackPointerBias());

  // Nothing to do for leaf functions.
  uint64_t StackSize = MFFrame.estimateStackSize(MF);
  if (StackSize == 0 && MFFrame.getCalleeSavedInfo().empty())
    return;

  // Although the XPLINK specification for AMODE64 only requires a minimum
  // parameter area size of 32 bytes and specifies no other rounding, we round
  // this area up in 64-byte increments to be compatible with existing
  // compilers.
  MFFrame.setMaxCallFrameSize(
      std::max(64U, (unsigned)alignTo(MFFrame.getMaxCallFrameSize(), 64)));
}

// Determines the size of the frame, and creates the deferred spill objects.
void SystemZXPLINKFrameLowering::determineFrameLayout(
    MachineFunction &MF) const {
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto *Regs =
      static_cast<SystemZXPLINK64Registers *>(Subtarget.getSpecialRegisters());

  uint64_t StackSize = MFFrame.getStackSize();
  if (StackSize == 0)
    return;

  // Add the size of the register save area and the reserved area to the size.
  StackSize += Regs->getCallFrameSize();
  MFFrame.setStackSize(StackSize);

  // We now know the stack size. Update the stack objects for the register save
  // area now. This has no impact on the stack frame layout, as this is already
  // computed. However, it makes sure that all callee saved registers have a
  // valid offset assigned.
  for (int FrameIdx = MFFrame.getObjectIndexBegin(); FrameIdx != 0;
       ++FrameIdx) {
    if (MFFrame.getStackID(FrameIdx) == TargetStackID::NoAlloc) {
      int64_t SPOffset = MFFrame.getObjectOffset(FrameIdx);
      SPOffset -= StackSize;
      MFFrame.setObjectOffset(FrameIdx, SPOffset);
    }
  }
}
