//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "ARMCallLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <utility>

using namespace llvm;

ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}

static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                            Type *T) {
  if (T->isArrayTy())
    return isSupportedType(DL, TLI, T->getArrayElementType());

  if (T->isStructTy()) {
    // For now we only allow homogeneous structs that we can manipulate with
    // G_MERGE_VALUES and G_UNMERGE_VALUES
    auto StructT = cast<StructType>(T);
    for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
      if (StructT->getElementType(i) != StructT->getElementType(0))
        return false;
    return isSupportedType(DL, TLI, StructT->getElementType(0));
  }

  EVT VT = TLI.getValueType(DL, T, /*AllowUnknown=*/true);
  if (!VT.isSimple() || VT.isVector() ||
      !(VT.isInteger() || VT.isFloatingPoint()))
    return false;

  unsigned VTSize = VT.getSimpleVT().getSizeInBits();

  if (VTSize == 64)
    // FIXME: Support i64 too
    return VT.isFloatingPoint();

  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
}

namespace {

/// Helper class for values going out through an ABI boundary (used for
/// handling function return values and call parameters).
struct ARMOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  ARMOutgoingValueHandler(MachineIRBuilder &MIRBuilder,
                          MachineRegisterInfo &MRI, MachineInstrBuilder &MIB)
      : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

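    // Outgoing stack arguments live at fixed offsets from the current stack
    // pointer, so materialize SP + Offset as the address to store to.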
    LLT p0 = LLT::pointer(0, 32);
    LLT s32 = LLT::scalar(32);
    auto SPReg = MIRBuilder.buildCopy(p0, Register(ARM::SP));

    auto OffsetReg = MIRBuilder.buildConstant(s32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be in reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    Register ExtReg = extendRegister(ValVReg, VA);
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, MemTy, Align(1));
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    const CCValAssign &VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");

    // Custom lowering for other types, such as f16, is currently not supported
    if (VA.getValVT() != MVT::f64)
      return 0;

    const CCValAssign &NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

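    // Per the AAPCS, an f64 passed in core registers occupies a pair of GPRs,
    // so split the 64-bit value into two 32-bit halves here.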
    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

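    // When a thunk is requested, defer the copies into the physical registers;
    // the generic argument-handling code invokes the thunk after the remaining
    // arguments have been processed.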
    if (Thunk) {
      *Thunk = [=]() {
        assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
        assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
      };
      return 2;
    }
    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
    return 2;
  }

  MachineInstrBuilder MIB;
};

} // end anonymous namespace

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();

  const auto &DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, F.getCallingConv());

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  OutgoingValueAssigner RetAssigner(AssignFn);
  ARMOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
  return determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                       MIRBuilder, F.getCallingConv(),
                                       F.isVarArg());
}

bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val, ArrayRef<Register> VRegs,
                                  FunctionLoweringInfo &FLI) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");

  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
  unsigned Opcode = ST.getReturnOpcode();
  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

namespace {

/// Helper class for values coming in through an ABI boundary (used for
/// handling formal arguments and call return values).
struct ARMIncomingValueHandler : public CallLowering::IncomingValueHandler {
  ARMIncomingValueHandler(MachineIRBuilder &MIRBuilder,
                          MachineRegisterInfo &MRI)
      : IncomingValueHandler(MIRBuilder, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();

    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    return MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI)
        .getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    if (VA.getLocInfo() == CCValAssign::SExt ||
        VA.getLocInfo() == CCValAssign::ZExt) {
      // If the value is zero- or sign-extended, its size becomes 4 bytes, so
      // that's what we should load.
      MemTy = LLT::scalar(32);
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");

      auto LoadVReg = buildLoad(LLT::scalar(32), Addr, MemTy, MPO);
      MIRBuilder.buildTrunc(ValVReg, LoadVReg);
    } else {
      // If the value is not extended, a simple load will suffice.
      buildLoad(ValVReg, Addr, MemTy, MPO);
    }
  }

  MachineInstrBuilder buildLoad(const DstOp &Res, Register Addr, LLT MemTy,
                                const MachinePointerInfo &MPO) {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    return MIRBuilder.buildLoad(Res, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be in reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    uint64_t ValSize = VA.getValVT().getFixedSizeInBits();
    uint64_t LocSize = VA.getLocVT().getFixedSizeInBits();

    assert(ValSize <= 64 && "Unsupported value size");
    assert(LocSize <= 64 && "Unsupported location size");

    markPhysRegUsed(PhysReg);
    if (ValSize == LocSize) {
      MIRBuilder.buildCopy(ValVReg, PhysReg);
    } else {
      assert(ValSize < LocSize && "Extensions not supported");

      // We cannot create a truncating copy, nor a trunc of a physical
      // register. Therefore, we need to copy the content of the physical
      // register into a virtual one and then truncate that.
      auto PhysRegToVReg = MIRBuilder.buildCopy(LLT::scalar(LocSize), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
    }
  }

  unsigned assignCustomValue(ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    const CCValAssign &VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");

    // Custom lowering for other types, such as f16, is currently not supported
    if (VA.getValVT() != MVT::f64)
      return 0;

    const CCValAssign &NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

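    // Mirror image of the outgoing case: an f64 arrives as a pair of GPRs.
    // Copy each 32-bit half out of its register and merge them back into a
    // single 64-bit value.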
    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);

    return 2;
  }

  /// Marking a physical register as used is different between formal
  /// parameters, where it's a basic block live-in, and call returns, where it's
  /// an implicit-def of the call instruction.
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public ARMIncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : ARMIncomingValueHandler(MIRBuilder, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                           const Function &F,
                                           ArrayRef<ArrayRef<Register>> VRegs,
                                           FunctionLoweringInfo &FLI) const {
  auto &TLI = *getTLI<ARMTargetLowering>();
  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb1Only())
    return false;

  // Quick exit if there aren't any args
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto &MBB = MIRBuilder.getMBB();
  const auto &DL = MF.getDataLayout();

  for (auto &Arg : F.args()) {
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;
    if (Arg.hasPassPointeeByValueCopyAttr())
      return false;
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  OutgoingValueAssigner ArgAssigner(AssignFn);
  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo());

  SmallVector<ArgInfo, 8> SplitArgInfos;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {
    ArgInfo OrigArgInfo(VRegs[Idx], Arg.getType(), Idx);

    setArgFlags(OrigArgInfo, Idx + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(OrigArgInfo, SplitArgInfos, DL, F.getCallingConv());

    Idx++;
  }

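  // Emit the argument lowering code at the very start of the entry block, so
  // the copies from the parameter registers precede anything already emitted.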
  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                     MIRBuilder, F.getCallingConv(),
                                     F.isVarArg()))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);
  return true;
}

namespace {

struct CallReturnHandler : public ARMIncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : ARMIncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

// FIXME: This should move to the ARMSubtarget when it supports all the
// opcodes.
unsigned getCallOpcode(const MachineFunction &MF, const ARMSubtarget &STI,
                       bool isDirect) {
  if (isDirect)
    return STI.isThumb() ? ARM::tBL : ARM::BL;

  if (STI.isThumb())
    return gettBLXrOpcode(MF);

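  // Pre-v5T architectures have no BLX, so indirect calls go through pseudos
  // that set up LR manually before branching to the target register.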
  if (STI.hasV5TOps())
    return getBLXOpcode(MF);

  if (STI.hasV4TOps())
    return ARM::BX_CALL;

  return ARM::BMOVPCRX_CALL;
}
} // end anonymous namespace

bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const auto &TLI = *getTLI<ARMTargetLowering>();
  const auto &DL = MF.getDataLayout();
  const auto &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (STI.genLongCalls())
    return false;

  if (STI.isThumb1Only())
    return false;

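  // Open the call sequence now; the final stack size operand is filled in
  // below, once the argument assigner has computed how much stack is needed.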
  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);

  // Create the call instruction so we can add the implicit uses of arg
  // registers, but don't insert it yet.
  bool IsDirect = !Info.Callee.isReg();
  auto CallOpcode = getCallOpcode(MF, STI, IsDirect);
  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);

  bool IsThumb = STI.isThumb();
  if (IsThumb)
    MIB.add(predOps(ARMCC::AL));

  MIB.add(Info.Callee);
  if (!IsDirect) {
    auto CalleeReg = Info.Callee.getReg();
    if (CalleeReg && !CalleeReg.isPhysical()) {
      unsigned CalleeIdx = IsThumb ? 2 : 0;
      MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
          *MIB.getInstr(), MIB->getDesc(), Info.Callee, CalleeIdx));
    }
  }

  MIB.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  SmallVector<ArgInfo, 8> ArgInfos;
  for (auto Arg : Info.OrigArgs) {
    if (!isSupportedType(DL, TLI, Arg.Ty))
      return false;

    if (Arg.Flags[0].isByVal())
      return false;

    splitToValueTypes(Arg, ArgInfos, DL, Info.CallConv);
  }

  auto ArgAssignFn = TLI.CCAssignFnForCall(Info.CallConv, Info.IsVarArg);
  OutgoingValueAssigner ArgAssigner(ArgAssignFn);
  ARMOutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB);
  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, ArgInfos,
                                     MIRBuilder, Info.CallConv, Info.IsVarArg))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  if (!Info.OrigRet.Ty->isVoidTy()) {
    if (!isSupportedType(DL, TLI, Info.OrigRet.Ty))
      return false;

    ArgInfos.clear();
    splitToValueTypes(Info.OrigRet, ArgInfos, DL, Info.CallConv);
    auto RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, Info.IsVarArg);
    OutgoingValueAssigner Assigner(RetAssignFn);
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(RetHandler, Assigner, ArgInfos,
                                       MIRBuilder, Info.CallConv,
                                       Info.IsVarArg))
      return false;
  }

  // We now know the size of the stack - update the ADJCALLSTACKDOWN
  // accordingly.
  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0).add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(ArgAssigner.StackSize)
      .addImm(-1ULL)
      .add(predOps(ARMCC::AL));

  return true;
}