//===- XtensaISelLowering.cpp - Xtensa DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Xtensa uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
13
#include "XtensaISelLowering.h"
#include "XtensaConstantPoolValue.h"
#include "XtensaSubtarget.h"
#include "XtensaTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <deque>
#include <string>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "xtensa-lower"
33
// Return true if we must use long (in fact, indirect) function call.
// This is a simplified version; a production implementation must
// resolve functions residing in ROM (usually glibc functions).
static bool isLongCall(const char *str) {
  // Currently always use long calls
  return true;
}
41
42XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
43 const XtensaSubtarget &STI)
44 : TargetLowering(TM), Subtarget(STI) {
45 MVT PtrVT = MVT::i32;
46 // Set up the register classes.
47 addRegisterClass(MVT::VT: i32, RC: &Xtensa::ARRegClass);
48
49 // Set up special registers.
50 setStackPointerRegisterToSaveRestore(Xtensa::SP);
51
52 setSchedulingPreference(Sched::RegPressure);
53
54 setMinFunctionAlignment(Align(4));
55
56 setOperationAction(ISD::Constant, MVT::i32, Custom);
57 setOperationAction(ISD::Constant, MVT::i64, Expand);
58
59 setBooleanContents(ZeroOrOneBooleanContent);
60
61 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
62 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
63 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
64
65 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
66 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
67 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
68 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
69 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
70 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
71
72 // No sign extend instructions for i1
73 for (MVT VT : MVT::integer_valuetypes()) {
74 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
75 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
76 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
77 }
78
79 setOperationAction(Op: ISD::ConstantPool, VT: PtrVT, Action: Custom);
80
81 // Compute derived properties from the register classes
82 computeRegisterProperties(STI.getRegisterInfo());
83}
84
//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//
88
89#include "XtensaGenCallingConv.inc"
90
91static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT,
92 CCValAssign::LocInfo LocInfo,
93 ISD::ArgFlagsTy ArgFlags, CCState &State) {
94 static const MCPhysReg IntRegs[] = {Xtensa::A2, Xtensa::A3, Xtensa::A4,
95 Xtensa::A5, Xtensa::A6, Xtensa::A7};
96
97 if (ArgFlags.isByVal()) {
98 Align ByValAlign = ArgFlags.getNonZeroByValAlign();
99 unsigned ByValSize = ArgFlags.getByValSize();
100 if (ByValSize < 4) {
101 ByValSize = 4;
102 }
103 if (ByValAlign < Align(4)) {
104 ByValAlign = Align(4);
105 }
106 unsigned Offset = State.AllocateStack(Size: ByValSize, Alignment: ByValAlign);
107 State.addLoc(V: CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, HTP: LocInfo));
108 // Mark all unused registers as allocated to avoid misuse
109 // of such registers.
110 while (State.AllocateReg(IntRegs))
111 ;
112 return false;
113 }
114
115 // Promote i8 and i16
116 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
117 LocVT = MVT::i32;
118 if (ArgFlags.isSExt())
119 LocInfo = CCValAssign::SExt;
120 else if (ArgFlags.isZExt())
121 LocInfo = CCValAssign::ZExt;
122 else
123 LocInfo = CCValAssign::AExt;
124 }
125
126 unsigned Register;
127
128 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
129 bool needs64BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(8));
130 bool needs128BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(16));
131
132 if (ValVT == MVT::i32) {
133 Register = State.AllocateReg(IntRegs);
134 // If this is the first part of an i64 arg,
135 // the allocated register must be either A2, A4 or A6.
136 if (needs64BitAlign && (Register == Xtensa::A3 || Register == Xtensa::A5 ||
137 Register == Xtensa::A7))
138 Register = State.AllocateReg(IntRegs);
139 // arguments with 16byte alignment must be passed in the first register or
140 // passed via stack
141 if (needs128BitAlign && (Register != Xtensa::A2))
142 while ((Register = State.AllocateReg(IntRegs)))
143 ;
144 LocVT = MVT::i32;
145 } else if (ValVT == MVT::f64) {
146 // Allocate int register and shadow next int register.
147 Register = State.AllocateReg(IntRegs);
148 if (Register == Xtensa::A3 || Register == Xtensa::A5 ||
149 Register == Xtensa::A7)
150 Register = State.AllocateReg(IntRegs);
151 State.AllocateReg(IntRegs);
152 LocVT = MVT::i32;
153 } else {
154 report_fatal_error(reason: "Cannot handle this ValVT.");
155 }
156
157 if (!Register) {
158 unsigned Offset = State.AllocateStack(Size: ValVT.getStoreSize(), Alignment: OrigAlign);
159 State.addLoc(V: CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, HTP: LocInfo));
160 } else {
161 State.addLoc(V: CCValAssign::getReg(ValNo, ValVT, RegNo: Register, LocVT, HTP: LocInfo));
162 }
163
164 return false;
165}
166
167CCAssignFn *XtensaTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
168 bool IsVarArg) const {
169 return CC_Xtensa_Custom;
170}
171
172SDValue XtensaTargetLowering::LowerFormalArguments(
173 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
174 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
175 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
176 MachineFunction &MF = DAG.getMachineFunction();
177 MachineFrameInfo &MFI = MF.getFrameInfo();
178
179 // Used with vargs to acumulate store chains.
180 std::vector<SDValue> OutChains;
181
182 if (IsVarArg)
183 report_fatal_error(reason: "Var arg not supported by FormalArguments Lowering");
184
185 // Assign locations to all of the incoming arguments.
186 SmallVector<CCValAssign, 16> ArgLocs;
187 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
188 *DAG.getContext());
189
190 CCInfo.AnalyzeFormalArguments(Ins, Fn: CCAssignFnForCall(CC: CallConv, IsVarArg));
191
192 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
193 CCValAssign &VA = ArgLocs[i];
194 // Arguments stored on registers
195 if (VA.isRegLoc()) {
196 EVT RegVT = VA.getLocVT();
197 const TargetRegisterClass *RC;
198
199 if (RegVT == MVT::i32)
200 RC = &Xtensa::ARRegClass;
201 else
202 report_fatal_error(reason: "RegVT not supported by FormalArguments Lowering");
203
204 // Transform the arguments stored on
205 // physical registers into virtual ones
206 unsigned Register = MF.addLiveIn(PReg: VA.getLocReg(), RC);
207 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl: DL, Reg: Register, VT: RegVT);
208
209 // If this is an 8 or 16-bit value, it has been passed promoted
210 // to 32 bits. Insert an assert[sz]ext to capture this, then
211 // truncate to the right size.
212 if (VA.getLocInfo() != CCValAssign::Full) {
213 unsigned Opcode = 0;
214 if (VA.getLocInfo() == CCValAssign::SExt)
215 Opcode = ISD::AssertSext;
216 else if (VA.getLocInfo() == CCValAssign::ZExt)
217 Opcode = ISD::AssertZext;
218 if (Opcode)
219 ArgValue = DAG.getNode(Opcode, DL, VT: RegVT, N1: ArgValue,
220 N2: DAG.getValueType(VA.getValVT()));
221 ArgValue = DAG.getNode((VA.getValVT() == MVT::f32) ? ISD::BITCAST
222 : ISD::TRUNCATE,
223 DL, VA.getValVT(), ArgValue);
224 }
225
226 InVals.push_back(Elt: ArgValue);
227
228 } else {
229 assert(VA.isMemLoc());
230
231 EVT ValVT = VA.getValVT();
232
233 // The stack pointer offset is relative to the caller stack frame.
234 int FI = MFI.CreateFixedObject(Size: ValVT.getStoreSize(), SPOffset: VA.getLocMemOffset(),
235 IsImmutable: true);
236
237 if (Ins[VA.getValNo()].Flags.isByVal()) {
238 // Assume that in this case load operation is created
239 SDValue FIN = DAG.getFrameIndex(FI, MVT::VT: i32);
240 InVals.push_back(Elt: FIN);
241 } else {
242 // Create load nodes to retrieve arguments from the stack
243 SDValue FIN =
244 DAG.getFrameIndex(FI, VT: getFrameIndexTy(DL: DAG.getDataLayout()));
245 InVals.push_back(Elt: DAG.getLoad(
246 VT: ValVT, dl: DL, Chain, Ptr: FIN,
247 PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)));
248 }
249 }
250 }
251
252 // All stores are grouped in one node to allow the matching between
253 // the size of Ins and InVals. This only happens when on varg functions
254 if (!OutChains.empty()) {
255 OutChains.push_back(x: Chain);
256 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
257 }
258
259 return Chain;
260}
261
262SDValue
263XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI,
264 SmallVectorImpl<SDValue> &InVals) const {
265 SelectionDAG &DAG = CLI.DAG;
266 SDLoc &DL = CLI.DL;
267 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
268 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
269 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
270 SDValue Chain = CLI.Chain;
271 SDValue Callee = CLI.Callee;
272 bool &IsTailCall = CLI.IsTailCall;
273 CallingConv::ID CallConv = CLI.CallConv;
274 bool IsVarArg = CLI.IsVarArg;
275
276 MachineFunction &MF = DAG.getMachineFunction();
277 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
278 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
279
280 // TODO: Support tail call optimization.
281 IsTailCall = false;
282
283 // Analyze the operands of the call, assigning locations to each operand.
284 SmallVector<CCValAssign, 16> ArgLocs;
285 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
286
287 CCAssignFn *CC = CCAssignFnForCall(CC: CallConv, IsVarArg);
288
289 CCInfo.AnalyzeCallOperands(Outs, Fn: CC);
290
291 // Get a count of how many bytes are to be pushed on the stack.
292 unsigned NumBytes = CCInfo.getStackSize();
293
294 Align StackAlignment = TFL->getStackAlign();
295 unsigned NextStackOffset = alignTo(Size: NumBytes, A: StackAlignment);
296
297 Chain = DAG.getCALLSEQ_START(Chain, InSize: NextStackOffset, OutSize: 0, DL);
298
299 // Copy argument values to their designated locations.
300 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
301 SmallVector<SDValue, 8> MemOpChains;
302 SDValue StackPtr;
303 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
304 CCValAssign &VA = ArgLocs[I];
305 SDValue ArgValue = OutVals[I];
306 ISD::ArgFlagsTy Flags = Outs[I].Flags;
307
308 if (VA.isRegLoc())
309 // Queue up the argument copies and emit them at the end.
310 RegsToPass.push_back(x: std::make_pair(x: VA.getLocReg(), y&: ArgValue));
311 else if (Flags.isByVal()) {
312 assert(VA.isMemLoc());
313 assert(Flags.getByValSize() &&
314 "ByVal args of size 0 should have been ignored by front-end.");
315 assert(!IsTailCall &&
316 "Do not tail-call optimize if there is a byval argument.");
317
318 if (!StackPtr.getNode())
319 StackPtr = DAG.getCopyFromReg(Chain, DL, Xtensa::SP, PtrVT);
320 unsigned Offset = VA.getLocMemOffset();
321 SDValue Address = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr,
322 N2: DAG.getIntPtrConstant(Val: Offset, DL));
323 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
324 SDValue Memcpy = DAG.getMemcpy(
325 Chain, dl: DL, Dst: Address, Src: ArgValue, Size: SizeNode, Alignment: Flags.getNonZeroByValAlign(),
326 /*isVolatile=*/isVol: false, /*AlwaysInline=*/false,
327 /*isTailCall=*/false, DstPtrInfo: MachinePointerInfo(), SrcPtrInfo: MachinePointerInfo());
328 MemOpChains.push_back(Elt: Memcpy);
329 } else {
330 assert(VA.isMemLoc() && "Argument not register or memory");
331
332 // Work out the address of the stack slot. Unpromoted ints and
333 // floats are passed as right-justified 8-byte values.
334 if (!StackPtr.getNode())
335 StackPtr = DAG.getCopyFromReg(Chain, DL, Xtensa::SP, PtrVT);
336 unsigned Offset = VA.getLocMemOffset();
337 SDValue Address = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr,
338 N2: DAG.getIntPtrConstant(Val: Offset, DL));
339
340 // Emit the store.
341 MemOpChains.push_back(
342 Elt: DAG.getStore(Chain, dl: DL, Val: ArgValue, Ptr: Address, PtrInfo: MachinePointerInfo()));
343 }
344 }
345
346 // Join the stores, which are independent of one another.
347 if (!MemOpChains.empty())
348 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
349
350 // Build a sequence of copy-to-reg nodes, chained and glued together.
351 SDValue Glue;
352 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
353 unsigned Reg = RegsToPass[I].first;
354 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg, N: RegsToPass[I].second, Glue);
355 Glue = Chain.getValue(R: 1);
356 }
357 std::string name;
358 unsigned char TF = 0;
359
360 // Accept direct calls by converting symbolic call addresses to the
361 // associated Target* opcodes.
362 if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) {
363 name = E->getSymbol();
364 TF = E->getTargetFlags();
365 if (isPositionIndependent()) {
366 report_fatal_error(reason: "PIC relocations is not supported");
367 } else
368 Callee = DAG.getTargetExternalSymbol(Sym: E->getSymbol(), VT: PtrVT, TargetFlags: TF);
369 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) {
370 const GlobalValue *GV = G->getGlobal();
371 name = GV->getName().str();
372 }
373
374 if ((!name.empty()) && isLongCall(str: name.c_str())) {
375 // Create a constant pool entry for the callee address
376 XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier;
377
378 XtensaConstantPoolValue *CPV = XtensaConstantPoolSymbol::Create(
379 C&: *DAG.getContext(), S: name.c_str(), ID: 0 /* XtensaCLabelIndex */, PrivLinkage: false,
380 Modifier);
381
382 // Get the address of the callee into a register
383 SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4), Offset: 0, TargetFlags: TF);
384 SDValue CPWrap = getAddrPCRel(Op: CPAddr, DAG);
385 Callee = CPWrap;
386 }
387
388 // The first call operand is the chain and the second is the target address.
389 SmallVector<SDValue, 8> Ops;
390 Ops.push_back(Elt: Chain);
391 Ops.push_back(Elt: Callee);
392
393 // Add a register mask operand representing the call-preserved registers.
394 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
395 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
396 assert(Mask && "Missing call preserved mask for calling convention");
397 Ops.push_back(Elt: DAG.getRegisterMask(RegMask: Mask));
398
399 // Add argument registers to the end of the list so that they are
400 // known live into the call.
401 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
402 unsigned Reg = RegsToPass[I].first;
403 Ops.push_back(Elt: DAG.getRegister(Reg, VT: RegsToPass[I].second.getValueType()));
404 }
405
406 // Glue the call to the argument copies, if any.
407 if (Glue.getNode())
408 Ops.push_back(Elt: Glue);
409
410 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
411 Chain = DAG.getNode(Opcode: XtensaISD::CALL, DL, VTList: NodeTys, Ops);
412 Glue = Chain.getValue(R: 1);
413
414 // Mark the end of the call, which is glued to the call itself.
415 Chain = DAG.getCALLSEQ_END(Chain, Op1: DAG.getConstant(Val: NumBytes, DL, VT: PtrVT, isTarget: true),
416 Op2: DAG.getConstant(Val: 0, DL, VT: PtrVT, isTarget: true), InGlue: Glue, DL);
417 Glue = Chain.getValue(R: 1);
418
419 // Assign locations to each value returned by this call.
420 SmallVector<CCValAssign, 16> RetLocs;
421 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
422 RetCCInfo.AnalyzeCallResult(Ins, RetCC_Xtensa);
423
424 // Copy all of the result registers out of their specified physreg.
425 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
426 CCValAssign &VA = RetLocs[I];
427
428 // Copy the value out, gluing the copy to the end of the call sequence.
429 unsigned Reg = VA.getLocReg();
430 SDValue RetValue = DAG.getCopyFromReg(Chain, dl: DL, Reg, VT: VA.getLocVT(), Glue);
431 Chain = RetValue.getValue(R: 1);
432 Glue = RetValue.getValue(R: 2);
433
434 InVals.push_back(Elt: RetValue);
435 }
436 return Chain;
437}
438
439bool XtensaTargetLowering::CanLowerReturn(
440 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
441 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
442 SmallVector<CCValAssign, 16> RVLocs;
443 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
444 return CCInfo.CheckReturn(Outs, Fn: RetCC_Xtensa);
445}
446
447SDValue
448XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
449 bool IsVarArg,
450 const SmallVectorImpl<ISD::OutputArg> &Outs,
451 const SmallVectorImpl<SDValue> &OutVals,
452 const SDLoc &DL, SelectionDAG &DAG) const {
453 if (IsVarArg)
454 report_fatal_error(reason: "VarArg not supported");
455
456 MachineFunction &MF = DAG.getMachineFunction();
457
458 // Assign locations to each returned value.
459 SmallVector<CCValAssign, 16> RetLocs;
460 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
461 RetCCInfo.AnalyzeReturn(Outs, Fn: RetCC_Xtensa);
462
463 SDValue Glue;
464 // Quick exit for void returns
465 if (RetLocs.empty())
466 return DAG.getNode(XtensaISD::RET, DL, MVT::Other, Chain);
467
468 // Copy the result values into the output registers.
469 SmallVector<SDValue, 4> RetOps;
470 RetOps.push_back(Elt: Chain);
471 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
472 CCValAssign &VA = RetLocs[I];
473 SDValue RetValue = OutVals[I];
474
475 // Make the return register live on exit.
476 assert(VA.isRegLoc() && "Can only return in registers!");
477
478 // Chain and glue the copies together.
479 unsigned Register = VA.getLocReg();
480 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: Register, N: RetValue, Glue);
481 Glue = Chain.getValue(R: 1);
482 RetOps.push_back(Elt: DAG.getRegister(Reg: Register, VT: VA.getLocVT()));
483 }
484
485 // Update chain and glue.
486 RetOps[0] = Chain;
487 if (Glue.getNode())
488 RetOps.push_back(Elt: Glue);
489
490 return DAG.getNode(XtensaISD::RET, DL, MVT::Other, RetOps);
491}
492
493SDValue XtensaTargetLowering::LowerImmediate(SDValue Op,
494 SelectionDAG &DAG) const {
495 const ConstantSDNode *CN = cast<ConstantSDNode>(Val&: Op);
496 SDLoc DL(CN);
497 APInt APVal = CN->getAPIntValue();
498 int64_t Value = APVal.getSExtValue();
499 if (Op.getValueType() == MVT::i32) {
500 // Check if use node maybe lowered to the MOVI instruction
501 if (Value > -2048 && Value <= 2047)
502 return Op;
503 // Check if use node maybe lowered to the ADDMI instruction
504 SDNode &OpNode = *Op.getNode();
505 if ((OpNode.hasOneUse() && OpNode.use_begin()->getOpcode() == ISD::ADD) &&
506 isShiftedInt<16, 8>(x: Value))
507 return Op;
508 Type *Ty = Type::getInt32Ty(C&: *DAG.getContext());
509 Constant *CV = ConstantInt::get(Ty, V: Value);
510 SDValue CP = DAG.getConstantPool(CV, MVT::i32);
511 return CP;
512 }
513 return Op;
514}
515
516SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op,
517 SelectionDAG &DAG) const {
518 SDLoc DL(Op);
519 EVT Ty = Op.getValueType();
520 return DAG.getNode(Opcode: XtensaISD::PCREL_WRAPPER, DL, VT: Ty, Operand: Op);
521}
522
523SDValue XtensaTargetLowering::LowerConstantPool(ConstantPoolSDNode *CP,
524 SelectionDAG &DAG) const {
525 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
526 SDValue Result;
527 if (!CP->isMachineConstantPoolEntry()) {
528 Result = DAG.getTargetConstantPool(C: CP->getConstVal(), VT: PtrVT, Align: CP->getAlign(),
529 Offset: CP->getOffset());
530 } else {
531 report_fatal_error(reason: "This constantpool type is not supported yet");
532 }
533
534 return getAddrPCRel(Op: Result, DAG);
535}
536
537SDValue XtensaTargetLowering::LowerOperation(SDValue Op,
538 SelectionDAG &DAG) const {
539 switch (Op.getOpcode()) {
540 case ISD::Constant:
541 return LowerImmediate(Op, DAG);
542 case ISD::ConstantPool:
543 return LowerConstantPool(CP: cast<ConstantPoolSDNode>(Val&: Op), DAG);
544 default:
545 report_fatal_error(reason: "Unexpected node to lower");
546 }
547}
548
549const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
550 switch (Opcode) {
551 case XtensaISD::CALL:
552 return "XtensaISD::CALL";
553 case XtensaISD::PCREL_WRAPPER:
554 return "XtensaISD::PCREL_WRAPPER";
555 case XtensaISD::RET:
556 return "XtensaISD::RET";
557 }
558 return nullptr;
559}
560

source code of llvm/lib/Target/Xtensa/XtensaISelLowering.cpp