//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;


//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

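// Custom CC routine: pin the sret pointer argument to its fixed stack slot.
// The 32-bit ABI passes it at %sp+64 rather than in a register.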
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert(ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}

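// Custom CC routine for the 32-bit ABI: split a 64-bit value into two 32-bit
// pieces, each assigned to one of %i0-%i5 or to a 4-byte stack slot once the
// registers run out.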
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}

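// Custom CC routine for 64-bit return values under the 32-bit ABI. Unlike the
// argument case there is no stack fallback: fail unless both halves fit in
// registers.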
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
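// For example, %i0 in the callee's window is %o0 in the caller's window.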
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}

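// Check whether the return values described by Outs can all be lowered into
// registers; when this returns false the return value is demoted to an sret
// parameter.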
bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Glue);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0.
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain; // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

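  // Incoming stack arguments in the 32-bit ABI live above a fixed 92-byte
  // area of the caller's frame: the 64-byte register save area, the 4-byte
  // hidden sret slot at %fp+64, and six 4-byte home slots for %i0-%i5.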
  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8,
                                                     Offset,
                                                     true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                   Offset,
                                                   true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4,
                                                    Offset+4,
                                                    true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                 Offset,
                                                 true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getStackSize();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}

// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust the offset for extended arguments; SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getStackSize();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}

// Check whether any of the argument registers are reserved.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,
                                const MachineFunction &MF) {
  // The register window design means that outgoing parameters at O*
  // will appear in the callee as I*.
  // Be conservative and check both sides of the register names.
  bool Outgoing =
      llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  bool Incoming =
      llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  return Outgoing || Incoming;
}

static void emitReservedArgRegCallError(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("SPARC doesn't support"
          " function calls if any of the argument registers is reserved.")});
}

SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

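// Determine whether the callee is marked returns_twice (e.g. setjmp). Such
// calls are given a more conservative call-preserved register mask in
// LowerCall_32/LowerCall_64.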
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
bool SparcTargetLowering::IsEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {

  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  // 64-bit targets have a slightly higher limit since the ABI requires
  // allocating some space even when all the parameters fit inside registers.
  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
  if (CCInfo.getStackSize() > StackSizeLimit)
    return false;

  // Do not tail call opt if either the callee or caller returns
  // a struct and the other does not.
  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
                                 CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            false,        // isTailCall
                            MachinePointerInfo(), MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    }
    else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

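  // Eligibility checking above guarantees that a tail call needs no stack
  // space for arguments in the 32-bit ABI.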
  assert(!isTailCall || ArgsSize == 0);

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use the local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      if (isTailCall)
        continue;

      // Store the SRet argument at %sp+64.
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret is only allowed on the first argument.
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // If it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part on the stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }


  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
  }

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InGlue)
              .getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  // If we're directly referencing register names
  // (e.g. in the GCC C extension `register int r asm("g1");`),
  // make sure that said register is in the reserve list.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!TRI->isReservedReg(MF, Reg))
    Reg = 0;

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
                                 CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
                                       MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
                               VA.getLocVT(), VA.getLocInfo());
    }
  }
}

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
                                         CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned StackReserved = 6 * 8u;
  unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

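  // In the 64-bit ABI a tail call can use at most the always-reserved six
  // argument slots, so no extra stack adjustment is ever needed.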
1255 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1256
1257 // Adjust the stack pointer to make room for the arguments.
1258 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1259 // with more than 6 arguments.
1260 if (!CLI.IsTailCall)
1261 Chain = DAG.getCALLSEQ_START(Chain, InSize: ArgsSize, OutSize: 0, DL);
1262
1263 // Collect the set of registers to pass to the function and their values.
1264 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1265 // instruction.
1266 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
1267
1268 // Collect chains from all the memory opeations that copy arguments to the
1269 // stack. They must follow the stack pointer adjustment above and precede the
1270 // call instruction itself.
1271 SmallVector<SDValue, 8> MemOpChains;
1272
1273 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1274 const CCValAssign &VA = ArgLocs[i];
1275 SDValue Arg = CLI.OutVals[i];
1276
1277 // Promote the value if needed.
1278 switch (VA.getLocInfo()) {
1279 default:
1280 llvm_unreachable("Unknown location info!");
1281 case CCValAssign::Full:
1282 break;
1283 case CCValAssign::SExt:
1284 Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: VA.getLocVT(), Operand: Arg);
1285 break;
1286 case CCValAssign::ZExt:
1287 Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: VA.getLocVT(), Operand: Arg);
1288 break;
1289 case CCValAssign::AExt:
1290 Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: VA.getLocVT(), Operand: Arg);
1291 break;
1292 case CCValAssign::BCvt:
1293 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1294 // SPARC does not support i128 natively. Lower it into two i64, see below.
1295 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1296 || VA.getLocVT() != MVT::i128)
1297 Arg = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: VA.getLocVT(), Operand: Arg);
1298 break;
1299 }
1300
1301 if (VA.isRegLoc()) {
1302 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1303 && VA.getLocVT() == MVT::i128) {
1304 // Store and reload into the integer register reg and reg+1.
1305 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1306 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1307 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1308 SDValue HiPtrOff = DAG.getIntPtrConstant(Val: StackOffset, DL);
1309 HiPtrOff = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr, N2: HiPtrOff);
1310 SDValue LoPtrOff = DAG.getIntPtrConstant(Val: StackOffset + 8, DL);
1311 LoPtrOff = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr, N2: LoPtrOff);
1312
1313 // Store to %sp+BIAS+128+Offset
1314 SDValue Store =
1315 DAG.getStore(Chain, dl: DL, Val: Arg, Ptr: HiPtrOff, PtrInfo: MachinePointerInfo());
1316 // Load into Reg and Reg+1
1317 SDValue Hi64 =
1318 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1319 SDValue Lo64 =
1320 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1321
1322 Register HiReg = VA.getLocReg();
1323 Register LoReg = VA.getLocReg() + 1;
1324 if (!CLI.IsTailCall) {
1325 HiReg = toCallerWindow(Reg: HiReg);
1326 LoReg = toCallerWindow(Reg: LoReg);
1327 }
1328
1329 RegsToPass.push_back(Elt: std::make_pair(x&: HiReg, y&: Hi64));
1330 RegsToPass.push_back(Elt: std::make_pair(x&: LoReg, y&: Lo64));
1331 continue;
1332 }
1333
1334 // The custom bit on an i32 return value indicates that it should be
1335 // passed in the high bits of the register.
1336 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1337 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1338 DAG.getConstant(32, DL, MVT::i32));
1339
1340 // The next value may go in the low bits of the same register.
1341 // Handle both at once.
1342 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1343 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1344 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1345 CLI.OutVals[i+1]);
1346 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1347 // Skip the next value, it's already done.
1348 ++i;
1349 }
1350 }
1351
1352 Register Reg = VA.getLocReg();
1353 if (!CLI.IsTailCall)
1354 Reg = toCallerWindow(Reg);
1355 RegsToPass.push_back(Elt: std::make_pair(x&: Reg, y&: Arg));
1356 continue;
1357 }
1358
1359 assert(VA.isMemLoc());
1360
1361 // Create a store off the stack pointer for this argument.
1362 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1363 // The argument area starts at %fp+BIAS+128 in the callee frame,
1364 // %sp+BIAS+128 in ours.
1365 SDValue PtrOff = DAG.getIntPtrConstant(Val: VA.getLocMemOffset() +
1366 Subtarget->getStackPointerBias() +
1367 128, DL);
1368 PtrOff = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr, N2: PtrOff);
1369 MemOpChains.push_back(
1370 Elt: DAG.getStore(Chain, dl: DL, Val: Arg, Ptr: PtrOff, PtrInfo: MachinePointerInfo()));
1371 }
1372
1373 // Emit all stores, make sure they occur before the call.
1374 if (!MemOpChains.empty())
1375 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1376
1377 // Build a sequence of CopyToReg nodes glued together with token chain and
1378 // glue operands which copy the outgoing args into registers. The InGlue is
1379 // necessary since all emitted instructions must be stuck together in order
1380 // to pass the live physical registers.
1381 SDValue InGlue;
1382 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1383 Chain = DAG.getCopyToReg(Chain, dl: DL,
1384 Reg: RegsToPass[i].first, N: RegsToPass[i].second, Glue: InGlue);
1385 InGlue = Chain.getValue(R: 1);
1386 }
1387
1388 // If the callee is a GlobalAddress node (quite common, every direct call is)
1389 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1390 // Likewise ExternalSymbol -> TargetExternalSymbol.
1391 SDValue Callee = CLI.Callee;
1392 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, Call: CLI.CB);
1393 unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
1394 : SparcMCExpr::VK_Sparc_WDISP30;
1395 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee))
1396 Callee = DAG.getTargetGlobalAddress(GV: G->getGlobal(), DL, VT: PtrVT, offset: 0, TargetFlags: TF);
1397 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Val&: Callee))
1398 Callee = DAG.getTargetExternalSymbol(Sym: E->getSymbol(), VT: PtrVT, TargetFlags: TF);
1399
1400 // Build the operands for the call instruction itself.
1401 SmallVector<SDValue, 8> Ops;
1402 Ops.push_back(Elt: Chain);
1403 Ops.push_back(Elt: Callee);
1404 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1405 Ops.push_back(Elt: DAG.getRegister(Reg: RegsToPass[i].first,
1406 VT: RegsToPass[i].second.getValueType()));
1407
1408 // Add a register mask operand representing the call-preserved registers.
1409 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1410 const uint32_t *Mask =
1411 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CC: CLI.CallConv)
1412 : TRI->getCallPreservedMask(MF: DAG.getMachineFunction(),
1413 CC: CLI.CallConv));
1414
1415 if (isAnyArgRegReserved(TRI, MF))
1416 emitReservedArgRegCallError(MF);
1417
1418 assert(Mask && "Missing call preserved mask for calling convention");
1419 Ops.push_back(Elt: DAG.getRegisterMask(RegMask: Mask));
1420
1421 // Make sure the CopyToReg nodes are glued to the call instruction which
1422 // consumes the registers.
1423 if (InGlue.getNode())
1424 Ops.push_back(Elt: InGlue);
1425
1426 // Now the call itself.
1427 if (CLI.IsTailCall) {
1428 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
1429 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1430 }
1431 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1432 Chain = DAG.getNode(Opcode: SPISD::CALL, DL, VTList: NodeTys, Ops);
1433 InGlue = Chain.getValue(R: 1);
1434
1435 // Revert the stack pointer immediately after the call.
1436 Chain = DAG.getCALLSEQ_END(Chain, Size1: ArgsSize, Size2: 0, Glue: InGlue, DL);
1437 InGlue = Chain.getValue(R: 1);
1438
1439 // Now extract the return values. This is more or less the same as
1440 // LowerFormalArguments_64.
1441
1442 // Assign locations to each value returned by this call.
1443 SmallVector<CCValAssign, 16> RVLocs;
1444 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1445 *DAG.getContext());
1446
1447 // Set the inreg flag manually for codegen-generated library calls that
1448 // return float.
1449 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1450 CLI.Ins[0].Flags.setInReg();
1451
1452 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1453
1454 // Copy all of the result registers out of their specified physreg.
1455 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1456 CCValAssign &VA = RVLocs[i];
1457 assert(VA.isRegLoc() && "Can only return in registers!");
1458 unsigned Reg = toCallerWindow(Reg: VA.getLocReg());
1459
1460 // When returning 'inreg {i32, i32}', two consecutive i32 values can
1461 // reside in the high and low bits of the same register. Reuse the
1462 // previous CopyFromReg node to avoid emitting duplicate copies.
1463 SDValue RV;
1464 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Val: Chain.getOperand(i: 1)))
1465 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1466 RV = Chain.getValue(R: 0);
1467
1468 // But usually we'll create a new CopyFromReg for a different register.
1469 if (!RV.getNode()) {
1470 RV = DAG.getCopyFromReg(Chain, dl: DL, Reg, VT: RVLocs[i].getLocVT(), Glue: InGlue);
1471 Chain = RV.getValue(R: 1);
1472 InGlue = Chain.getValue(R: 2);
1473 }
1474
1475 // Get the high bits for i32 struct elements.
1476 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1477 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1478 DAG.getConstant(32, DL, MVT::i32));
1479
1480 // The callee promoted the return value, so insert an Assert?ext SDNode so
1481 // we won't promote the value again in this function.
1482 switch (VA.getLocInfo()) {
1483 case CCValAssign::SExt:
1484 RV = DAG.getNode(Opcode: ISD::AssertSext, DL, VT: VA.getLocVT(), N1: RV,
1485 N2: DAG.getValueType(VA.getValVT()));
1486 break;
1487 case CCValAssign::ZExt:
1488 RV = DAG.getNode(Opcode: ISD::AssertZext, DL, VT: VA.getLocVT(), N1: RV,
1489 N2: DAG.getValueType(VA.getValVT()));
1490 break;
1491 default:
1492 break;
1493 }
1494
1495 // Truncate the register down to the return value type.
1496 if (VA.isExtInLoc())
1497 RV = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VA.getValVT(), Operand: RV);
1498
1499 InVals.push_back(Elt: RV);
1500 }
1501
1502 return Chain;
1503}
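
// Note on the toCallerWindow() translations above: a SPARC `call` rotates
// the register window, so values the caller puts in %o0-%o5 arrive in the
// callee as %i0-%i5. The calling-convention tables describe registers from
// the callee's point of view (the I registers), so the caller-side lowering
// here maps them to the corresponding O registers; e.g. a return value that
// RetCC_Sparc64 assigns to %i0 is read from %o0 above. Tail calls skip the
// translation because they reuse the caller's own window.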
1504
1505//===----------------------------------------------------------------------===//
1506// TargetLowering Implementation
1507//===----------------------------------------------------------------------===//
1508
1509TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
1510 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1511 AI->getType()->getPrimitiveSizeInBits() == 32)
1512 return AtomicExpansionKind::None; // Uses xchg instruction
1513
1514 return AtomicExpansionKind::CmpXChg;
1515}
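
// A sketch of the resulting IR shapes (illustrative, not from a test):
//   atomicrmw xchg ptr %p, i32 %v seq_cst  ; kept native, selects to the
//                                          ; swap instruction
//   atomicrmw add  ptr %p, i32 %v seq_cst  ; expanded by AtomicExpandPass
//                                          ; into a compare-and-swap loop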
1516
1517/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1518/// rcond condition.
1519static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
1520 switch (CC) {
1521 default:
1522 llvm_unreachable("Unknown/unsigned integer condition code!");
1523 case ISD::SETEQ:
1524 return SPCC::REG_Z;
1525 case ISD::SETNE:
1526 return SPCC::REG_NZ;
1527 case ISD::SETLT:
1528 return SPCC::REG_LZ;
1529 case ISD::SETGT:
1530 return SPCC::REG_GZ;
1531 case ISD::SETLE:
1532 return SPCC::REG_LEZ;
1533 case ISD::SETGE:
1534 return SPCC::REG_GEZ;
1535 }
1536}
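
// These rcond values correspond to the V9 branch-on-register-contents
// (BPr) instructions: e.g. (setlt i64 %x, 0) can be emitted as
// "brlz %x, label" without materializing condition codes first. Only
// signed comparisons against zero have such encodings, which is why the
// unsigned codes hit the unreachable above.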
1537
1538/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1539/// condition.
1540static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
1541 switch (CC) {
1542 default: llvm_unreachable("Unknown integer condition code!");
1543 case ISD::SETEQ: return SPCC::ICC_E;
1544 case ISD::SETNE: return SPCC::ICC_NE;
1545 case ISD::SETLT: return SPCC::ICC_L;
1546 case ISD::SETGT: return SPCC::ICC_G;
1547 case ISD::SETLE: return SPCC::ICC_LE;
1548 case ISD::SETGE: return SPCC::ICC_GE;
1549 case ISD::SETULT: return SPCC::ICC_CS;
1550 case ISD::SETULE: return SPCC::ICC_LEU;
1551 case ISD::SETUGT: return SPCC::ICC_GU;
1552 case ISD::SETUGE: return SPCC::ICC_CC;
1553 }
1554}
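
// The unsigned orderings map onto carry-flag conditions: after a subcc
// compare, unsigned less-than is "carry set" (ICC_CS, the bcs/blu branch)
// and unsigned greater-or-equal is "carry clear" (ICC_CC, bcc/bgeu).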
1555
1556 /// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1557 /// FCC condition.
1558static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
1559 switch (CC) {
1560 default: llvm_unreachable("Unknown fp condition code!");
1561 case ISD::SETEQ:
1562 case ISD::SETOEQ: return SPCC::FCC_E;
1563 case ISD::SETNE:
1564 case ISD::SETUNE: return SPCC::FCC_NE;
1565 case ISD::SETLT:
1566 case ISD::SETOLT: return SPCC::FCC_L;
1567 case ISD::SETGT:
1568 case ISD::SETOGT: return SPCC::FCC_G;
1569 case ISD::SETLE:
1570 case ISD::SETOLE: return SPCC::FCC_LE;
1571 case ISD::SETGE:
1572 case ISD::SETOGE: return SPCC::FCC_GE;
1573 case ISD::SETULT: return SPCC::FCC_UL;
1574 case ISD::SETULE: return SPCC::FCC_ULE;
1575 case ISD::SETUGT: return SPCC::FCC_UG;
1576 case ISD::SETUGE: return SPCC::FCC_UGE;
1577 case ISD::SETUO: return SPCC::FCC_U;
1578 case ISD::SETO: return SPCC::FCC_O;
1579 case ISD::SETONE: return SPCC::FCC_LG;
1580 case ISD::SETUEQ: return SPCC::FCC_UE;
1581 }
1582}
1583
1584SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
1585 const SparcSubtarget &STI)
1586 : TargetLowering(TM), Subtarget(&STI) {
1587 MVT PtrVT = MVT::getIntegerVT(BitWidth: TM.getPointerSizeInBits(AS: 0));
1588
1589 // Instructions which use registers as conditionals examine all the
1590 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1591 // matters much whether it's ZeroOrOneBooleanContent, or
1592 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1593 // former.
1594 setBooleanContents(ZeroOrOneBooleanContent);
1595 setBooleanVectorContents(ZeroOrOneBooleanContent);
1596
1597 // Set up the register classes.
1598 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1599 if (!Subtarget->useSoftFloat()) {
1600 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1601 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1602 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1603 }
1604 if (Subtarget->is64Bit()) {
1605 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1606 } else {
1607 // On 32-bit SPARC, we additionally define a class of paired 32-bit
1608 // registers. This is modeled in LLVM as a 2-vector of i32.
1609 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1610
1611 // ...but almost all operations must be expanded, so set that as
1612 // the default.
1613 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1614 setOperationAction(Op, MVT::v2i32, Expand);
1615 }
1616 // Truncating/extending stores/loads are also not supported.
1617 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
1618 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1619 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1620 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1621
1622 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1623 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1624 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1625
1626 setTruncStoreAction(VT, MVT::v2i32, Expand);
1627 setTruncStoreAction(MVT::v2i32, VT, Expand);
1628 }
1629 // However, load and store *are* legal.
1630 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1631 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1632 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
1633 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);
1634
1635 // And we need to promote i64 loads/stores into vector load/store
1636 setOperationAction(ISD::LOAD, MVT::i64, Custom);
1637 setOperationAction(ISD::STORE, MVT::i64, Custom);
1638
1639 // Sadly, this doesn't work:
1640 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1641 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
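// Instead, the custom LOAD/STORE lowering later in this file performs the
// equivalent promotion by hand, accessing memory as v2i32 and bitcasting
// to/from i64.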
1642 }
1643
1644 // Turn FP extload into load/fpextend
1645 for (MVT VT : MVT::fp_valuetypes()) {
1646 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1647 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1648 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1649 }
1650
1651 // Sparc doesn't have i1 sign extending load
1652 for (MVT VT : MVT::integer_valuetypes())
1653 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1654
1655 // Turn FP truncstore into trunc + store.
1656 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1657 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1658 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1659 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1660 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1661 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1662
1663 // Custom legalize GlobalAddress nodes into LO/HI parts.
1664 setOperationAction(Op: ISD::GlobalAddress, VT: PtrVT, Action: Custom);
1665 setOperationAction(Op: ISD::GlobalTLSAddress, VT: PtrVT, Action: Custom);
1666 setOperationAction(Op: ISD::ConstantPool, VT: PtrVT, Action: Custom);
1667 setOperationAction(Op: ISD::BlockAddress, VT: PtrVT, Action: Custom);
1668
1669 // Sparc doesn't have sext_inreg, replace them with shl/sra
1670 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1671 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1672 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1673
1674 // Sparc has no REM or DIVREM operations.
1675 setOperationAction(ISD::UREM, MVT::i32, Expand);
1676 setOperationAction(ISD::SREM, MVT::i32, Expand);
1677 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1678 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1679
1680 // ... nor does SparcV9.
1681 if (Subtarget->is64Bit()) {
1682 setOperationAction(ISD::UREM, MVT::i64, Expand);
1683 setOperationAction(ISD::SREM, MVT::i64, Expand);
1684 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1685 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1686 }
1687
1688 // Custom expand fp<->sint
1689 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1690 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1691 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1692 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1693
1694 // Custom expand fp<->uint
1695 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1696 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1697 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1698 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1699
1700 // Lower f16 conversion operations into library calls
1701 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1702 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1703 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1704 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1705 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1706 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1707
1708 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
1709 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
1710
1711 // Sparc has no select or setcc: expand to SELECT_CC.
1712 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1713 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1714 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1715 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1716
1717 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1718 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1719 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1720 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1721
1722 // Sparc doesn't have BRCOND either; it has BR_CC.
1723 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1724 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1725 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1726 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1727 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1728 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1729 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1730
1731 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1732 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1733 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1734 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1735
1736 setOperationAction(ISD::ADDC, MVT::i32, Custom);
1737 setOperationAction(ISD::ADDE, MVT::i32, Custom);
1738 setOperationAction(ISD::SUBC, MVT::i32, Custom);
1739 setOperationAction(ISD::SUBE, MVT::i32, Custom);
1740
1741 if (Subtarget->is64Bit()) {
1742 setOperationAction(ISD::ADDC, MVT::i64, Custom);
1743 setOperationAction(ISD::ADDE, MVT::i64, Custom);
1744 setOperationAction(ISD::SUBC, MVT::i64, Custom);
1745 setOperationAction(ISD::SUBE, MVT::i64, Custom);
1746 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
1747 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
1748 setOperationAction(ISD::SELECT, MVT::i64, Expand);
1749 setOperationAction(ISD::SETCC, MVT::i64, Expand);
1750 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1751 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1752
1753 setOperationAction(ISD::CTPOP, MVT::i64,
1754 Subtarget->usePopc() ? Legal : Expand);
1755 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1756 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1757 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1758 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1759 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1760 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
1761 }
1762
1763 // ATOMICs.
1764 // Atomics are supported on SparcV9. 32-bit atomics are also
1765 // supported by some Leon SparcV8 variants. Otherwise, atomics
1766 // are unsupported.
1767 if (Subtarget->isV9()) {
1768 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1769 // but it hasn't been implemented in the backend yet.
1770 if (Subtarget->is64Bit())
1771 setMaxAtomicSizeInBitsSupported(64);
1772 else
1773 setMaxAtomicSizeInBitsSupported(32);
1774 } else if (Subtarget->hasLeonCasa())
1775 setMaxAtomicSizeInBitsSupported(32);
1776 else
1777 setMaxAtomicSizeInBitsSupported(0);
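
  // (hasLeonCasa() refers to the optional CASA compare-and-swap instruction
  // implemented by some LEON SparcV8 cores; it is what makes 32-bit atomics
  // usable there.)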
1778
1779 setMinCmpXchgSizeInBits(32);
1780
1781 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1782
1783 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1784
1785 // Custom Lower Atomic LOAD/STORE
1786 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1787 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1788
1789 if (Subtarget->is64Bit()) {
1790 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1791 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1792 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1793 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1794 }
1795
1796 if (!Subtarget->is64Bit()) {
1797 // These libcalls are not available in 32-bit.
1798 setLibcallName(Call: RTLIB::MULO_I64, Name: nullptr);
1799 setLibcallName(Call: RTLIB::MUL_I128, Name: nullptr);
1800 setLibcallName(Call: RTLIB::SHL_I128, Name: nullptr);
1801 setLibcallName(Call: RTLIB::SRL_I128, Name: nullptr);
1802 setLibcallName(Call: RTLIB::SRA_I128, Name: nullptr);
1803 }
1804
1805 setLibcallName(Call: RTLIB::MULO_I128, Name: nullptr);
1806
1807 if (!Subtarget->isV9()) {
1808 // SparcV8 does not have FNEGD and FABSD.
1809 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1810 setOperationAction(ISD::FABS, MVT::f64, Custom);
1811 }
1812
1813 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1814 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1815 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1816 setOperationAction(ISD::FREM , MVT::f128, Expand);
1817 setOperationAction(ISD::FMA , MVT::f128, Expand);
1818 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1819 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1820 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1821 setOperationAction(ISD::FREM , MVT::f64, Expand);
1822 setOperationAction(ISD::FMA , MVT::f64, Expand);
1823 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1824 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1825 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1826 setOperationAction(ISD::FREM , MVT::f32, Expand);
1827 setOperationAction(ISD::FMA , MVT::f32, Expand);
1828 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1829 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1830 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1831 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1832 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1833 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1834 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1835 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1836 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1837 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1838 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1839
1840 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1841 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1842 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1843
1844 // Expands to [SU]MUL_LOHI.
1845 setOperationAction(ISD::MULHU, MVT::i32, Expand);
1846 setOperationAction(ISD::MULHS, MVT::i32, Expand);
1847 setOperationAction(ISD::MUL, MVT::i32, Expand);
1848
1849 if (Subtarget->useSoftMulDiv()) {
1850 // .umul works for both signed and unsigned
1851 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1852 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1853 setLibcallName(Call: RTLIB::MUL_I32, Name: ".umul");
1854
1855 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1856 setLibcallName(Call: RTLIB::SDIV_I32, Name: ".div");
1857
1858 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1859 setLibcallName(Call: RTLIB::UDIV_I32, Name: ".udiv");
1860
1861 setLibcallName(Call: RTLIB::SREM_I32, Name: ".rem");
1862 setLibcallName(Call: RTLIB::UREM_I32, Name: ".urem");
1863 }
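
  // The dotted names above (.umul, .div, ...) are the traditional SPARC
  // software multiply/divide support routines supplied by the system
  // libraries for hardware without MUL/DIV instructions; the leading dot
  // marks them as assembler-level entry points rather than C functions.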
1864
1865 if (Subtarget->is64Bit()) {
1866 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1867 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1868 setOperationAction(ISD::MULHU, MVT::i64, Expand);
1869 setOperationAction(ISD::MULHS, MVT::i64, Expand);
1870
1871 setOperationAction(ISD::UMULO, MVT::i64, Custom);
1872 setOperationAction(ISD::SMULO, MVT::i64, Custom);
1873
1874 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
1875 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
1876 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
1877 }
1878
1879 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1880 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1881 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1882 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1883
1884 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1885 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1886
1887 // Use the default implementation.
1888 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1889 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1890 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1891 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1892 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1893
1894 setStackPointerRegisterToSaveRestore(SP::O6);
1895
1896 setOperationAction(ISD::CTPOP, MVT::i32,
1897 Subtarget->usePopc() ? Legal : Expand);
1898
1899 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1900 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1901 setOperationAction(ISD::STORE, MVT::f128, Legal);
1902 } else {
1903 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1904 setOperationAction(ISD::STORE, MVT::f128, Custom);
1905 }
1906
1907 if (Subtarget->hasHardQuad()) {
1908 setOperationAction(ISD::FADD, MVT::f128, Legal);
1909 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1910 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1911 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1912 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1913 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1914 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1915 if (Subtarget->isV9()) {
1916 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1917 setOperationAction(ISD::FABS, MVT::f128, Legal);
1918 } else {
1919 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1920 setOperationAction(ISD::FABS, MVT::f128, Custom);
1921 }
1922
1923 if (!Subtarget->is64Bit()) {
1924 setLibcallName(Call: RTLIB::FPTOSINT_F128_I64, Name: "_Q_qtoll");
1925 setLibcallName(Call: RTLIB::FPTOUINT_F128_I64, Name: "_Q_qtoull");
1926 setLibcallName(Call: RTLIB::SINTTOFP_I64_F128, Name: "_Q_lltoq");
1927 setLibcallName(Call: RTLIB::UINTTOFP_I64_F128, Name: "_Q_ulltoq");
1928 }
1929
1930 } else {
1931 // Custom legalize f128 operations.
1932
1933 setOperationAction(ISD::FADD, MVT::f128, Custom);
1934 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1935 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1936 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1937 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1938 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1939 setOperationAction(ISD::FABS, MVT::f128, Custom);
1940
1941 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1942 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1943 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1944
1945 // Set up runtime library names.
1946 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1947 setLibcallName(Call: RTLIB::ADD_F128, Name: "_Qp_add");
1948 setLibcallName(Call: RTLIB::SUB_F128, Name: "_Qp_sub");
1949 setLibcallName(Call: RTLIB::MUL_F128, Name: "_Qp_mul");
1950 setLibcallName(Call: RTLIB::DIV_F128, Name: "_Qp_div");
1951 setLibcallName(Call: RTLIB::SQRT_F128, Name: "_Qp_sqrt");
1952 setLibcallName(Call: RTLIB::FPTOSINT_F128_I32, Name: "_Qp_qtoi");
1953 setLibcallName(Call: RTLIB::FPTOUINT_F128_I32, Name: "_Qp_qtoui");
1954 setLibcallName(Call: RTLIB::SINTTOFP_I32_F128, Name: "_Qp_itoq");
1955 setLibcallName(Call: RTLIB::UINTTOFP_I32_F128, Name: "_Qp_uitoq");
1956 setLibcallName(Call: RTLIB::FPTOSINT_F128_I64, Name: "_Qp_qtox");
1957 setLibcallName(Call: RTLIB::FPTOUINT_F128_I64, Name: "_Qp_qtoux");
1958 setLibcallName(Call: RTLIB::SINTTOFP_I64_F128, Name: "_Qp_xtoq");
1959 setLibcallName(Call: RTLIB::UINTTOFP_I64_F128, Name: "_Qp_uxtoq");
1960 setLibcallName(Call: RTLIB::FPEXT_F32_F128, Name: "_Qp_stoq");
1961 setLibcallName(Call: RTLIB::FPEXT_F64_F128, Name: "_Qp_dtoq");
1962 setLibcallName(Call: RTLIB::FPROUND_F128_F32, Name: "_Qp_qtos");
1963 setLibcallName(Call: RTLIB::FPROUND_F128_F64, Name: "_Qp_qtod");
1964 } else if (!Subtarget->useSoftFloat()) {
1965 setLibcallName(Call: RTLIB::ADD_F128, Name: "_Q_add");
1966 setLibcallName(Call: RTLIB::SUB_F128, Name: "_Q_sub");
1967 setLibcallName(Call: RTLIB::MUL_F128, Name: "_Q_mul");
1968 setLibcallName(Call: RTLIB::DIV_F128, Name: "_Q_div");
1969 setLibcallName(Call: RTLIB::SQRT_F128, Name: "_Q_sqrt");
1970 setLibcallName(Call: RTLIB::FPTOSINT_F128_I32, Name: "_Q_qtoi");
1971 setLibcallName(Call: RTLIB::FPTOUINT_F128_I32, Name: "_Q_qtou");
1972 setLibcallName(Call: RTLIB::SINTTOFP_I32_F128, Name: "_Q_itoq");
1973 setLibcallName(Call: RTLIB::UINTTOFP_I32_F128, Name: "_Q_utoq");
1974 setLibcallName(Call: RTLIB::FPTOSINT_F128_I64, Name: "_Q_qtoll");
1975 setLibcallName(Call: RTLIB::FPTOUINT_F128_I64, Name: "_Q_qtoull");
1976 setLibcallName(Call: RTLIB::SINTTOFP_I64_F128, Name: "_Q_lltoq");
1977 setLibcallName(Call: RTLIB::UINTTOFP_I64_F128, Name: "_Q_ulltoq");
1978 setLibcallName(Call: RTLIB::FPEXT_F32_F128, Name: "_Q_stoq");
1979 setLibcallName(Call: RTLIB::FPEXT_F64_F128, Name: "_Q_dtoq");
1980 setLibcallName(Call: RTLIB::FPROUND_F128_F32, Name: "_Q_qtos");
1981 setLibcallName(Call: RTLIB::FPROUND_F128_F64, Name: "_Q_qtod");
1982 }
1983 }
1984
1985 if (Subtarget->fixAllFDIVSQRT()) {
1986 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead, as
1987 // the former instructions are affected by errata on LEON processors.
1988 setOperationAction(ISD::FDIV, MVT::f32, Promote);
1989 setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1990 }
1991
1992 if (Subtarget->hasNoFMULS()) {
1993 setOperationAction(ISD::FMUL, MVT::f32, Promote);
1994 }
1995
1996 // Custom combine bitcast between f64 and v2i32
1997 if (!Subtarget->is64Bit())
1998 setTargetDAGCombine(ISD::BITCAST);
1999
2000 if (Subtarget->hasLeonCycleCounter())
2001 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
2002
2003 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2004
2005 setMinFunctionAlignment(Align(4));
2006
2007 computeRegisterProperties(Subtarget->getRegisterInfo());
2008}
2009
2010bool SparcTargetLowering::useSoftFloat() const {
2011 return Subtarget->useSoftFloat();
2012}
2013
2014const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
2015 switch ((SPISD::NodeType)Opcode) {
2016 case SPISD::FIRST_NUMBER: break;
2017 case SPISD::CMPICC: return "SPISD::CMPICC";
2018 case SPISD::CMPFCC: return "SPISD::CMPFCC";
2019 case SPISD::CMPFCC_V9:
2020 return "SPISD::CMPFCC_V9";
2021 case SPISD::BRICC: return "SPISD::BRICC";
2022 case SPISD::BPICC:
2023 return "SPISD::BPICC";
2024 case SPISD::BPXCC:
2025 return "SPISD::BPXCC";
2026 case SPISD::BRFCC: return "SPISD::BRFCC";
2027 case SPISD::BRFCC_V9:
2028 return "SPISD::BRFCC_V9";
2029 case SPISD::BR_REG:
2030 return "SPISD::BR_REG";
2031 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
2032 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
2033 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
2034 case SPISD::SELECT_REG:
2035 return "SPISD::SELECT_REG";
2036 case SPISD::Hi: return "SPISD::Hi";
2037 case SPISD::Lo: return "SPISD::Lo";
2038 case SPISD::FTOI: return "SPISD::FTOI";
2039 case SPISD::ITOF: return "SPISD::ITOF";
2040 case SPISD::FTOX: return "SPISD::FTOX";
2041 case SPISD::XTOF: return "SPISD::XTOF";
2042 case SPISD::CALL: return "SPISD::CALL";
2043 case SPISD::RET_GLUE: return "SPISD::RET_GLUE";
2044 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
2045 case SPISD::FLUSHW: return "SPISD::FLUSHW";
2046 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
2047 case SPISD::TLS_LD: return "SPISD::TLS_LD";
2048 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
2049 case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
2050 case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
2051 }
2052 return nullptr;
2053}
2054
2055EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
2056 EVT VT) const {
2057 if (!VT.isVector())
2058 return MVT::i32;
2059 return VT.changeVectorElementTypeToInteger();
2060}
2061
2062 /// computeKnownBitsForTargetNode - Determine which bits of Op are known to
2063 /// be zero or one. Op is expected to be a target-specific node. Used by the
2064 /// DAG combiner.
2065void SparcTargetLowering::computeKnownBitsForTargetNode
2066 (const SDValue Op,
2067 KnownBits &Known,
2068 const APInt &DemandedElts,
2069 const SelectionDAG &DAG,
2070 unsigned Depth) const {
2071 KnownBits Known2;
2072 Known.resetAll();
2073
2074 switch (Op.getOpcode()) {
2075 default: break;
2076 case SPISD::SELECT_ICC:
2077 case SPISD::SELECT_XCC:
2078 case SPISD::SELECT_FCC:
2079 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 1), Depth: Depth + 1);
2080 Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1);
2081
2082 // Only known if known in both the LHS and RHS.
2083 Known = Known.intersectWith(RHS: Known2);
2084 break;
2085 }
2086}
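
// Worked example: if a SELECT_ICC picks between the constants 8 and 12,
// bits 0-1 and bits 4 and up are known zero in both operands, so the
// intersection lets the DAG combiner treat the result as a multiple of 4
// whichever side is selected.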
2087
2088 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
2089 // set LHS/RHS to the operands of the original setcc and SPCC to its condition.
2090static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2091 ISD::CondCode CC, unsigned &SPCC) {
2092 if (isNullConstant(V: RHS) && CC == ISD::SETNE &&
2093 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2094 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2095 LHS.getOperand(i: 3).getOpcode() == SPISD::CMPICC) ||
2096 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2097 (LHS.getOperand(i: 3).getOpcode() == SPISD::CMPFCC ||
2098 LHS.getOperand(i: 3).getOpcode() == SPISD::CMPFCC_V9))) &&
2099 isOneConstant(V: LHS.getOperand(i: 0)) && isNullConstant(V: LHS.getOperand(i: 1))) {
2100 SDValue CMPCC = LHS.getOperand(i: 3);
2101 SPCC = LHS.getConstantOperandVal(i: 2);
2102 LHS = CMPCC.getOperand(i: 0);
2103 RHS = CMPCC.getOperand(i: 1);
2104 }
2105}
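
// In other words, this matches the pattern left behind when an earlier
// setcc was lowered to "select_cc 1, 0, cond, (cmp a, b)" and that 0/1
// boolean is now being tested against zero again. Rather than comparing
// the materialized boolean, the caller can branch or select on the
// original comparison of a and b directly, using the condition code saved
// in operand 2.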
2106
2107// Convert to a target node and set target flags.
2108SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2109 SelectionDAG &DAG) const {
2110 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Val&: Op))
2111 return DAG.getTargetGlobalAddress(GV: GA->getGlobal(),
2112 DL: SDLoc(GA),
2113 VT: GA->getValueType(ResNo: 0),
2114 offset: GA->getOffset(), TargetFlags: TF);
2115
2116 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Val&: Op))
2117 return DAG.getTargetConstantPool(C: CP->getConstVal(), VT: CP->getValueType(ResNo: 0),
2118 Align: CP->getAlign(), Offset: CP->getOffset(), TargetFlags: TF);
2119
2120 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Val&: Op))
2121 return DAG.getTargetBlockAddress(BA: BA->getBlockAddress(),
2122 VT: Op.getValueType(),
2123 Offset: 0,
2124 TargetFlags: TF);
2125
2126 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Val&: Op))
2127 return DAG.getTargetExternalSymbol(Sym: ES->getSymbol(),
2128 VT: ES->getValueType(ResNo: 0), TargetFlags: TF);
2129
2130 llvm_unreachable("Unhandled address SDNode");
2131}
2132
2133// Split Op into high and low parts according to HiTF and LoTF.
2134// Return an ADD node combining the parts.
2135SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2136 unsigned HiTF, unsigned LoTF,
2137 SelectionDAG &DAG) const {
2138 SDLoc DL(Op);
2139 EVT VT = Op.getValueType();
2140 SDValue Hi = DAG.getNode(Opcode: SPISD::Hi, DL, VT, Operand: withTargetFlags(Op, TF: HiTF, DAG));
2141 SDValue Lo = DAG.getNode(Opcode: SPISD::Lo, DL, VT, Operand: withTargetFlags(Op, TF: LoTF, DAG));
2142 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: Hi, N2: Lo);
2143}
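
// For the absolute 32-bit (abs32) case this becomes the classic SPARC
// two-instruction sequence (a sketch of the expected selection):
//   sethi %hi(sym), %reg           ! upper 22 bits
//   add   %reg, %lo(sym), %reg     ! lower 10 bits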
2144
2145// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2146// or ExternalSymbol SDNode.
2147SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2148 SDLoc DL(Op);
2149 EVT VT = getPointerTy(DL: DAG.getDataLayout());
2150
2151 // Handle PIC mode first. SPARC needs a GOT load for every variable!
2152 if (isPositionIndependent()) {
2153 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2154 PICLevel::Level picLevel = M->getPICLevel();
2155 SDValue Idx;
2156
2157 if (picLevel == PICLevel::SmallPIC) {
2158 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2159 Idx = DAG.getNode(Opcode: SPISD::Lo, DL, VT: Op.getValueType(),
2160 Operand: withTargetFlags(Op, TF: SparcMCExpr::VK_Sparc_GOT13, DAG));
2161 } else {
2162 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2163 Idx = makeHiLoPair(Op, HiTF: SparcMCExpr::VK_Sparc_GOT22,
2164 LoTF: SparcMCExpr::VK_Sparc_GOT10, DAG);
2165 }
2166
2167 SDValue GlobalBase = DAG.getNode(Opcode: SPISD::GLOBAL_BASE_REG, DL, VT);
2168 SDValue AbsAddr = DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: GlobalBase, N2: Idx);
2169 // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
2170 // function has calls.
2171 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2172 MFI.setHasCalls(true);
2173 return DAG.getLoad(VT, dl: DL, Chain: DAG.getEntryNode(), Ptr: AbsAddr,
2174 PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()));
2175 }
2176
2177 // This is one of the absolute code models.
2178 switch(getTargetMachine().getCodeModel()) {
2179 default:
2180 llvm_unreachable("Unsupported absolute code model");
2181 case CodeModel::Small:
2182 // abs32.
2183 return makeHiLoPair(Op, HiTF: SparcMCExpr::VK_Sparc_HI,
2184 LoTF: SparcMCExpr::VK_Sparc_LO, DAG);
2185 case CodeModel::Medium: {
2186 // abs44.
2187 SDValue H44 = makeHiLoPair(Op, HiTF: SparcMCExpr::VK_Sparc_H44,
2188 LoTF: SparcMCExpr::VK_Sparc_M44, DAG);
2189 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2190 SDValue L44 = withTargetFlags(Op, TF: SparcMCExpr::VK_Sparc_L44, DAG);
2191 L44 = DAG.getNode(Opcode: SPISD::Lo, DL, VT, Operand: L44);
2192 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: H44, N2: L44);
2193 }
2194 case CodeModel::Large: {
2195 // abs64.
2196 SDValue Hi = makeHiLoPair(Op, HiTF: SparcMCExpr::VK_Sparc_HH,
2197 LoTF: SparcMCExpr::VK_Sparc_HM, DAG);
2198 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2199 SDValue Lo = makeHiLoPair(Op, HiTF: SparcMCExpr::VK_Sparc_HI,
2200 LoTF: SparcMCExpr::VK_Sparc_LO, DAG);
2201 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: Hi, N2: Lo);
2202 }
2203 }
2204}
2205
2206SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2207 SelectionDAG &DAG) const {
2208 return makeAddress(Op, DAG);
2209}
2210
2211SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2212 SelectionDAG &DAG) const {
2213 return makeAddress(Op, DAG);
2214}
2215
2216SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2217 SelectionDAG &DAG) const {
2218 return makeAddress(Op, DAG);
2219}
2220
2221SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2222 SelectionDAG &DAG) const {
2223
2224 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op);
2225 if (DAG.getTarget().useEmulatedTLS())
2226 return LowerToTLSEmulatedModel(GA, DAG);
2227
2228 SDLoc DL(GA);
2229 const GlobalValue *GV = GA->getGlobal();
2230 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
2231
2232 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2233
2234 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2235 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2236 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
2237 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
2238 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2239 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
2240 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
2241 unsigned addTF = ((model == TLSModel::GeneralDynamic)
2242 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
2243 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
2244 unsigned callTF = ((model == TLSModel::GeneralDynamic)
2245 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
2246 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
2247
2248 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2249 SDValue Base = DAG.getNode(Opcode: SPISD::GLOBAL_BASE_REG, DL, VT: PtrVT);
2250 SDValue Argument = DAG.getNode(Opcode: SPISD::TLS_ADD, DL, VT: PtrVT, N1: Base, N2: HiLo,
2251 N3: withTargetFlags(Op, TF: addTF, DAG));
2252
2253 SDValue Chain = DAG.getEntryNode();
2254 SDValue InGlue;
2255
2256 Chain = DAG.getCALLSEQ_START(Chain, InSize: 1, OutSize: 0, DL);
2257 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2258 InGlue = Chain.getValue(R: 1);
2259 SDValue Callee = DAG.getTargetExternalSymbol(Sym: "__tls_get_addr", VT: PtrVT);
2260 SDValue Symbol = withTargetFlags(Op, TF: callTF, DAG);
2261
2262 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2263 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2264 MF: DAG.getMachineFunction(), CC: CallingConv::C);
2265 assert(Mask && "Missing call preserved mask for calling convention");
2266 SDValue Ops[] = {Chain,
2267 Callee,
2268 Symbol,
2269 DAG.getRegister(SP::O0, PtrVT),
2270 DAG.getRegisterMask(Mask),
2271 InGlue};
2272 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2273 InGlue = Chain.getValue(R: 1);
2274 Chain = DAG.getCALLSEQ_END(Chain, Size1: 1, Size2: 0, Glue: InGlue, DL);
2275 InGlue = Chain.getValue(R: 1);
2276 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2277
2278 if (model != TLSModel::LocalDynamic)
2279 return Ret;
2280
2281 SDValue Hi = DAG.getNode(Opcode: SPISD::Hi, DL, VT: PtrVT,
2282 Operand: withTargetFlags(Op, TF: SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
2283 SDValue Lo = DAG.getNode(Opcode: SPISD::Lo, DL, VT: PtrVT,
2284 Operand: withTargetFlags(Op, TF: SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
2285 HiLo = DAG.getNode(Opcode: ISD::XOR, DL, VT: PtrVT, N1: Hi, N2: Lo);
2286 return DAG.getNode(Opcode: SPISD::TLS_ADD, DL, VT: PtrVT, N1: Ret, N2: HiLo,
2287 N3: withTargetFlags(Op, TF: SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
2288 }
2289
2290 if (model == TLSModel::InitialExec) {
2291 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2292 : SparcMCExpr::VK_Sparc_TLS_IE_LD);
2293
2294 SDValue Base = DAG.getNode(Opcode: SPISD::GLOBAL_BASE_REG, DL, VT: PtrVT);
2295
2296 // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
2297 // function has calls.
2298 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2299 MFI.setHasCalls(true);
2300
2301 SDValue TGA = makeHiLoPair(Op,
2302 HiTF: SparcMCExpr::VK_Sparc_TLS_IE_HI22,
2303 LoTF: SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
2304 SDValue Ptr = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: Base, N2: TGA);
2305 SDValue Offset = DAG.getNode(Opcode: SPISD::TLS_LD,
2306 DL, VT: PtrVT, N1: Ptr,
2307 N2: withTargetFlags(Op, TF: ldTF, DAG));
2308 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2309 DAG.getRegister(SP::G7, PtrVT), Offset,
2310 withTargetFlags(Op,
2311 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
2312 }
2313
2314 assert(model == TLSModel::LocalExec);
2315 SDValue Hi = DAG.getNode(Opcode: SPISD::Hi, DL, VT: PtrVT,
2316 Operand: withTargetFlags(Op, TF: SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
2317 SDValue Lo = DAG.getNode(Opcode: SPISD::Lo, DL, VT: PtrVT,
2318 Operand: withTargetFlags(Op, TF: SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
2319 SDValue Offset = DAG.getNode(Opcode: ISD::XOR, DL, VT: PtrVT, N1: Hi, N2: Lo);
2320
2321 return DAG.getNode(ISD::ADD, DL, PtrVT,
2322 DAG.getRegister(SP::G7, PtrVT), Offset);
2323}
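
// For reference, the local-exec path above selects to roughly (a sketch):
//   sethi %tle_hix22(sym), %r
//   xor   %r, %tle_lox10(sym), %r
//   add   %g7, %r, %result         ! %g7 holds the thread pointer
// The hix22/lox10 pair composes with XOR rather than ADD, which lets the
// (typically negative) thread-pointer-relative offset sign-extend
// correctly.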
2324
2325SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2326 ArgListTy &Args, SDValue Arg,
2327 const SDLoc &DL,
2328 SelectionDAG &DAG) const {
2329 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2330 EVT ArgVT = Arg.getValueType();
2331 Type *ArgTy = ArgVT.getTypeForEVT(Context&: *DAG.getContext());
2332
2333 ArgListEntry Entry;
2334 Entry.Node = Arg;
2335 Entry.Ty = ArgTy;
2336
2337 if (ArgTy->isFP128Ty()) {
2338 // Create a stack object and pass the pointer to the library function.
2339 int FI = MFI.CreateStackObject(Size: 16, Alignment: Align(8), isSpillSlot: false);
2340 SDValue FIPtr = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout()));
2341 Chain = DAG.getStore(Chain, dl: DL, Val: Entry.Node, Ptr: FIPtr, PtrInfo: MachinePointerInfo(),
2342 Alignment: Align(8));
2343
2344 Entry.Node = FIPtr;
2345 Entry.Ty = PointerType::getUnqual(ElementType: ArgTy);
2346 }
2347 Args.push_back(x: Entry);
2348 return Chain;
2349}
2350
2351SDValue
2352SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2353 const char *LibFuncName,
2354 unsigned numArgs) const {
2355
2356 ArgListTy Args;
2357
2358 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2359 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
2360
2361 SDValue Callee = DAG.getExternalSymbol(Sym: LibFuncName, VT: PtrVT);
2362 Type *RetTy = Op.getValueType().getTypeForEVT(Context&: *DAG.getContext());
2363 Type *RetTyABI = RetTy;
2364 SDValue Chain = DAG.getEntryNode();
2365 SDValue RetPtr;
2366
2367 if (RetTy->isFP128Ty()) {
2368 // Create a Stack Object to receive the return value of type f128.
2369 ArgListEntry Entry;
2370 int RetFI = MFI.CreateStackObject(Size: 16, Alignment: Align(8), isSpillSlot: false);
2371 RetPtr = DAG.getFrameIndex(FI: RetFI, VT: PtrVT);
2372 Entry.Node = RetPtr;
2373 Entry.Ty = PointerType::getUnqual(ElementType: RetTy);
2374 if (!Subtarget->is64Bit()) {
2375 Entry.IsSRet = true;
2376 Entry.IndirectType = RetTy;
2377 }
2378 Entry.IsReturned = false;
2379 Args.push_back(x: Entry);
2380 RetTyABI = Type::getVoidTy(C&: *DAG.getContext());
2381 }
2382
2383 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2384 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2385 Chain = LowerF128_LibCallArg(Chain, Args, Arg: Op.getOperand(i), DL: SDLoc(Op), DAG);
2386 }
2387 TargetLowering::CallLoweringInfo CLI(DAG);
2388 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2389 .setCallee(CC: CallingConv::C, ResultType: RetTyABI, Target: Callee, ArgsList: std::move(Args));
2390
2391 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2392
2393 // The chain is in the second result.
2394 if (RetTyABI == RetTy)
2395 return CallInfo.first;
2396
2397 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2398
2399 Chain = CallInfo.second;
2400
2401 // Load RetPtr to get the return value.
2402 return DAG.getLoad(VT: Op.getValueType(), dl: SDLoc(Op), Chain, Ptr: RetPtr,
2403 PtrInfo: MachinePointerInfo(), Alignment: Align(8));
2404}
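
// Illustrative shape of the resulting call for a 64-bit f128 multiply,
// using the _Qp_* names registered in the constructor (a sketch):
//   %c = fmul fp128 %a, %b
// becomes, in effect,
//   call void @_Qp_mul(ptr %c.addr, ptr %a.addr, ptr %b.addr)
// with all f128 values passed and returned indirectly through the stack
// slots created above.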
2405
2406SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2407 unsigned &SPCC, const SDLoc &DL,
2408 SelectionDAG &DAG) const {
2409
2410 const char *LibCall = nullptr;
2411 bool is64Bit = Subtarget->is64Bit();
2412 switch(SPCC) {
2413 default: llvm_unreachable("Unhandled conditional code!");
2414 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2415 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2416 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2417 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2418 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2419 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2420 case SPCC::FCC_UL :
2421 case SPCC::FCC_ULE:
2422 case SPCC::FCC_UG :
2423 case SPCC::FCC_UGE:
2424 case SPCC::FCC_U :
2425 case SPCC::FCC_O :
2426 case SPCC::FCC_LG :
2427 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2428 }
2429
2430 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
2431 SDValue Callee = DAG.getExternalSymbol(Sym: LibCall, VT: PtrVT);
2432 Type *RetTy = Type::getInt32Ty(C&: *DAG.getContext());
2433 ArgListTy Args;
2434 SDValue Chain = DAG.getEntryNode();
2435 Chain = LowerF128_LibCallArg(Chain, Args, Arg: LHS, DL, DAG);
2436 Chain = LowerF128_LibCallArg(Chain, Args, Arg: RHS, DL, DAG);
2437
2438 TargetLowering::CallLoweringInfo CLI(DAG);
2439 CLI.setDebugLoc(DL).setChain(Chain)
2440 .setCallee(CC: CallingConv::C, ResultType: RetTy, Target: Callee, ArgsList: std::move(Args));
2441
2442 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2443
2444 // The result value is in the first result, the chain in the second.
2445 SDValue Result = CallInfo.first;
2446
2447 switch(SPCC) {
2448 default: {
2449 SDValue RHS = DAG.getConstant(Val: 0, DL, VT: Result.getValueType());
2450 SPCC = SPCC::ICC_NE;
2451 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2452 }
2453 case SPCC::FCC_UL : {
2454 SDValue Mask = DAG.getConstant(Val: 1, DL, VT: Result.getValueType());
2455 Result = DAG.getNode(Opcode: ISD::AND, DL, VT: Result.getValueType(), N1: Result, N2: Mask);
2456 SDValue RHS = DAG.getConstant(Val: 0, DL, VT: Result.getValueType());
2457 SPCC = SPCC::ICC_NE;
2458 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2459 }
2460 case SPCC::FCC_ULE: {
2461 SDValue RHS = DAG.getConstant(Val: 2, DL, VT: Result.getValueType());
2462 SPCC = SPCC::ICC_NE;
2463 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2464 }
2465 case SPCC::FCC_UG : {
2466 SDValue RHS = DAG.getConstant(Val: 1, DL, VT: Result.getValueType());
2467 SPCC = SPCC::ICC_G;
2468 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2469 }
2470 case SPCC::FCC_UGE: {
2471 SDValue RHS = DAG.getConstant(Val: 1, DL, VT: Result.getValueType());
2472 SPCC = SPCC::ICC_NE;
2473 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2474 }
2475
2476 case SPCC::FCC_U : {
2477 SDValue RHS = DAG.getConstant(Val: 3, DL, VT: Result.getValueType());
2478 SPCC = SPCC::ICC_E;
2479 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2480 }
2481 case SPCC::FCC_O : {
2482 SDValue RHS = DAG.getConstant(Val: 3, DL, VT: Result.getValueType());
2483 SPCC = SPCC::ICC_NE;
2484 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2485 }
2486 case SPCC::FCC_LG : {
2487 SDValue Mask = DAG.getConstant(Val: 3, DL, VT: Result.getValueType());
2488 Result = DAG.getNode(Opcode: ISD::AND, DL, VT: Result.getValueType(), N1: Result, N2: Mask);
2489 SDValue RHS = DAG.getConstant(Val: 0, DL, VT: Result.getValueType());
2490 SPCC = SPCC::ICC_NE;
2491 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2492 }
2493 case SPCC::FCC_UE : {
2494 SDValue Mask = DAG.getConstant(Val: 3, DL, VT: Result.getValueType());
2495 Result = DAG.getNode(Opcode: ISD::AND, DL, VT: Result.getValueType(), N1: Result, N2: Mask);
2496 SDValue RHS = DAG.getConstant(Val: 0, DL, VT: Result.getValueType());
2497 SPCC = SPCC::ICC_E;
2498 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2499 }
2500 }
2501}
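
// The cases above decode the small integer returned by _Q_cmp/_Qp_cmp; the
// masks and constants used imply the encoding 0 = equal, 1 = less,
// 2 = greater, 3 = unordered. For example, FCC_UL ("unordered or less")
// tests (result & 1) != 0, which holds exactly for 1 and 3.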
2502
2503static SDValue
2504LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2505 const SparcTargetLowering &TLI) {
2506
2507 if (Op.getOperand(0).getValueType() == MVT::f64)
2508 return TLI.LowerF128Op(Op, DAG,
2509 LibFuncName: TLI.getLibcallName(Call: RTLIB::FPEXT_F64_F128), numArgs: 1);
2510
2511 if (Op.getOperand(0).getValueType() == MVT::f32)
2512 return TLI.LowerF128Op(Op, DAG,
2513 LibFuncName: TLI.getLibcallName(Call: RTLIB::FPEXT_F32_F128), numArgs: 1);
2514
2515 llvm_unreachable("fpextend with non-float operand!");
2516 return SDValue();
2517}
2518
2519static SDValue
2520LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2521 const SparcTargetLowering &TLI) {
2522 // FP_ROUND on f64 and f32 are legal.
2523 if (Op.getOperand(0).getValueType() != MVT::f128)
2524 return Op;
2525
2526 if (Op.getValueType() == MVT::f64)
2527 return TLI.LowerF128Op(Op, DAG,
2528 LibFuncName: TLI.getLibcallName(Call: RTLIB::FPROUND_F128_F64), numArgs: 1);
2529 if (Op.getValueType() == MVT::f32)
2530 return TLI.LowerF128Op(Op, DAG,
2531 LibFuncName: TLI.getLibcallName(Call: RTLIB::FPROUND_F128_F32), numArgs: 1);
2532
2533 llvm_unreachable("fpround to non-float!");
2534 return SDValue();
2535}
2536
2537static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2538 const SparcTargetLowering &TLI,
2539 bool hasHardQuad) {
2540 SDLoc dl(Op);
2541 EVT VT = Op.getValueType();
2542 assert(VT == MVT::i32 || VT == MVT::i64);
2543
2544 // Expand f128 operations to fp128 ABI calls.
2545 if (Op.getOperand(0).getValueType() == MVT::f128
2546 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2547 const char *libName = TLI.getLibcallName(VT == MVT::i32
2548 ? RTLIB::FPTOSINT_F128_I32
2549 : RTLIB::FPTOSINT_F128_I64);
2550 return TLI.LowerF128Op(Op, DAG, LibFuncName: libName, numArgs: 1);
2551 }
2552
2553 // Expand if the resulting type is illegal.
2554 if (!TLI.isTypeLegal(VT))
2555 return SDValue();
2556
2557 // Otherwise, convert the FP value to an integer in an FP register.
2558 if (VT == MVT::i32)
2559 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2560 else
2561 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2562
2563 return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Op);
2564}
2565
2566static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2567 const SparcTargetLowering &TLI,
2568 bool hasHardQuad) {
2569 SDLoc dl(Op);
2570 EVT OpVT = Op.getOperand(i: 0).getValueType();
2571 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2572
2573 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2574
2575 // Expand f128 operations to fp128 ABI calls.
2576 if (Op.getValueType() == MVT::f128
2577 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2578 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2579 ? RTLIB::SINTTOFP_I32_F128
2580 : RTLIB::SINTTOFP_I64_F128);
2581 return TLI.LowerF128Op(Op, DAG, LibFuncName: libName, numArgs: 1);
2582 }
2583
2584 // Expand if the operand type is illegal.
2585 if (!TLI.isTypeLegal(VT: OpVT))
2586 return SDValue();
2587
2588 // Otherwise, convert the integer value to FP in an FP register.
2589 SDValue Tmp = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: floatVT, Operand: Op.getOperand(i: 0));
2590 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2591 return DAG.getNode(Opcode: opcode, DL: dl, VT: Op.getValueType(), Operand: Tmp);
2592}
2593
2594static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2595 const SparcTargetLowering &TLI,
2596 bool hasHardQuad) {
2597 SDLoc dl(Op);
2598 EVT VT = Op.getValueType();
2599
2600 // Expand if the operation does not involve f128, or if the target supports
2601 // hardware quad instructions and the resulting type is legal.
2602 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2603 (hasHardQuad && TLI.isTypeLegal(VT)))
2604 return SDValue();
2605
2606 assert(VT == MVT::i32 || VT == MVT::i64);
2607
2608 return TLI.LowerF128Op(Op, DAG,
2609 TLI.getLibcallName(VT == MVT::i32
2610 ? RTLIB::FPTOUINT_F128_I32
2611 : RTLIB::FPTOUINT_F128_I64),
2612 1);
2613}
2614
2615static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2616 const SparcTargetLowering &TLI,
2617 bool hasHardQuad) {
2618 SDLoc dl(Op);
2619 EVT OpVT = Op.getOperand(i: 0).getValueType();
2620 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2621
2622 // Expand if the operation does not involve f128, or if the target supports
2623 // hardware quad instructions and the operand type is legal.
2624 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2625 return SDValue();
2626
2627 return TLI.LowerF128Op(Op, DAG,
2628 TLI.getLibcallName(OpVT == MVT::i32
2629 ? RTLIB::UINTTOFP_I32_F128
2630 : RTLIB::UINTTOFP_I64_F128),
2631 1);
2632}
2633
2634static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2635 const SparcTargetLowering &TLI, bool hasHardQuad,
2636 bool isV9, bool is64Bit) {
2637 SDValue Chain = Op.getOperand(i: 0);
2638 ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get();
2639 SDValue LHS = Op.getOperand(i: 2);
2640 SDValue RHS = Op.getOperand(i: 3);
2641 SDValue Dest = Op.getOperand(i: 4);
2642 SDLoc dl(Op);
2643 unsigned Opc, SPCC = ~0U;
2644
2645 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2646 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2647 LookThroughSetCC(LHS, RHS, CC, SPCC);
2648 assert(LHS.getValueType() == RHS.getValueType());
2649
2650 // Get the condition flag.
2651 SDValue CompareFlag;
2652 if (LHS.getValueType().isInteger()) {
2653 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2654 // and the RHS is zero, we might be able to use a specialized branch.
2655 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2656 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC))
2657 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2658 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2659 LHS);
2660
2661 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2662 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2663 if (isV9)
2664 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2665 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2666 else
2667 // Non-v9 targets don't have xcc.
2668 Opc = SPISD::BRICC;
2669 } else {
2670 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2671 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2672 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, DL: dl, DAG);
2673 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2674 } else {
2675 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2676 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2677 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2678 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2679 }
2680 }
2681 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2682 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2683}
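
// Example of the BR_REG shortcut above (a sketch): on 64-bit V9,
//   br_cc setlt, i64 %x, 0, %bb
// needs no integer compare at all and can select down to
//   brlz %x, .LBB0_1
// via the rcond mapping in intCondCCodeToRcond().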
2684
2685static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2686 const SparcTargetLowering &TLI, bool hasHardQuad,
2687 bool isV9, bool is64Bit) {
2688 SDValue LHS = Op.getOperand(i: 0);
2689 SDValue RHS = Op.getOperand(i: 1);
2690 ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get();
2691 SDValue TrueVal = Op.getOperand(i: 2);
2692 SDValue FalseVal = Op.getOperand(i: 3);
2693 SDLoc dl(Op);
2694 unsigned Opc, SPCC = ~0U;
2695
2696 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2697 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2698 LookThroughSetCC(LHS, RHS, CC, SPCC);
2699 assert(LHS.getValueType() == RHS.getValueType());
2700
2701 SDValue CompareFlag;
2702 if (LHS.getValueType().isInteger()) {
2703 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2704 // and the RHS is zero, we might be able to use a specialized select.
2705 // All SELECT_CC between any two scalar integer types are eligible for
2706 // lowering to specialized instructions. Additionally, f32 and f64 types
2707 // are also eligible, but for f128 we can only use the specialized
2708 // instruction when we have hardquad.
2709 EVT ValType = TrueVal.getValueType();
2710 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2711 ValType == MVT::f64 ||
2712 (ValType == MVT::f128 && hasHardQuad);
2713 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2714 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2715 return DAG.getNode(
2716 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2717 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2718
2719 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2720 Opc = LHS.getValueType() == MVT::i32 ?
2721 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2722 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2723 } else {
2724 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2725 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2726 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, DL: dl, DAG);
2727 Opc = SPISD::SELECT_ICC;
2728 } else {
2729 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2730 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2731 Opc = SPISD::SELECT_FCC;
2732 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2733 }
2734 }
2735 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2736 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2737}
2738
2739static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2740 const SparcTargetLowering &TLI) {
2741 MachineFunction &MF = DAG.getMachineFunction();
2742 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2743 auto PtrVT = TLI.getPointerTy(DL: DAG.getDataLayout());
2744
2745 // Need frame address to find the address of VarArgsFrameIndex.
2746 MF.getFrameInfo().setFrameAddressIsTaken(true);
2747
2748 // vastart just stores the address of the VarArgsFrameIndex slot into the
2749 // memory location argument.
2750 SDLoc DL(Op);
2751 SDValue Offset =
2752 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2753 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2754 const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue();
2755 return DAG.getStore(Chain: Op.getOperand(i: 0), dl: DL, Val: Offset, Ptr: Op.getOperand(i: 1),
2756 PtrInfo: MachinePointerInfo(SV));
2757}
2758
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
}

static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  MaybeAlign Alignment =
      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: this also implies
  // support for over-aligned function frames + dynamic allocations,
  // which currently isn't supported at all.)
  if (Alignment && *Alignment > StackAlign) {
    const MachineFunction &MF = DAG.getMachineFunction();
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
                       "over-aligned dynamic alloca not supported.");
  }

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the value
    // we get here has already had rounding applied. So, we need to
    // add 8 instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations.)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size);    // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain

  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, dl, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}

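// Emit a FLUSHW node, forcing every in-use register window to be spilled to
// its stack save area so that the frame walking below sees up-to-date values.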
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}

static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget,
                            bool AlwaysFlush = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  SDValue Chain;

  // Flush first to make sure the windowed registers' values are in stack.
  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();

  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

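  // The caller's saved %fp lives in the register save area at the top of
  // its frame: slot 14 (after the 8 locals and %i0-%i5), i.e. 56 bytes in on
  // sparc32 and 112 bytes (plus the stack bias) on sparc64.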
  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}

static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const SparcSubtarget *Subtarget) {
  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);
}

static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);

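  // The return address (%i7) is saved one slot above %fp in the register
  // save area: slot 15, i.e. 60 bytes in on sparc32 and 120 bytes on
  // sparc64.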
  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}

static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
                          unsigned opcode) {
  assert(SrcReg64.getValueType() == MVT::f64 &&
         "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  // Note: in little-endian, the halves of the floating-point value are
  // stored in the registers in the opposite order, so the subreg holding
  // the sign bit is the highest-numbered (odd) one, rather than the
  // lowest-numbered (even) one.

  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  if (DAG.getDataLayout().isLittleEndian())
    Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
  else
    Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}

// Lower an f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
  assert(LdNode->getOffset().isUndef() && "Unexpected node type");

  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), Alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo().getWithOffset(8),
                             Alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = { SDValue(InFP128, 0), OutChain };
  return DAG.getMergeValues(Ops, dl);
}

static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}

// Lower an f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
  assert(StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), StNode->getPointerInfo(),
                   Alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              StNode->getPointerInfo().getWithOffset(8),
                              Alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

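// Custom lowering for stores: f128 stores are split into two f64 stores (see
// LowerF128Store above), and i64 stores become v2i32 stores (the i64 case
// only arises on sparc32, where i64 is not a legal type).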
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getOriginalAlign(), St->getMemOperand()->getFlags(),
        St->getAAInfo());
    return Chain;
  }

  return SDValue();
}

static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
         && "invalid opcode");

  SDLoc dl(Op);

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op;

  // Lower fabs/fneg on f128 to fabs/fneg on f64.
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64.
  // (As with LowerF64Op, on little-endian, we need to negate the odd
  // subreg.)

  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                            SrcReg128);

  if (DAG.getDataLayout().isLittleEndian()) {
    if (isV9)
      Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
    else
      Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
  } else {
    if (isV9)
      Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
    else
      Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
  }

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}

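// Expand a 64-bit add/sub-with-carry into 32-bit halves: the low halves are
// combined with the original (low) opcode, the resulting carry is glued into
// an ADDE/SUBE on the high halves, and the two 32-bit results are
// reassembled into an i64.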
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  if (Op.getValueType() != MVT::i64)
    return Op;

  SDLoc dl(Op);
  SDValue Src1 = Op.getOperand(0);
  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
                               DAG.getConstant(32, dl, MVT::i64));
  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);

  SDValue Src2 = Op.getOperand(1);
  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
                               DAG.getConstant(32, dl, MVT::i64));
  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);

  bool hasChain = false;
  unsigned hiOpc = Op.getOpcode();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode");
  case ISD::ADDC: hiOpc = ISD::ADDE; break;
  case ISD::ADDE: hasChain = true; break;
  case ISD::SUBC: hiOpc = ISD::SUBE; break;
  case ISD::SUBE: hasChain = true; break;
  }
  SDValue Lo;
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
  if (hasChain) {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
                     Op.getOperand(2));
  } else {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
  }
  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
  SDValue Carry = Hi.getValue(1);

  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
                   DAG.getConstant(32, dl, MVT::i64));

  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
  SDValue Ops[2] = { Dst, Carry };
  return DAG.getMergeValues(Ops, dl);
}

// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
// in LegalizeDAG.cpp except the order of arguments to the library function.
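// (Each i128 operand is passed to the libcall as a pair of i64 halves with
// the high word first, which is why Args below is ordered
// { HiLHS, LHS, HiRHS, RHS }; this matches SPARC's big-endian register-pair
// convention.)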
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;

  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);

  SDValue RHS = Op.getOperand(1);
  SDValue HiLHS, HiRHS;
  if (isSigned) {
    HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
    HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
  } else {
    HiLHS = DAG.getConstant(0, dl, VT);
    HiRHS = DAG.getConstant(0, dl, MVT::i64);
  }

  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };

  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setSExt(isSigned);
  SDValue MulResult = TLI.makeLibCall(DAG,
                                      RTLIB::MUL_I128, WideVT,
                                      Args, CallOptions, dl).first;
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) = DAG.SplitScalar(MulResult, dl, VT, VT);
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
                           ISD::SETNE);
  }
  // MulResult is a node with an illegal type. Because such things are not
  // generally permitted during this phase of legalization, ensure that
  // nothing is left using the node. The EXTRACT_ELEMENT nodes produced by
  // SplitScalar above should have been folded.
  assert(MulResult->use_empty() && "Illegally typed node still in use!");

  SDValue Ops[2] = { BottomHalf, TopHalf };
  return DAG.getMergeValues(Ops, dl);
}

static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
    // Expand with a fence.
    return SDValue();
  }

  // Monotonic load/stores are legal.
  return Op;
}

SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = Op.getConstantOperandVal(0);
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}

SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9 = Subtarget->isV9();
  bool is64Bit = Subtarget->is64Bit();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::SQRT_F128), 1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

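// Materialize the bit pattern of an f64 constant as a v2i32 BUILD_VECTOR of
// its two 32-bit halves, swapped on little-endian targets so that element 0
// still holds the word that belongs at the lower address.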
SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {
  APInt V = C->getValueAPF().bitcastToAPInt();
  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
  if (DAG.getDataLayout().isLittleEndian())
    std::swap(Lo, Hi);
  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
}

SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDLoc dl(N);
  SDValue Src = N->getOperand(0);

  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
      Src.getSimpleValueType() == MVT::f64)
    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);

  return SDValue();
}

SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI);
  }
  return SDValue();
}

MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::BPICC);
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_XCC:
  case SP::SELECT_CC_FP_XCC:
  case SP::SELECT_CC_DFP_XCC:
  case SP::SELECT_CC_QFP_XCC:
    return expandSelectCC(MI, BB, SP::BPXCC);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::FBCOND_V9);
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}

MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the
  // branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     |  /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  BuildMI(ThisMBB, dl, TII.get(BROpcode))
    .addMBB(SinkMBB)
    .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(ThisMBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return SinkMBB;
}

//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r':
    case 'f':
    case 'e':
      return C_RegisterClass;
    case 'I': // SIMM13
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13, i.e. a signed 13-bit immediate in [-4096, 4095].
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void SparcTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.size() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.empty())
    return std::make_pair(0U, nullptr);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message.
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message.
      return std::make_pair(0U, nullptr);
    }
  }

  if (Constraint.front() != '{')
    return std::make_pair(0U, nullptr);

  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
  if (RegName.empty())
    return std::make_pair(0U, nullptr);

  unsigned long long RegNo;
  // Handle numbered register aliases.
  if (RegName[0] == 'r' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    // r0-r7   -> g0-g7
    // r8-r15  -> o0-o7
    // r16-r23 -> l0-l7
    // r24-r31 -> i0-i7
    if (RegNo > 31)
      return std::make_pair(0U, nullptr);
    const char RegTypes[] = {'g', 'o', 'l', 'i'};
    char RegType = RegTypes[RegNo / 8];
    char RegIndex = '0' + (RegNo % 8);
    char Tmp[] = {'{', RegType, RegIndex, '}', 0};
    return getRegForInlineAsmConstraint(TRI, Tmp, VT);
  }

  // Rewrite the fN constraint according to the value type if needed.
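  // e.g. {f2} with an f64 operand becomes {d1}, and {f4} with an f128
  // operand becomes {q1}, since each d-register aliases a pair of
  // f-registers and each q-register aliases four.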
  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    if (VT == MVT::f64 && (RegNo % 2 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
    } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
    } else {
      return std::make_pair(0U, nullptr);
    }
  }

  auto ResultPair =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  if (!ResultPair.second)
    return std::make_pair(0U, nullptr);

  // Force the use of I64Regs over IntRegs for 64-bit values.
  if (Subtarget->is64Bit() && VT == MVT::i64) {
    assert(ResultPair.second == &SP::IntRegsRegClass &&
           "Unexpected register class");
    return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
  }

  return ResultPair;
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {
  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
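    // LEON's cycle counter is the 32-bit %asr23 register; the high half of
    // the 64-bit result is read from %g0, which is hardwired to zero.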
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to skip emitting the SSP guard-variable declarations on Linux,
// where the guard is loaded via LOAD_STACK_GUARD instead of through a
// global variable.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                        SDNode *Node) const {
  assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
  // If the result is dead, replace it with %g0.
  if (!Node->hasAnyUseOfValue(0))
    MI.getOperand(0).setReg(SP::G0);
}
