1 | //===- PPC.cpp ------------------------------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "ABIInfoImpl.h" |
10 | #include "TargetInfo.h" |
11 | #include "clang/Basic/DiagnosticFrontend.h" |
12 | |
13 | using namespace clang; |
14 | using namespace clang::CodeGen; |
15 | |
16 | static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr, |
17 | QualType Ty, CharUnits SlotSize, |
18 | CharUnits EltSize, const ComplexType *CTy) { |
19 | Address Addr = |
20 | emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy: CGF.Int8Ty, DirectSize: SlotSize * 2, |
21 | DirectAlign: SlotSize, SlotSize, /*AllowHigher*/ AllowHigherAlign: true); |
22 | |
23 | Address RealAddr = Addr; |
24 | Address ImagAddr = RealAddr; |
25 | if (CGF.CGM.getDataLayout().isBigEndian()) { |
26 | RealAddr = |
27 | CGF.Builder.CreateConstInBoundsByteGEP(Addr: RealAddr, Offset: SlotSize - EltSize); |
28 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: ImagAddr, |
29 | Offset: 2 * SlotSize - EltSize); |
30 | } else { |
31 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: RealAddr, Offset: SlotSize); |
32 | } |
33 | |
34 | llvm::Type *EltTy = CGF.ConvertTypeForMem(T: CTy->getElementType()); |
35 | RealAddr = RealAddr.withElementType(ElemTy: EltTy); |
36 | ImagAddr = ImagAddr.withElementType(ElemTy: EltTy); |
37 | llvm::Value *Real = CGF.Builder.CreateLoad(Addr: RealAddr, Name: ".vareal" ); |
38 | llvm::Value *Imag = CGF.Builder.CreateLoad(Addr: ImagAddr, Name: ".vaimag" ); |
39 | |
40 | Address Temp = CGF.CreateMemTemp(T: Ty, Name: "vacplx" ); |
41 | CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Addr: Temp, T: Ty), |
42 | /*init*/ true); |
43 | return Temp; |
44 | } |
45 | |
46 | static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
47 | llvm::Value *Address, bool Is64Bit, |
48 | bool IsAIX) { |
49 | // This is calculated from the LLVM and GCC tables and verified |
50 | // against gcc output. AFAIK all PPC ABIs use the same encoding. |
51 | |
52 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
53 | |
54 | llvm::IntegerType *i8 = CGF.Int8Ty; |
55 | llvm::Value *Four8 = llvm::ConstantInt::get(Ty: i8, V: 4); |
56 | llvm::Value *Eight8 = llvm::ConstantInt::get(Ty: i8, V: 8); |
57 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(Ty: i8, V: 16); |
58 | |
59 | // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers |
60 | AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 0, LastIndex: 31); |
61 | |
62 | // 32-63: fp0-31, the 8-byte floating-point registers |
63 | AssignToArrayRange(Builder, Array: Address, Value: Eight8, FirstIndex: 32, LastIndex: 63); |
64 | |
65 | // 64-67 are various 4-byte or 8-byte special-purpose registers: |
66 | // 64: mq |
67 | // 65: lr |
68 | // 66: ctr |
69 | // 67: ap |
70 | AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 64, LastIndex: 67); |
71 | |
72 | // 68-76 are various 4-byte special-purpose registers: |
73 | // 68-75 cr0-7 |
74 | // 76: xer |
75 | AssignToArrayRange(Builder, Array: Address, Value: Four8, FirstIndex: 68, LastIndex: 76); |
76 | |
77 | // 77-108: v0-31, the 16-byte vector registers |
78 | AssignToArrayRange(Builder, Array: Address, Value: Sixteen8, FirstIndex: 77, LastIndex: 108); |
79 | |
80 | // 109: vrsave |
81 | // 110: vscr |
82 | AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 109, LastIndex: 110); |
83 | |
84 | // AIX does not utilize the rest of the registers. |
85 | if (IsAIX) |
86 | return false; |
87 | |
88 | // 111: spe_acc |
89 | // 112: spefscr |
90 | // 113: sfp |
91 | AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 111, LastIndex: 113); |
92 | |
93 | if (!Is64Bit) |
94 | return false; |
95 | |
96 | // TODO: Need to verify if these registers are used on 64 bit AIX with Power8 |
97 | // or above CPU. |
98 | // 64-bit only registers: |
99 | // 114: tfhar |
100 | // 115: tfiar |
101 | // 116: texasr |
102 | AssignToArrayRange(Builder, Array: Address, Value: Eight8, FirstIndex: 114, LastIndex: 116); |
103 | |
104 | return false; |
105 | } |
106 | |
107 | // AIX |
108 | namespace { |
109 | /// AIXABIInfo - The AIX XCOFF ABI information. |
110 | class AIXABIInfo : public ABIInfo { |
111 | const bool Is64Bit; |
112 | const unsigned PtrByteSize; |
113 | CharUnits getParamTypeAlignment(QualType Ty) const; |
114 | |
115 | public: |
116 | AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) |
117 | : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {} |
118 | |
119 | bool isPromotableTypeForABI(QualType Ty) const; |
120 | |
121 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
122 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
123 | |
124 | void computeInfo(CGFunctionInfo &FI) const override { |
125 | if (!getCXXABI().classifyReturnType(FI)) |
126 | FI.getReturnInfo() = classifyReturnType(RetTy: FI.getReturnType()); |
127 | |
128 | for (auto &I : FI.arguments()) |
129 | I.info = classifyArgumentType(Ty: I.type); |
130 | } |
131 | |
132 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
133 | QualType Ty) const override; |
134 | }; |
135 | |
136 | class AIXTargetCodeGenInfo : public TargetCodeGenInfo { |
137 | const bool Is64Bit; |
138 | |
139 | public: |
140 | AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) |
141 | : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(args&: CGT, args&: Is64Bit)), |
142 | Is64Bit(Is64Bit) {} |
143 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
144 | return 1; // r1 is the dedicated stack pointer |
145 | } |
146 | |
147 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
148 | llvm::Value *Address) const override; |
149 | |
150 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
151 | CodeGen::CodeGenModule &M) const override; |
152 | }; |
153 | } // namespace |
154 | |
155 | // Return true if the ABI requires Ty to be passed sign- or zero- |
156 | // extended to 32/64 bits. |
157 | bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const { |
158 | // Treat an enum type as its underlying type. |
159 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
160 | Ty = EnumTy->getDecl()->getIntegerType(); |
161 | |
162 | // Promotable integer types are required to be promoted by the ABI. |
163 | if (getContext().isPromotableIntegerType(T: Ty)) |
164 | return true; |
165 | |
166 | if (!Is64Bit) |
167 | return false; |
168 | |
169 | // For 64 bit mode, in addition to the usual promotable integer types, we also |
170 | // need to extend all 32-bit types, since the ABI requires promotion to 64 |
171 | // bits. |
172 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
173 | switch (BT->getKind()) { |
174 | case BuiltinType::Int: |
175 | case BuiltinType::UInt: |
176 | return true; |
177 | default: |
178 | break; |
179 | } |
180 | |
181 | return false; |
182 | } |
183 | |
184 | ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const { |
185 | if (RetTy->isAnyComplexType()) |
186 | return ABIArgInfo::getDirect(); |
187 | |
188 | if (RetTy->isVectorType()) |
189 | return ABIArgInfo::getDirect(); |
190 | |
191 | if (RetTy->isVoidType()) |
192 | return ABIArgInfo::getIgnore(); |
193 | |
194 | if (isAggregateTypeForABI(T: RetTy)) |
195 | return getNaturalAlignIndirect(Ty: RetTy); |
196 | |
197 | return (isPromotableTypeForABI(Ty: RetTy) ? ABIArgInfo::getExtend(Ty: RetTy) |
198 | : ABIArgInfo::getDirect()); |
199 | } |
200 | |
201 | ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const { |
202 | Ty = useFirstFieldIfTransparentUnion(Ty); |
203 | |
204 | if (Ty->isAnyComplexType()) |
205 | return ABIArgInfo::getDirect(); |
206 | |
207 | if (Ty->isVectorType()) |
208 | return ABIArgInfo::getDirect(); |
209 | |
210 | if (isAggregateTypeForABI(T: Ty)) { |
211 | // Records with non-trivial destructors/copy-constructors should not be |
212 | // passed by value. |
213 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(T: Ty, CXXABI&: getCXXABI())) |
214 | return getNaturalAlignIndirect(Ty, ByVal: RAA == CGCXXABI::RAA_DirectInMemory); |
215 | |
216 | CharUnits CCAlign = getParamTypeAlignment(Ty); |
217 | CharUnits TyAlign = getContext().getTypeAlignInChars(T: Ty); |
218 | |
219 | return ABIArgInfo::getIndirect(Alignment: CCAlign, /*ByVal*/ true, |
220 | /*Realign*/ TyAlign > CCAlign); |
221 | } |
222 | |
223 | return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
224 | : ABIArgInfo::getDirect()); |
225 | } |
226 | |
227 | CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const { |
228 | // Complex types are passed just like their elements. |
229 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
230 | Ty = CTy->getElementType(); |
231 | |
232 | if (Ty->isVectorType()) |
233 | return CharUnits::fromQuantity(Quantity: 16); |
234 | |
235 | // If the structure contains a vector type, the alignment is 16. |
236 | if (isRecordWithSIMDVectorType(Context&: getContext(), Ty)) |
237 | return CharUnits::fromQuantity(Quantity: 16); |
238 | |
239 | return CharUnits::fromQuantity(Quantity: PtrByteSize); |
240 | } |
241 | |
242 | Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
243 | QualType Ty) const { |
244 | |
245 | auto TypeInfo = getContext().getTypeInfoInChars(T: Ty); |
246 | TypeInfo.Align = getParamTypeAlignment(Ty); |
247 | |
248 | CharUnits SlotSize = CharUnits::fromQuantity(Quantity: PtrByteSize); |
249 | |
250 | // If we have a complex type and the base type is smaller than the register |
251 | // size, the ABI calls for the real and imaginary parts to be right-adjusted |
252 | // in separate words in 32bit mode or doublewords in 64bit mode. However, |
253 | // Clang expects us to produce a pointer to a structure with the two parts |
254 | // packed tightly. So generate loads of the real and imaginary parts relative |
255 | // to the va_list pointer, and store them to a temporary structure. We do the |
256 | // same as the PPC64ABI here. |
257 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { |
258 | CharUnits EltSize = TypeInfo.Width / 2; |
259 | if (EltSize < SlotSize) |
260 | return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy); |
261 | } |
262 | |
263 | return emitVoidPtrVAArg(CGF, VAListAddr, ValueTy: Ty, /*Indirect*/ IsIndirect: false, ValueInfo: TypeInfo, |
264 | SlotSizeAndAlign: SlotSize, /*AllowHigher*/ AllowHigherAlign: true); |
265 | } |
266 | |
267 | bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable( |
268 | CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { |
269 | return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true); |
270 | } |
271 | |
272 | void AIXTargetCodeGenInfo::setTargetAttributes( |
273 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { |
274 | if (!isa<llvm::GlobalVariable>(Val: GV)) |
275 | return; |
276 | |
277 | auto *GVar = cast<llvm::GlobalVariable>(Val: GV); |
278 | auto GVId = GV->getName(); |
279 | |
280 | // Is this a global variable specified by the user as toc-data? |
281 | bool UserSpecifiedTOC = |
282 | llvm::binary_search(Range: M.getCodeGenOpts().TocDataVarsUserSpecified, Value&: GVId); |
283 | // Assumes the same variable cannot be in both TocVarsUserSpecified and |
284 | // NoTocVars. |
285 | if (UserSpecifiedTOC || |
286 | ((M.getCodeGenOpts().AllTocData) && |
287 | !llvm::binary_search(Range: M.getCodeGenOpts().NoTocDataVars, Value&: GVId))) { |
288 | const unsigned long PointerSize = |
289 | GV->getParent()->getDataLayout().getPointerSizeInBits() / 8; |
290 | auto *VarD = dyn_cast<VarDecl>(Val: D); |
291 | assert(VarD && "Invalid declaration of global variable." ); |
292 | |
293 | ASTContext &Context = D->getASTContext(); |
294 | unsigned Alignment = Context.toBits(CharSize: Context.getDeclAlign(D)) / 8; |
295 | const auto *Ty = VarD->getType().getTypePtr(); |
296 | const RecordDecl *RDecl = |
297 | Ty->isRecordType() ? Ty->getAs<RecordType>()->getDecl() : nullptr; |
298 | |
299 | bool EmitDiagnostic = UserSpecifiedTOC && GV->hasExternalLinkage(); |
300 | auto reportUnsupportedWarning = [&](bool ShouldEmitWarning, StringRef Msg) { |
301 | if (ShouldEmitWarning) |
302 | M.getDiags().Report(D->getLocation(), diag::warn_toc_unsupported_type) |
303 | << GVId << Msg; |
304 | }; |
305 | if (!Ty || Ty->isIncompleteType()) |
306 | reportUnsupportedWarning(EmitDiagnostic, "of incomplete type" ); |
307 | else if (RDecl && RDecl->hasFlexibleArrayMember()) |
308 | reportUnsupportedWarning(EmitDiagnostic, |
309 | "it contains a flexible array member" ); |
310 | else if (VarD->getTLSKind() != VarDecl::TLS_None) |
311 | reportUnsupportedWarning(EmitDiagnostic, "of thread local storage" ); |
312 | else if (PointerSize < Context.getTypeInfo(VarD->getType()).Width / 8) |
313 | reportUnsupportedWarning(EmitDiagnostic, |
314 | "variable is larger than a pointer" ); |
315 | else if (PointerSize < Alignment) |
316 | reportUnsupportedWarning(EmitDiagnostic, |
317 | "variable is aligned wider than a pointer" ); |
318 | else if (D->hasAttr<SectionAttr>()) |
319 | reportUnsupportedWarning(EmitDiagnostic, |
320 | "variable has a section attribute" ); |
321 | else if (GV->hasExternalLinkage() || |
322 | (M.getCodeGenOpts().AllTocData && !GV->hasLocalLinkage())) |
323 | GVar->addAttribute(Kind: "toc-data" ); |
324 | } |
325 | } |
326 | |
327 | // PowerPC-32 |
328 | namespace { |
329 | /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. |
330 | class PPC32_SVR4_ABIInfo : public DefaultABIInfo { |
331 | bool IsSoftFloatABI; |
332 | bool IsRetSmallStructInRegABI; |
333 | |
334 | CharUnits getParamTypeAlignment(QualType Ty) const; |
335 | |
336 | public: |
337 | PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI, |
338 | bool RetSmallStructInRegABI) |
339 | : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI), |
340 | IsRetSmallStructInRegABI(RetSmallStructInRegABI) {} |
341 | |
342 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
343 | |
344 | void computeInfo(CGFunctionInfo &FI) const override { |
345 | if (!getCXXABI().classifyReturnType(FI)) |
346 | FI.getReturnInfo() = classifyReturnType(RetTy: FI.getReturnType()); |
347 | for (auto &I : FI.arguments()) |
348 | I.info = classifyArgumentType(RetTy: I.type); |
349 | } |
350 | |
351 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
352 | QualType Ty) const override; |
353 | }; |
354 | |
355 | class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { |
356 | public: |
357 | PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI, |
358 | bool RetSmallStructInRegABI) |
359 | : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>( |
360 | args&: CGT, args&: SoftFloatABI, args&: RetSmallStructInRegABI)) {} |
361 | |
362 | static bool isStructReturnInRegABI(const llvm::Triple &Triple, |
363 | const CodeGenOptions &Opts); |
364 | |
365 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
366 | // This is recovered from gcc output. |
367 | return 1; // r1 is the dedicated stack pointer |
368 | } |
369 | |
370 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
371 | llvm::Value *Address) const override; |
372 | }; |
373 | } |
374 | |
375 | CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { |
376 | // Complex types are passed just like their elements. |
377 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
378 | Ty = CTy->getElementType(); |
379 | |
380 | if (Ty->isVectorType()) |
381 | return CharUnits::fromQuantity(Quantity: getContext().getTypeSize(T: Ty) == 128 ? 16 |
382 | : 4); |
383 | |
384 | // For single-element float/vector structs, we consider the whole type |
385 | // to have the same alignment requirements as its single element. |
386 | const Type *AlignTy = nullptr; |
387 | if (const Type *EltType = isSingleElementStruct(T: Ty, Context&: getContext())) { |
388 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); |
389 | if ((EltType->isVectorType() && getContext().getTypeSize(T: EltType) == 128) || |
390 | (BT && BT->isFloatingPoint())) |
391 | AlignTy = EltType; |
392 | } |
393 | |
394 | if (AlignTy) |
395 | return CharUnits::fromQuantity(Quantity: AlignTy->isVectorType() ? 16 : 4); |
396 | return CharUnits::fromQuantity(Quantity: 4); |
397 | } |
398 | |
399 | ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { |
400 | uint64_t Size; |
401 | |
402 | // -msvr4-struct-return puts small aggregates in GPR3 and GPR4. |
403 | if (isAggregateTypeForABI(T: RetTy) && IsRetSmallStructInRegABI && |
404 | (Size = getContext().getTypeSize(T: RetTy)) <= 64) { |
405 | // System V ABI (1995), page 3-22, specified: |
406 | // > A structure or union whose size is less than or equal to 8 bytes |
407 | // > shall be returned in r3 and r4, as if it were first stored in the |
408 | // > 8-byte aligned memory area and then the low addressed word were |
409 | // > loaded into r3 and the high-addressed word into r4. Bits beyond |
410 | // > the last member of the structure or union are not defined. |
411 | // |
412 | // GCC for big-endian PPC32 inserts the pad before the first member, |
413 | // not "beyond the last member" of the struct. To stay compatible |
414 | // with GCC, we coerce the struct to an integer of the same size. |
415 | // LLVM will extend it and return i32 in r3, or i64 in r3:r4. |
416 | if (Size == 0) |
417 | return ABIArgInfo::getIgnore(); |
418 | else { |
419 | llvm::Type *CoerceTy = llvm::Type::getIntNTy(C&: getVMContext(), N: Size); |
420 | return ABIArgInfo::getDirect(T: CoerceTy); |
421 | } |
422 | } |
423 | |
424 | return DefaultABIInfo::classifyReturnType(RetTy); |
425 | } |
426 | |
427 | // TODO: this implementation is now likely redundant with |
428 | // DefaultABIInfo::EmitVAArg. |
429 | Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, |
430 | QualType Ty) const { |
431 | if (getTarget().getTriple().isOSDarwin()) { |
432 | auto TI = getContext().getTypeInfoInChars(T: Ty); |
433 | TI.Align = getParamTypeAlignment(Ty); |
434 | |
435 | CharUnits SlotSize = CharUnits::fromQuantity(Quantity: 4); |
436 | return emitVoidPtrVAArg(CGF, VAListAddr: VAList, ValueTy: Ty, |
437 | IsIndirect: classifyArgumentType(RetTy: Ty).isIndirect(), ValueInfo: TI, SlotSizeAndAlign: SlotSize, |
438 | /*AllowHigherAlign=*/true); |
439 | } |
440 | |
441 | const unsigned OverflowLimit = 8; |
442 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { |
443 | // TODO: Implement this. For now ignore. |
444 | (void)CTy; |
445 | return Address::invalid(); // FIXME? |
446 | } |
447 | |
448 | // struct __va_list_tag { |
449 | // unsigned char gpr; |
450 | // unsigned char fpr; |
451 | // unsigned short reserved; |
452 | // void *overflow_arg_area; |
453 | // void *reg_save_area; |
454 | // }; |
455 | |
456 | bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(T: Ty) == 64; |
457 | bool isInt = !Ty->isFloatingType(); |
458 | bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(T: Ty) == 64; |
459 | |
460 | // All aggregates are passed indirectly? That doesn't seem consistent |
461 | // with the argument-lowering code. |
462 | bool isIndirect = isAggregateTypeForABI(T: Ty); |
463 | |
464 | CGBuilderTy &Builder = CGF.Builder; |
465 | |
466 | // The calling convention either uses 1-2 GPRs or 1 FPR. |
467 | Address NumRegsAddr = Address::invalid(); |
468 | if (isInt || IsSoftFloatABI) { |
469 | NumRegsAddr = Builder.CreateStructGEP(Addr: VAList, Index: 0, Name: "gpr" ); |
470 | } else { |
471 | NumRegsAddr = Builder.CreateStructGEP(Addr: VAList, Index: 1, Name: "fpr" ); |
472 | } |
473 | |
474 | llvm::Value *NumRegs = Builder.CreateLoad(Addr: NumRegsAddr, Name: "numUsedRegs" ); |
475 | |
476 | // "Align" the register count when TY is i64. |
477 | if (isI64 || (isF64 && IsSoftFloatABI)) { |
478 | NumRegs = Builder.CreateAdd(LHS: NumRegs, RHS: Builder.getInt8(C: 1)); |
479 | NumRegs = Builder.CreateAnd(LHS: NumRegs, RHS: Builder.getInt8(C: (uint8_t) ~1U)); |
480 | } |
481 | |
482 | llvm::Value *CC = |
483 | Builder.CreateICmpULT(LHS: NumRegs, RHS: Builder.getInt8(C: OverflowLimit), Name: "cond" ); |
484 | |
485 | llvm::BasicBlock *UsingRegs = CGF.createBasicBlock(name: "using_regs" ); |
486 | llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock(name: "using_overflow" ); |
487 | llvm::BasicBlock *Cont = CGF.createBasicBlock(name: "cont" ); |
488 | |
489 | Builder.CreateCondBr(Cond: CC, True: UsingRegs, False: UsingOverflow); |
490 | |
491 | llvm::Type *DirectTy = CGF.ConvertType(T: Ty), *ElementTy = DirectTy; |
492 | if (isIndirect) |
493 | DirectTy = CGF.UnqualPtrTy; |
494 | |
495 | // Case 1: consume registers. |
496 | Address RegAddr = Address::invalid(); |
497 | { |
498 | CGF.EmitBlock(BB: UsingRegs); |
499 | |
500 | Address RegSaveAreaPtr = Builder.CreateStructGEP(Addr: VAList, Index: 4); |
501 | RegAddr = Address(Builder.CreateLoad(Addr: RegSaveAreaPtr), CGF.Int8Ty, |
502 | CharUnits::fromQuantity(Quantity: 8)); |
503 | assert(RegAddr.getElementType() == CGF.Int8Ty); |
504 | |
505 | // Floating-point registers start after the general-purpose registers. |
506 | if (!(isInt || IsSoftFloatABI)) { |
507 | RegAddr = Builder.CreateConstInBoundsByteGEP(Addr: RegAddr, |
508 | Offset: CharUnits::fromQuantity(Quantity: 32)); |
509 | } |
510 | |
511 | // Get the address of the saved value by scaling the number of |
512 | // registers we've used by the number of |
513 | CharUnits RegSize = CharUnits::fromQuantity(Quantity: (isInt || IsSoftFloatABI) ? 4 : 8); |
514 | llvm::Value *RegOffset = |
515 | Builder.CreateMul(LHS: NumRegs, RHS: Builder.getInt8(C: RegSize.getQuantity())); |
516 | RegAddr = Address(Builder.CreateInBoundsGEP( |
517 | Ty: CGF.Int8Ty, Ptr: RegAddr.emitRawPointer(CGF), IdxList: RegOffset), |
518 | DirectTy, |
519 | RegAddr.getAlignment().alignmentOfArrayElement(elementSize: RegSize)); |
520 | |
521 | // Increase the used-register count. |
522 | NumRegs = |
523 | Builder.CreateAdd(LHS: NumRegs, |
524 | RHS: Builder.getInt8(C: (isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1)); |
525 | Builder.CreateStore(Val: NumRegs, Addr: NumRegsAddr); |
526 | |
527 | CGF.EmitBranch(Block: Cont); |
528 | } |
529 | |
530 | // Case 2: consume space in the overflow area. |
531 | Address MemAddr = Address::invalid(); |
532 | { |
533 | CGF.EmitBlock(BB: UsingOverflow); |
534 | |
535 | Builder.CreateStore(Val: Builder.getInt8(C: OverflowLimit), Addr: NumRegsAddr); |
536 | |
537 | // Everything in the overflow area is rounded up to a size of at least 4. |
538 | CharUnits OverflowAreaAlign = CharUnits::fromQuantity(Quantity: 4); |
539 | |
540 | CharUnits Size; |
541 | if (!isIndirect) { |
542 | auto TypeInfo = CGF.getContext().getTypeInfoInChars(T: Ty); |
543 | Size = TypeInfo.Width.alignTo(Align: OverflowAreaAlign); |
544 | } else { |
545 | Size = CGF.getPointerSize(); |
546 | } |
547 | |
548 | Address OverflowAreaAddr = Builder.CreateStructGEP(Addr: VAList, Index: 3); |
549 | Address OverflowArea = |
550 | Address(Builder.CreateLoad(Addr: OverflowAreaAddr, Name: "argp.cur" ), CGF.Int8Ty, |
551 | OverflowAreaAlign); |
552 | // Round up address of argument to alignment |
553 | CharUnits Align = CGF.getContext().getTypeAlignInChars(T: Ty); |
554 | if (Align > OverflowAreaAlign) { |
555 | llvm::Value *Ptr = OverflowArea.emitRawPointer(CGF); |
556 | OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), |
557 | OverflowArea.getElementType(), Align); |
558 | } |
559 | |
560 | MemAddr = OverflowArea.withElementType(ElemTy: DirectTy); |
561 | |
562 | // Increase the overflow area. |
563 | OverflowArea = Builder.CreateConstInBoundsByteGEP(Addr: OverflowArea, Offset: Size); |
564 | Builder.CreateStore(Val: OverflowArea.emitRawPointer(CGF), Addr: OverflowAreaAddr); |
565 | CGF.EmitBranch(Block: Cont); |
566 | } |
567 | |
568 | CGF.EmitBlock(BB: Cont); |
569 | |
570 | // Merge the cases with a phi. |
571 | Address Result = emitMergePHI(CGF, Addr1: RegAddr, Block1: UsingRegs, Addr2: MemAddr, Block2: UsingOverflow, |
572 | Name: "vaarg.addr" ); |
573 | |
574 | // Load the pointer if the argument was passed indirectly. |
575 | if (isIndirect) { |
576 | Result = Address(Builder.CreateLoad(Addr: Result, Name: "aggr" ), ElementTy, |
577 | getContext().getTypeAlignInChars(T: Ty)); |
578 | } |
579 | |
580 | return Result; |
581 | } |
582 | |
583 | bool PPC32TargetCodeGenInfo::isStructReturnInRegABI( |
584 | const llvm::Triple &Triple, const CodeGenOptions &Opts) { |
585 | assert(Triple.isPPC32()); |
586 | |
587 | switch (Opts.getStructReturnConvention()) { |
588 | case CodeGenOptions::SRCK_Default: |
589 | break; |
590 | case CodeGenOptions::SRCK_OnStack: // -maix-struct-return |
591 | return false; |
592 | case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return |
593 | return true; |
594 | } |
595 | |
596 | if (Triple.isOSBinFormatELF() && !Triple.isOSLinux()) |
597 | return true; |
598 | |
599 | return false; |
600 | } |
601 | |
602 | bool |
603 | PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
604 | llvm::Value *Address) const { |
605 | return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false, |
606 | /*IsAIX*/ false); |
607 | } |
608 | |
609 | // PowerPC-64 |
610 | |
611 | namespace { |
612 | |
613 | /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. |
614 | class PPC64_SVR4_ABIInfo : public ABIInfo { |
615 | static const unsigned GPRBits = 64; |
616 | PPC64_SVR4_ABIKind Kind; |
617 | bool IsSoftFloatABI; |
618 | |
619 | public: |
620 | PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind, |
621 | bool SoftFloatABI) |
622 | : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {} |
623 | |
624 | bool isPromotableTypeForABI(QualType Ty) const; |
625 | CharUnits getParamTypeAlignment(QualType Ty) const; |
626 | |
627 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
628 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
629 | |
630 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; |
631 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
632 | uint64_t Members) const override; |
633 | |
634 | // TODO: We can add more logic to computeInfo to improve performance. |
635 | // Example: For aggregate arguments that fit in a register, we could |
636 | // use getDirectInReg (as is done below for structs containing a single |
637 | // floating-point value) to avoid pushing them to memory on function |
638 | // entry. This would require changing the logic in PPCISelLowering |
639 | // when lowering the parameters in the caller and args in the callee. |
640 | void computeInfo(CGFunctionInfo &FI) const override { |
641 | if (!getCXXABI().classifyReturnType(FI)) |
642 | FI.getReturnInfo() = classifyReturnType(RetTy: FI.getReturnType()); |
643 | for (auto &I : FI.arguments()) { |
644 | // We rely on the default argument classification for the most part. |
645 | // One exception: An aggregate containing a single floating-point |
646 | // or vector item must be passed in a register if one is available. |
647 | const Type *T = isSingleElementStruct(I.type, getContext()); |
648 | if (T) { |
649 | const BuiltinType *BT = T->getAs<BuiltinType>(); |
650 | if ((T->isVectorType() && getContext().getTypeSize(T) == 128) || |
651 | (BT && BT->isFloatingPoint())) { |
652 | QualType QT(T, 0); |
653 | I.info = ABIArgInfo::getDirectInReg(T: CGT.ConvertType(T: QT)); |
654 | continue; |
655 | } |
656 | } |
657 | I.info = classifyArgumentType(Ty: I.type); |
658 | } |
659 | } |
660 | |
661 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
662 | QualType Ty) const override; |
663 | }; |
664 | |
665 | class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { |
666 | |
667 | public: |
668 | PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind, |
669 | bool SoftFloatABI) |
670 | : TargetCodeGenInfo( |
671 | std::make_unique<PPC64_SVR4_ABIInfo>(args&: CGT, args&: Kind, args&: SoftFloatABI)) { |
672 | SwiftInfo = |
673 | std::make_unique<SwiftABIInfo>(args&: CGT, /*SwiftErrorInRegister=*/args: false); |
674 | } |
675 | |
676 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
677 | // This is recovered from gcc output. |
678 | return 1; // r1 is the dedicated stack pointer |
679 | } |
680 | |
681 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
682 | llvm::Value *Address) const override; |
683 | void emitTargetMetadata(CodeGen::CodeGenModule &CGM, |
684 | const llvm::MapVector<GlobalDecl, StringRef> |
685 | &MangledDeclNames) const override; |
686 | }; |
687 | |
688 | class PPC64TargetCodeGenInfo : public TargetCodeGenInfo { |
689 | public: |
690 | PPC64TargetCodeGenInfo(CodeGenTypes &CGT) |
691 | : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(args&: CGT)) {} |
692 | |
693 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
694 | // This is recovered from gcc output. |
695 | return 1; // r1 is the dedicated stack pointer |
696 | } |
697 | |
698 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
699 | llvm::Value *Address) const override; |
700 | }; |
701 | } |
702 | |
703 | // Return true if the ABI requires Ty to be passed sign- or zero- |
704 | // extended to 64 bits. |
705 | bool |
706 | PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { |
707 | // Treat an enum type as its underlying type. |
708 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
709 | Ty = EnumTy->getDecl()->getIntegerType(); |
710 | |
711 | // Promotable integer types are required to be promoted by the ABI. |
712 | if (isPromotableIntegerTypeForABI(Ty)) |
713 | return true; |
714 | |
715 | // In addition to the usual promotable integer types, we also need to |
716 | // extend all 32-bit types, since the ABI requires promotion to 64 bits. |
717 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
718 | switch (BT->getKind()) { |
719 | case BuiltinType::Int: |
720 | case BuiltinType::UInt: |
721 | return true; |
722 | default: |
723 | break; |
724 | } |
725 | |
726 | if (const auto *EIT = Ty->getAs<BitIntType>()) |
727 | if (EIT->getNumBits() < 64) |
728 | return true; |
729 | |
730 | return false; |
731 | } |
732 | |
733 | /// isAlignedParamType - Determine whether a type requires 16-byte or |
734 | /// higher alignment in the parameter area. Always returns at least 8. |
735 | CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { |
736 | // Complex types are passed just like their elements. |
737 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
738 | Ty = CTy->getElementType(); |
739 | |
740 | auto FloatUsesVector = [this](QualType Ty){ |
741 | return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics( |
742 | T: Ty) == &llvm::APFloat::IEEEquad(); |
743 | }; |
744 | |
745 | // Only vector types of size 16 bytes need alignment (larger types are |
746 | // passed via reference, smaller types are not aligned). |
747 | if (Ty->isVectorType()) { |
748 | return CharUnits::fromQuantity(Quantity: getContext().getTypeSize(T: Ty) == 128 ? 16 : 8); |
749 | } else if (FloatUsesVector(Ty)) { |
750 | // According to ABI document section 'Optional Save Areas': If extended |
751 | // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION |
752 | // format are supported, map them to a single quadword, quadword aligned. |
753 | return CharUnits::fromQuantity(Quantity: 16); |
754 | } |
755 | |
756 | // For single-element float/vector structs, we consider the whole type |
757 | // to have the same alignment requirements as its single element. |
758 | const Type *AlignAsType = nullptr; |
759 | const Type *EltType = isSingleElementStruct(T: Ty, Context&: getContext()); |
760 | if (EltType) { |
761 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); |
762 | if ((EltType->isVectorType() && getContext().getTypeSize(T: EltType) == 128) || |
763 | (BT && BT->isFloatingPoint())) |
764 | AlignAsType = EltType; |
765 | } |
766 | |
767 | // Likewise for ELFv2 homogeneous aggregates. |
768 | const Type *Base = nullptr; |
769 | uint64_t Members = 0; |
770 | if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 && |
771 | isAggregateTypeForABI(T: Ty) && isHomogeneousAggregate(Ty, Base, Members)) |
772 | AlignAsType = Base; |
773 | |
774 | // With special case aggregates, only vector base types need alignment. |
775 | if (AlignAsType) { |
776 | bool UsesVector = AlignAsType->isVectorType() || |
777 | FloatUsesVector(QualType(AlignAsType, 0)); |
778 | return CharUnits::fromQuantity(Quantity: UsesVector ? 16 : 8); |
779 | } |
780 | |
781 | // Otherwise, we only need alignment for any aggregate type that |
782 | // has an alignment requirement of >= 16 bytes. |
783 | if (isAggregateTypeForABI(T: Ty) && getContext().getTypeAlign(T: Ty) >= 128) { |
784 | return CharUnits::fromQuantity(Quantity: 16); |
785 | } |
786 | |
787 | return CharUnits::fromQuantity(Quantity: 8); |
788 | } |
789 | |
790 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
791 | // Homogeneous aggregates for ELFv2 must have base types of float, |
792 | // double, long double, or 128-bit vectors. |
793 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
794 | if (BT->getKind() == BuiltinType::Float || |
795 | BT->getKind() == BuiltinType::Double || |
796 | BT->getKind() == BuiltinType::LongDouble || |
797 | BT->getKind() == BuiltinType::Ibm128 || |
798 | (getContext().getTargetInfo().hasFloat128Type() && |
799 | (BT->getKind() == BuiltinType::Float128))) { |
800 | if (IsSoftFloatABI) |
801 | return false; |
802 | return true; |
803 | } |
804 | } |
805 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
806 | if (getContext().getTypeSize(VT) == 128) |
807 | return true; |
808 | } |
809 | return false; |
810 | } |
811 | |
812 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( |
813 | const Type *Base, uint64_t Members) const { |
814 | // Vector and fp128 types require one register, other floating point types |
815 | // require one or two registers depending on their size. |
816 | uint32_t NumRegs = |
817 | ((getContext().getTargetInfo().hasFloat128Type() && |
818 | Base->isFloat128Type()) || |
819 | Base->isVectorType()) ? 1 |
820 | : (getContext().getTypeSize(T: Base) + 63) / 64; |
821 | |
822 | // Homogeneous Aggregates may occupy at most 8 registers. |
823 | return Members * NumRegs <= 8; |
824 | } |
825 | |
/// Classify how an argument of type \p Ty is passed under the PPC64 SVR4
/// ABI (ELFv1 or ELFv2, selected by \c Kind). The checks below are
/// order-dependent: each case must be ruled out before the next applies.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Complex types are passed directly (the element handling for va_arg
  // lives in EmitVAArg / complexTempStructure).
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(T: Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      // Coerce sub-16-byte vectors to a same-sized integer.
      llvm::Type *CoerceTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: Size);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }
    // Exactly 128-bit vectors fall through to the default direct handling
    // at the end of this function.
  }

  // _BitInt wider than 128 bits is passed indirectly by value.
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (isAggregateTypeForABI(T: Ty)) {
    // C++ records with non-trivial copy/destroy semantics must follow the
    // C++ ABI's record-passing rule.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(T: Ty, CXXABI&: getCXXABI()))
      return getNaturalAlignIndirect(Ty, ByVal: RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(T: Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(T: QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(ElementType: BaseTy, NumElements: Members);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(T: Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(C&: getVMContext(), NumBits: llvm::alignTo(Value: Bits, Align: 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Value: Bits, Align: RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: RegBits);
        CoerceTy = llvm::ArrayType::get(ElementType: RegTy, NumElements: NumRegs);
      }

      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // All other aggregates are passed ByVal.
    // Realign only when the natural alignment exceeds the ABI alignment.
    return ABIArgInfo::getIndirect(Alignment: CharUnits::fromQuantity(Quantity: ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Scalars: promote small integers to 64 bits where the ABI requires it,
  // otherwise pass directly.
  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}
900 | |
/// Classify how a value of type \p RetTy is returned under the PPC64 SVR4
/// ABI. Mirrors classifyArgumentType, with ELFv2-specific small-aggregate
/// return-in-registers handling.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(T: RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty: RetTy);
    else if (Size < 128) {
      // Coerce sub-16-byte vectors to a same-sized integer.
      llvm::Type *CoerceTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: Size);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }
    // Exactly 128-bit vectors fall through to the default direct handling.
  }

  // _BitInt wider than 128 bits is returned indirectly.
  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty: RetTy, /*ByVal=*/false);

  if (isAggregateTypeForABI(T: RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(Ty: RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(T: QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(ElementType: BaseTy, NumElements: Members);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(T: RetTy);
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
      // Empty aggregates carry no data.
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        // Two-register case: a pair of GPR-sized integers.
        CoerceTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: GPRBits);
        CoerceTy = llvm::StructType::get(elt1: CoerceTy, elts: CoerceTy);
      } else
        // Single-register case: round the bit width up to a whole byte.
        CoerceTy =
            llvm::IntegerType::get(C&: getVMContext(), NumBits: llvm::alignTo(Value: Bits, Align: 8));
      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(Ty: RetTy);
  }

  // Scalars: promote small integers to 64 bits where the ABI requires it.
  return (isPromotableTypeForABI(Ty: RetTy) ? ABIArgInfo::getExtend(Ty: RetTy)
                                          : ABIArgInfo::getDirect());
}
959 | |
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
/// Emit the address of the next variadic argument of type \p Ty from the
/// va_list at \p VAListAddr, honoring PPC64 slot-size and right-adjustment
/// rules.
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  auto TypeInfo = getContext().getTypeInfoInChars(T: Ty);
  // The parameter-area alignment can exceed the type's natural alignment
  // (e.g. 16 for quad-aligned types), so override it here.
  TypeInfo.Align = getParamTypeAlignment(Ty);

  // Each argument slot in the PPC64 parameter save area is a doubleword.
  CharUnits SlotSize = CharUnits::fromQuantity(Quantity: 8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  // Otherwise, just use the general rule.
  //
  // The PPC64 ABI passes some arguments in integer registers, even to variadic
  // functions. To allow va_list to use the simple "void*" representation,
  // variadic calls allocate space in the argument area for the integer argument
  // registers, and variadic functions spill their integer argument registers to
  // this area in their prologues. When aggregates smaller than a register are
  // passed this way, they are passed in the least significant bits of the
  // register, which means that after spilling on big-endian targets they will
  // be right-aligned in their argument slot. This is uncommon; for a variety of
  // reasons, other big-endian targets don't end up right-aligning aggregate
  // types this way, and so right-alignment only applies to fundamental types.
  // So on PPC64, we must force the use of right-alignment even for aggregates.
  return emitVoidPtrVAArg(CGF, VAListAddr, ValueTy: Ty, /*Indirect*/ IsIndirect: false, ValueInfo: TypeInfo,
                          SlotSizeAndAlign: SlotSize, /*AllowHigher*/ AllowHigherAlign: true,
                          /*ForceRightAdjust*/ true);
}
997 | |
998 | bool |
999 | PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( |
1000 | CodeGen::CodeGenFunction &CGF, |
1001 | llvm::Value *Address) const { |
1002 | return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true, |
1003 | /*IsAIX*/ false); |
1004 | } |
1005 | |
1006 | void PPC64_SVR4_TargetCodeGenInfo::emitTargetMetadata( |
1007 | CodeGen::CodeGenModule &CGM, |
1008 | const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const { |
1009 | if (CGM.getTypes().isLongDoubleReferenced()) { |
1010 | llvm::LLVMContext &Ctx = CGM.getLLVMContext(); |
1011 | const auto *flt = &CGM.getTarget().getLongDoubleFormat(); |
1012 | if (flt == &llvm::APFloat::PPCDoubleDouble()) |
1013 | CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "float-abi" , |
1014 | Val: llvm::MDString::get(Context&: Ctx, Str: "doubledouble" )); |
1015 | else if (flt == &llvm::APFloat::IEEEquad()) |
1016 | CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "float-abi" , |
1017 | Val: llvm::MDString::get(Context&: Ctx, Str: "ieeequad" )); |
1018 | else if (flt == &llvm::APFloat::IEEEdouble()) |
1019 | CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "float-abi" , |
1020 | Val: llvm::MDString::get(Context&: Ctx, Str: "ieeedouble" )); |
1021 | } |
1022 | } |
1023 | |
1024 | bool |
1025 | PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
1026 | llvm::Value *Address) const { |
1027 | return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true, |
1028 | /*IsAIX*/ false); |
1029 | } |
1030 | |
1031 | std::unique_ptr<TargetCodeGenInfo> |
1032 | CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) { |
1033 | return std::make_unique<AIXTargetCodeGenInfo>(args&: CGM.getTypes(), args&: Is64Bit); |
1034 | } |
1035 | |
1036 | std::unique_ptr<TargetCodeGenInfo> |
1037 | CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) { |
1038 | bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI( |
1039 | Triple: CGM.getTriple(), Opts: CGM.getCodeGenOpts()); |
1040 | return std::make_unique<PPC32TargetCodeGenInfo>(args&: CGM.getTypes(), args&: SoftFloatABI, |
1041 | args&: RetSmallStructInRegABI); |
1042 | } |
1043 | |
1044 | std::unique_ptr<TargetCodeGenInfo> |
1045 | CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) { |
1046 | return std::make_unique<PPC64TargetCodeGenInfo>(args&: CGM.getTypes()); |
1047 | } |
1048 | |
1049 | std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo( |
1050 | CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) { |
1051 | return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(args&: CGM.getTypes(), args&: Kind, |
1052 | args&: SoftFloatABI); |
1053 | } |
1054 | |