1 | //===--- CGCall.cpp - Encapsulate calling convention details --------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // These classes wrap the information about a call or function |
10 | // definition used to handle ABI compliancy. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "CGCall.h" |
15 | #include "ABIInfo.h" |
16 | #include "ABIInfoImpl.h" |
17 | #include "CGBlocks.h" |
18 | #include "CGCXXABI.h" |
19 | #include "CGCleanup.h" |
20 | #include "CGRecordLayout.h" |
21 | #include "CodeGenFunction.h" |
22 | #include "CodeGenModule.h" |
23 | #include "TargetInfo.h" |
24 | #include "clang/AST/Attr.h" |
25 | #include "clang/AST/Decl.h" |
26 | #include "clang/AST/DeclCXX.h" |
27 | #include "clang/AST/DeclObjC.h" |
28 | #include "clang/Basic/CodeGenOptions.h" |
29 | #include "clang/Basic/TargetInfo.h" |
30 | #include "clang/CodeGen/CGFunctionInfo.h" |
31 | #include "clang/CodeGen/SwiftCallingConv.h" |
32 | #include "llvm/ADT/StringExtras.h" |
33 | #include "llvm/Analysis/ValueTracking.h" |
34 | #include "llvm/IR/Assumptions.h" |
35 | #include "llvm/IR/AttributeMask.h" |
36 | #include "llvm/IR/Attributes.h" |
37 | #include "llvm/IR/CallingConv.h" |
38 | #include "llvm/IR/DataLayout.h" |
39 | #include "llvm/IR/InlineAsm.h" |
40 | #include "llvm/IR/IntrinsicInst.h" |
41 | #include "llvm/IR/Intrinsics.h" |
42 | #include "llvm/IR/Type.h" |
43 | #include "llvm/Transforms/Utils/Local.h" |
44 | #include <optional> |
45 | using namespace clang; |
46 | using namespace CodeGen; |
47 | |
48 | /***/ |
49 | |
50 | unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) { |
51 | switch (CC) { |
52 | default: return llvm::CallingConv::C; |
53 | case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; |
54 | case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; |
55 | case CC_X86RegCall: return llvm::CallingConv::X86_RegCall; |
56 | case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; |
57 | case CC_Win64: return llvm::CallingConv::Win64; |
58 | case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; |
59 | case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; |
60 | case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; |
61 | case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; |
62 | // TODO: Add support for __pascal to LLVM. |
63 | case CC_X86Pascal: return llvm::CallingConv::C; |
64 | // TODO: Add support for __vectorcall to LLVM. |
65 | case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; |
66 | case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall; |
67 | case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall; |
68 | case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL; |
69 | case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; |
70 | case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv(); |
71 | case CC_PreserveMost: return llvm::CallingConv::PreserveMost; |
72 | case CC_PreserveAll: return llvm::CallingConv::PreserveAll; |
73 | case CC_Swift: return llvm::CallingConv::Swift; |
74 | case CC_SwiftAsync: return llvm::CallingConv::SwiftTail; |
75 | case CC_M68kRTD: return llvm::CallingConv::M68k_RTD; |
76 | case CC_PreserveNone: return llvm::CallingConv::PreserveNone; |
77 | // clang-format off |
78 | case CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall; |
79 | // clang-format on |
80 | } |
81 | } |
82 | |
83 | /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR |
84 | /// qualification. Either or both of RD and MD may be null. A null RD indicates |
85 | /// that there is no meaningful 'this' type, and a null MD can occur when |
86 | /// calling a method pointer. |
87 | CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD, |
88 | const CXXMethodDecl *MD) { |
89 | QualType RecTy; |
90 | if (RD) |
91 | RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); |
92 | else |
93 | RecTy = Context.VoidTy; |
94 | |
95 | if (MD) |
96 | RecTy = Context.getAddrSpaceQualType(T: RecTy, AddressSpace: MD->getMethodQualifiers().getAddressSpace()); |
97 | return Context.getPointerType(T: CanQualType::CreateUnsafe(Other: RecTy)); |
98 | } |
99 | |
100 | /// Returns the canonical formal type of the given C++ method. |
101 | static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { |
102 | return MD->getType()->getCanonicalTypeUnqualified() |
103 | .getAs<FunctionProtoType>(); |
104 | } |
105 | |
106 | /// Returns the "extra-canonicalized" return type, which discards |
107 | /// qualifiers on the return type. Codegen doesn't care about them, |
108 | /// and it makes ABI code a little easier to be able to assume that |
109 | /// all parameter and return types are top-level unqualified. |
110 | static CanQualType GetReturnType(QualType RetTy) { |
111 | return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); |
112 | } |
113 | |
114 | /// Arrange the argument and result information for a value of the given |
115 | /// unprototyped freestanding function type. |
116 | const CGFunctionInfo & |
117 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { |
118 | // When translating an unprototyped function type, always use a |
119 | // variadic type. |
120 | return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), |
121 | FnInfoOpts::None, std::nullopt, |
122 | FTNP->getExtInfo(), {}, RequiredArgs(0)); |
123 | } |
124 | |
125 | static void addExtParameterInfosForCall( |
126 | llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
127 | const FunctionProtoType *proto, |
128 | unsigned prefixArgs, |
129 | unsigned totalArgs) { |
130 | assert(proto->hasExtParameterInfos()); |
131 | assert(paramInfos.size() <= prefixArgs); |
132 | assert(proto->getNumParams() + prefixArgs <= totalArgs); |
133 | |
134 | paramInfos.reserve(N: totalArgs); |
135 | |
136 | // Add default infos for any prefix args that don't already have infos. |
137 | paramInfos.resize(N: prefixArgs); |
138 | |
139 | // Add infos for the prototype. |
140 | for (const auto &ParamInfo : proto->getExtParameterInfos()) { |
141 | paramInfos.push_back(Elt: ParamInfo); |
142 | // pass_object_size params have no parameter info. |
143 | if (ParamInfo.hasPassObjectSize()) |
144 | paramInfos.emplace_back(); |
145 | } |
146 | |
147 | assert(paramInfos.size() <= totalArgs && |
148 | "Did we forget to insert pass_object_size args?" ); |
149 | // Add default infos for the variadic and/or suffix arguments. |
150 | paramInfos.resize(N: totalArgs); |
151 | } |
152 | |
153 | /// Adds the formal parameters in FPT to the given prefix. If any parameter in |
154 | /// FPT has pass_object_size attrs, then we'll add parameters for those, too. |
155 | static void appendParameterTypes(const CodeGenTypes &CGT, |
156 | SmallVectorImpl<CanQualType> &prefix, |
157 | SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
158 | CanQual<FunctionProtoType> FPT) { |
159 | // Fast path: don't touch param info if we don't need to. |
160 | if (!FPT->hasExtParameterInfos()) { |
161 | assert(paramInfos.empty() && |
162 | "We have paramInfos, but the prototype doesn't?" ); |
163 | prefix.append(FPT->param_type_begin(), FPT->param_type_end()); |
164 | return; |
165 | } |
166 | |
167 | unsigned PrefixSize = prefix.size(); |
168 | // In the vast majority of cases, we'll have precisely FPT->getNumParams() |
169 | // parameters; the only thing that can change this is the presence of |
170 | // pass_object_size. So, we preallocate for the common case. |
171 | prefix.reserve(prefix.size() + FPT->getNumParams()); |
172 | |
173 | auto ExtInfos = FPT->getExtParameterInfos(); |
174 | assert(ExtInfos.size() == FPT->getNumParams()); |
175 | for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { |
176 | prefix.push_back(FPT->getParamType(I)); |
177 | if (ExtInfos[I].hasPassObjectSize()) |
178 | prefix.push_back(Elt: CGT.getContext().getSizeType()); |
179 | } |
180 | |
181 | addExtParameterInfosForCall(paramInfos, proto: FPT.getTypePtr(), prefixArgs: PrefixSize, |
182 | totalArgs: prefix.size()); |
183 | } |
184 | |
185 | /// Arrange the LLVM function layout for a value of the given function |
186 | /// type, on top of any implicit parameters already stored. |
187 | static const CGFunctionInfo & |
188 | arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, |
189 | SmallVectorImpl<CanQualType> &prefix, |
190 | CanQual<FunctionProtoType> FTP) { |
191 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
192 | RequiredArgs Required = RequiredArgs::forPrototypePlus(prototype: FTP, additional: prefix.size()); |
193 | // FIXME: Kill copy. |
194 | appendParameterTypes(CGT, prefix, paramInfos, FPT: FTP); |
195 | CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); |
196 | |
197 | FnInfoOpts opts = |
198 | instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None; |
199 | return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix, |
200 | FTP->getExtInfo(), paramInfos, Required); |
201 | } |
202 | |
203 | /// Arrange the argument and result information for a value of the |
204 | /// given freestanding function type. |
205 | const CGFunctionInfo & |
206 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { |
207 | SmallVector<CanQualType, 16> argTypes; |
208 | return ::arrangeLLVMFunctionInfo(CGT&: *this, /*instanceMethod=*/false, prefix&: argTypes, |
209 | FTP); |
210 | } |
211 | |
212 | static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, |
213 | bool IsWindows) { |
214 | // Set the appropriate calling convention for the Function. |
215 | if (D->hasAttr<StdCallAttr>()) |
216 | return CC_X86StdCall; |
217 | |
218 | if (D->hasAttr<FastCallAttr>()) |
219 | return CC_X86FastCall; |
220 | |
221 | if (D->hasAttr<RegCallAttr>()) |
222 | return CC_X86RegCall; |
223 | |
224 | if (D->hasAttr<ThisCallAttr>()) |
225 | return CC_X86ThisCall; |
226 | |
227 | if (D->hasAttr<VectorCallAttr>()) |
228 | return CC_X86VectorCall; |
229 | |
230 | if (D->hasAttr<PascalAttr>()) |
231 | return CC_X86Pascal; |
232 | |
233 | if (PcsAttr *PCS = D->getAttr<PcsAttr>()) |
234 | return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); |
235 | |
236 | if (D->hasAttr<AArch64VectorPcsAttr>()) |
237 | return CC_AArch64VectorCall; |
238 | |
239 | if (D->hasAttr<AArch64SVEPcsAttr>()) |
240 | return CC_AArch64SVEPCS; |
241 | |
242 | if (D->hasAttr<AMDGPUKernelCallAttr>()) |
243 | return CC_AMDGPUKernelCall; |
244 | |
245 | if (D->hasAttr<IntelOclBiccAttr>()) |
246 | return CC_IntelOclBicc; |
247 | |
248 | if (D->hasAttr<MSABIAttr>()) |
249 | return IsWindows ? CC_C : CC_Win64; |
250 | |
251 | if (D->hasAttr<SysVABIAttr>()) |
252 | return IsWindows ? CC_X86_64SysV : CC_C; |
253 | |
254 | if (D->hasAttr<PreserveMostAttr>()) |
255 | return CC_PreserveMost; |
256 | |
257 | if (D->hasAttr<PreserveAllAttr>()) |
258 | return CC_PreserveAll; |
259 | |
260 | if (D->hasAttr<M68kRTDAttr>()) |
261 | return CC_M68kRTD; |
262 | |
263 | if (D->hasAttr<PreserveNoneAttr>()) |
264 | return CC_PreserveNone; |
265 | |
266 | if (D->hasAttr<RISCVVectorCCAttr>()) |
267 | return CC_RISCVVectorCall; |
268 | |
269 | return CC_C; |
270 | } |
271 | |
272 | /// Arrange the argument and result information for a call to an |
273 | /// unknown C++ non-static member function of the given abstract type. |
274 | /// (A null RD means we don't have any meaningful "this" argument type, |
275 | /// so fall back to a generic pointer type). |
276 | /// The member function must be an ordinary function, i.e. not a |
277 | /// constructor or destructor. |
278 | const CGFunctionInfo & |
279 | CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, |
280 | const FunctionProtoType *FTP, |
281 | const CXXMethodDecl *MD) { |
282 | SmallVector<CanQualType, 16> argTypes; |
283 | |
284 | // Add the 'this' pointer. |
285 | argTypes.push_back(Elt: DeriveThisType(RD, MD)); |
286 | |
287 | return ::arrangeLLVMFunctionInfo( |
288 | CGT&: *this, /*instanceMethod=*/true, prefix&: argTypes, |
289 | FTP: FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); |
290 | } |
291 | |
292 | /// Set calling convention for CUDA/HIP kernel. |
293 | static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, |
294 | const FunctionDecl *FD) { |
295 | if (FD->hasAttr<CUDAGlobalAttr>()) { |
296 | const FunctionType *FT = FTy->getAs<FunctionType>(); |
297 | CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT); |
298 | FTy = FT->getCanonicalTypeUnqualified(); |
299 | } |
300 | } |
301 | |
302 | /// Arrange the argument and result information for a declaration or |
303 | /// definition of the given C++ non-static member function. The |
304 | /// member function must be an ordinary function, i.e. not a |
305 | /// constructor or destructor. |
306 | const CGFunctionInfo & |
307 | CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { |
308 | assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!" ); |
309 | assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!" ); |
310 | |
311 | CanQualType FT = GetFormalType(MD).getAs<Type>(); |
312 | setCUDAKernelCallingConvention(FT, CGM, MD); |
313 | auto prototype = FT.getAs<FunctionProtoType>(); |
314 | |
315 | if (MD->isImplicitObjectMemberFunction()) { |
316 | // The abstract case is perfectly fine. |
317 | const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); |
318 | return arrangeCXXMethodType(RD: ThisType, FTP: prototype.getTypePtr(), MD); |
319 | } |
320 | |
321 | return arrangeFreeFunctionType(FTP: prototype); |
322 | } |
323 | |
324 | bool CodeGenTypes::inheritingCtorHasParams( |
325 | const InheritedConstructor &Inherited, CXXCtorType Type) { |
326 | // Parameters are unnecessary if we're constructing a base class subobject |
327 | // and the inherited constructor lives in a virtual base. |
328 | return Type == Ctor_Complete || |
329 | !Inherited.getShadowDecl()->constructsVirtualBase() || |
330 | !Target.getCXXABI().hasConstructorVariants(); |
331 | } |
332 | |
333 | const CGFunctionInfo & |
334 | CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { |
335 | auto *MD = cast<CXXMethodDecl>(Val: GD.getDecl()); |
336 | |
337 | SmallVector<CanQualType, 16> argTypes; |
338 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
339 | |
340 | const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD); |
341 | argTypes.push_back(Elt: DeriveThisType(RD: ThisType, MD)); |
342 | |
343 | bool PassParams = true; |
344 | |
345 | if (auto *CD = dyn_cast<CXXConstructorDecl>(Val: MD)) { |
346 | // A base class inheriting constructor doesn't get forwarded arguments |
347 | // needed to construct a virtual base (or base class thereof). |
348 | if (auto Inherited = CD->getInheritedConstructor()) |
349 | PassParams = inheritingCtorHasParams(Inherited, Type: GD.getCtorType()); |
350 | } |
351 | |
352 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
353 | |
354 | // Add the formal parameters. |
355 | if (PassParams) |
356 | appendParameterTypes(CGT: *this, prefix&: argTypes, paramInfos, FPT: FTP); |
357 | |
358 | CGCXXABI::AddedStructorArgCounts AddedArgs = |
359 | TheCXXABI.buildStructorSignature(GD, ArgTys&: argTypes); |
360 | if (!paramInfos.empty()) { |
361 | // Note: prefix implies after the first param. |
362 | if (AddedArgs.Prefix) |
363 | paramInfos.insert(I: paramInfos.begin() + 1, NumToInsert: AddedArgs.Prefix, |
364 | Elt: FunctionProtoType::ExtParameterInfo{}); |
365 | if (AddedArgs.Suffix) |
366 | paramInfos.append(NumInputs: AddedArgs.Suffix, |
367 | Elt: FunctionProtoType::ExtParameterInfo{}); |
368 | } |
369 | |
370 | RequiredArgs required = |
371 | (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) |
372 | : RequiredArgs::All); |
373 | |
374 | FunctionType::ExtInfo extInfo = FTP->getExtInfo(); |
375 | CanQualType resultType = TheCXXABI.HasThisReturn(GD) |
376 | ? argTypes.front() |
377 | : TheCXXABI.hasMostDerivedReturn(GD) |
378 | ? CGM.getContext().VoidPtrTy |
379 | : Context.VoidTy; |
380 | return arrangeLLVMFunctionInfo(returnType: resultType, opts: FnInfoOpts::IsInstanceMethod, |
381 | argTypes, info: extInfo, paramInfos, args: required); |
382 | } |
383 | |
384 | static SmallVector<CanQualType, 16> |
385 | getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { |
386 | SmallVector<CanQualType, 16> argTypes; |
387 | for (auto &arg : args) |
388 | argTypes.push_back(Elt: ctx.getCanonicalParamType(T: arg.Ty)); |
389 | return argTypes; |
390 | } |
391 | |
392 | static SmallVector<CanQualType, 16> |
393 | getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { |
394 | SmallVector<CanQualType, 16> argTypes; |
395 | for (auto &arg : args) |
396 | argTypes.push_back(Elt: ctx.getCanonicalParamType(T: arg->getType())); |
397 | return argTypes; |
398 | } |
399 | |
400 | static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> |
401 | getExtParameterInfosForCall(const FunctionProtoType *proto, |
402 | unsigned prefixArgs, unsigned totalArgs) { |
403 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result; |
404 | if (proto->hasExtParameterInfos()) { |
405 | addExtParameterInfosForCall(paramInfos&: result, proto, prefixArgs, totalArgs); |
406 | } |
407 | return result; |
408 | } |
409 | |
410 | /// Arrange a call to a C++ method, passing the given arguments. |
411 | /// |
412 | /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` |
413 | /// parameter. |
414 | /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of |
415 | /// args. |
416 | /// PassProtoArgs indicates whether `args` has args for the parameters in the |
417 | /// given CXXConstructorDecl. |
418 | const CGFunctionInfo & |
419 | CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, |
420 | const CXXConstructorDecl *D, |
421 | CXXCtorType CtorKind, |
422 | unsigned , |
423 | unsigned , |
424 | bool PassProtoArgs) { |
425 | // FIXME: Kill copy. |
426 | SmallVector<CanQualType, 16> ArgTypes; |
427 | for (const auto &Arg : args) |
428 | ArgTypes.push_back(Elt: Context.getCanonicalParamType(T: Arg.Ty)); |
429 | |
430 | // +1 for implicit this, which should always be args[0]. |
431 | unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; |
432 | |
433 | CanQual<FunctionProtoType> FPT = GetFormalType(D); |
434 | RequiredArgs Required = PassProtoArgs |
435 | ? RequiredArgs::forPrototypePlus( |
436 | prototype: FPT, additional: TotalPrefixArgs + ExtraSuffixArgs) |
437 | : RequiredArgs::All; |
438 | |
439 | GlobalDecl GD(D, CtorKind); |
440 | CanQualType ResultType = TheCXXABI.HasThisReturn(GD) |
441 | ? ArgTypes.front() |
442 | : TheCXXABI.hasMostDerivedReturn(GD) |
443 | ? CGM.getContext().VoidPtrTy |
444 | : Context.VoidTy; |
445 | |
446 | FunctionType::ExtInfo Info = FPT->getExtInfo(); |
447 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos; |
448 | // If the prototype args are elided, we should only have ABI-specific args, |
449 | // which never have param info. |
450 | if (PassProtoArgs && FPT->hasExtParameterInfos()) { |
451 | // ABI-specific suffix arguments are treated the same as variadic arguments. |
452 | addExtParameterInfosForCall(paramInfos&: ParamInfos, proto: FPT.getTypePtr(), prefixArgs: TotalPrefixArgs, |
453 | totalArgs: ArgTypes.size()); |
454 | } |
455 | |
456 | return arrangeLLVMFunctionInfo(returnType: ResultType, opts: FnInfoOpts::IsInstanceMethod, |
457 | argTypes: ArgTypes, info: Info, paramInfos: ParamInfos, args: Required); |
458 | } |
459 | |
460 | /// Arrange the argument and result information for the declaration or |
461 | /// definition of the given function. |
462 | const CGFunctionInfo & |
463 | CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { |
464 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD)) |
465 | if (MD->isImplicitObjectMemberFunction()) |
466 | return arrangeCXXMethodDeclaration(MD); |
467 | |
468 | CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); |
469 | |
470 | assert(isa<FunctionType>(FTy)); |
471 | setCUDAKernelCallingConvention(FTy, CGM, FD); |
472 | |
473 | // When declaring a function without a prototype, always use a |
474 | // non-variadic type. |
475 | if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) { |
476 | return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None, |
477 | std::nullopt, noProto->getExtInfo(), {}, |
478 | RequiredArgs::All); |
479 | } |
480 | |
481 | return arrangeFreeFunctionType(FTP: FTy.castAs<FunctionProtoType>()); |
482 | } |
483 | |
484 | /// Arrange the argument and result information for the declaration or |
485 | /// definition of an Objective-C method. |
486 | const CGFunctionInfo & |
487 | CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { |
488 | // It happens that this is the same as a call with no optional |
489 | // arguments, except also using the formal 'self' type. |
490 | return arrangeObjCMessageSendSignature(MD, receiverType: MD->getSelfDecl()->getType()); |
491 | } |
492 | |
493 | /// Arrange the argument and result information for the function type |
494 | /// through which to perform a send to the given Objective-C method, |
495 | /// using the given receiver type. The receiver type is not always |
496 | /// the 'self' type of the method or even an Objective-C pointer type. |
497 | /// This is *not* the right method for actually performing such a |
498 | /// message send, due to the possibility of optional arguments. |
499 | const CGFunctionInfo & |
500 | CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, |
501 | QualType receiverType) { |
502 | SmallVector<CanQualType, 16> argTys; |
503 | SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos( |
504 | MD->isDirectMethod() ? 1 : 2); |
505 | argTys.push_back(Elt: Context.getCanonicalParamType(T: receiverType)); |
506 | if (!MD->isDirectMethod()) |
507 | argTys.push_back(Elt: Context.getCanonicalParamType(T: Context.getObjCSelType())); |
508 | // FIXME: Kill copy? |
509 | for (const auto *I : MD->parameters()) { |
510 | argTys.push_back(Elt: Context.getCanonicalParamType(T: I->getType())); |
511 | auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape( |
512 | I->hasAttr<NoEscapeAttr>()); |
513 | extParamInfos.push_back(Elt: extParamInfo); |
514 | } |
515 | |
516 | FunctionType::ExtInfo einfo; |
517 | bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); |
518 | einfo = einfo.withCallingConv(cc: getCallingConventionForDecl(D: MD, IsWindows)); |
519 | |
520 | if (getContext().getLangOpts().ObjCAutoRefCount && |
521 | MD->hasAttr<NSReturnsRetainedAttr>()) |
522 | einfo = einfo.withProducesResult(producesResult: true); |
523 | |
524 | RequiredArgs required = |
525 | (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); |
526 | |
527 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: MD->getReturnType()), |
528 | opts: FnInfoOpts::None, argTypes: argTys, info: einfo, paramInfos: extParamInfos, |
529 | args: required); |
530 | } |
531 | |
532 | const CGFunctionInfo & |
533 | CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType, |
534 | const CallArgList &args) { |
535 | auto argTypes = getArgTypesForCall(ctx&: Context, args); |
536 | FunctionType::ExtInfo einfo; |
537 | |
538 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: returnType), opts: FnInfoOpts::None, |
539 | argTypes, info: einfo, paramInfos: {}, args: RequiredArgs::All); |
540 | } |
541 | |
542 | const CGFunctionInfo & |
543 | CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { |
544 | // FIXME: Do we need to handle ObjCMethodDecl? |
545 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
546 | |
547 | if (isa<CXXConstructorDecl>(Val: GD.getDecl()) || |
548 | isa<CXXDestructorDecl>(Val: GD.getDecl())) |
549 | return arrangeCXXStructorDeclaration(GD); |
550 | |
551 | return arrangeFunctionDeclaration(FD); |
552 | } |
553 | |
554 | /// Arrange a thunk that takes 'this' as the first parameter followed by |
555 | /// varargs. Return a void pointer, regardless of the actual return type. |
556 | /// The body of the thunk will end in a musttail call to a function of the |
557 | /// correct type, and the caller will bitcast the function to the correct |
558 | /// prototype. |
559 | const CGFunctionInfo & |
560 | CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) { |
561 | assert(MD->isVirtual() && "only methods have thunks" ); |
562 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
563 | CanQualType ArgTys[] = {DeriveThisType(RD: MD->getParent(), MD)}; |
564 | return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::None, ArgTys, |
565 | FTP->getExtInfo(), {}, RequiredArgs(1)); |
566 | } |
567 | |
568 | const CGFunctionInfo & |
569 | CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, |
570 | CXXCtorType CT) { |
571 | assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); |
572 | |
573 | CanQual<FunctionProtoType> FTP = GetFormalType(CD); |
574 | SmallVector<CanQualType, 2> ArgTys; |
575 | const CXXRecordDecl *RD = CD->getParent(); |
576 | ArgTys.push_back(Elt: DeriveThisType(RD, CD)); |
577 | if (CT == Ctor_CopyingClosure) |
578 | ArgTys.push_back(*FTP->param_type_begin()); |
579 | if (RD->getNumVBases() > 0) |
580 | ArgTys.push_back(Elt: Context.IntTy); |
581 | CallingConv CC = Context.getDefaultCallingConvention( |
582 | /*IsVariadic=*/false, /*IsCXXMethod=*/true); |
583 | return arrangeLLVMFunctionInfo(returnType: Context.VoidTy, opts: FnInfoOpts::IsInstanceMethod, |
584 | argTypes: ArgTys, info: FunctionType::ExtInfo(CC), paramInfos: {}, |
585 | args: RequiredArgs::All); |
586 | } |
587 | |
588 | /// Arrange a call as unto a free function, except possibly with an |
589 | /// additional number of formal parameters considered required. |
590 | static const CGFunctionInfo & |
591 | arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, |
592 | CodeGenModule &CGM, |
593 | const CallArgList &args, |
594 | const FunctionType *fnType, |
595 | unsigned , |
596 | bool chainCall) { |
597 | assert(args.size() >= numExtraRequiredArgs); |
598 | |
599 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
600 | |
601 | // In most cases, there are no optional arguments. |
602 | RequiredArgs required = RequiredArgs::All; |
603 | |
604 | // If we have a variadic prototype, the required arguments are the |
605 | // extra prefix plus the arguments in the prototype. |
606 | if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(Val: fnType)) { |
607 | if (proto->isVariadic()) |
608 | required = RequiredArgs::forPrototypePlus(prototype: proto, additional: numExtraRequiredArgs); |
609 | |
610 | if (proto->hasExtParameterInfos()) |
611 | addExtParameterInfosForCall(paramInfos, proto, prefixArgs: numExtraRequiredArgs, |
612 | totalArgs: args.size()); |
613 | |
614 | // If we don't have a prototype at all, but we're supposed to |
615 | // explicitly use the variadic convention for unprototyped calls, |
616 | // treat all of the arguments as required but preserve the nominal |
617 | // possibility of variadics. |
618 | } else if (CGM.getTargetCodeGenInfo() |
619 | .isNoProtoCallVariadic(args, |
620 | fnType: cast<FunctionNoProtoType>(Val: fnType))) { |
621 | required = RequiredArgs(args.size()); |
622 | } |
623 | |
624 | // FIXME: Kill copy. |
625 | SmallVector<CanQualType, 16> argTypes; |
626 | for (const auto &arg : args) |
627 | argTypes.push_back(Elt: CGT.getContext().getCanonicalParamType(T: arg.Ty)); |
628 | FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; |
629 | return CGT.arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: fnType->getReturnType()), |
630 | opts, argTypes, info: fnType->getExtInfo(), |
631 | paramInfos, args: required); |
632 | } |
633 | |
634 | /// Figure out the rules for calling a function with the given formal |
635 | /// type using the given arguments. The arguments are necessary |
636 | /// because the function might be unprototyped, in which case it's |
637 | /// target-dependent in crazy ways. |
638 | const CGFunctionInfo & |
639 | CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, |
640 | const FunctionType *fnType, |
641 | bool chainCall) { |
642 | return arrangeFreeFunctionLikeCall(CGT&: *this, CGM, args, fnType, |
643 | numExtraRequiredArgs: chainCall ? 1 : 0, chainCall); |
644 | } |
645 | |
646 | /// A block function is essentially a free function with an |
647 | /// extra implicit argument. |
648 | const CGFunctionInfo & |
649 | CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, |
650 | const FunctionType *fnType) { |
651 | return arrangeFreeFunctionLikeCall(CGT&: *this, CGM, args, fnType, numExtraRequiredArgs: 1, |
652 | /*chainCall=*/false); |
653 | } |
654 | |
655 | const CGFunctionInfo & |
656 | CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto, |
657 | const FunctionArgList ¶ms) { |
658 | auto paramInfos = getExtParameterInfosForCall(proto, prefixArgs: 1, totalArgs: params.size()); |
659 | auto argTypes = getArgTypesForDeclaration(ctx&: Context, args: params); |
660 | |
661 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(proto->getReturnType()), |
662 | opts: FnInfoOpts::None, argTypes, |
663 | info: proto->getExtInfo(), paramInfos, |
664 | args: RequiredArgs::forPrototypePlus(prototype: proto, additional: 1)); |
665 | } |
666 | |
667 | const CGFunctionInfo & |
668 | CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, |
669 | const CallArgList &args) { |
670 | // FIXME: Kill copy. |
671 | SmallVector<CanQualType, 16> argTypes; |
672 | for (const auto &Arg : args) |
673 | argTypes.push_back(Elt: Context.getCanonicalParamType(T: Arg.Ty)); |
674 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: resultType), opts: FnInfoOpts::None, |
675 | argTypes, info: FunctionType::ExtInfo(), |
676 | /*paramInfos=*/{}, args: RequiredArgs::All); |
677 | } |
678 | |
679 | const CGFunctionInfo & |
680 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, |
681 | const FunctionArgList &args) { |
682 | auto argTypes = getArgTypesForDeclaration(ctx&: Context, args); |
683 | |
684 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: resultType), opts: FnInfoOpts::None, |
685 | argTypes, info: FunctionType::ExtInfo(), paramInfos: {}, |
686 | args: RequiredArgs::All); |
687 | } |
688 | |
689 | const CGFunctionInfo & |
690 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, |
691 | ArrayRef<CanQualType> argTypes) { |
692 | return arrangeLLVMFunctionInfo(returnType: resultType, opts: FnInfoOpts::None, argTypes, |
693 | info: FunctionType::ExtInfo(), paramInfos: {}, |
694 | args: RequiredArgs::All); |
695 | } |
696 | |
697 | /// Arrange a call to a C++ method, passing the given arguments. |
698 | /// |
699 | /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It |
700 | /// does not count `this`. |
701 | const CGFunctionInfo & |
702 | CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, |
703 | const FunctionProtoType *proto, |
704 | RequiredArgs required, |
705 | unsigned numPrefixArgs) { |
706 | assert(numPrefixArgs + 1 <= args.size() && |
707 | "Emitting a call with less args than the required prefix?" ); |
708 | // Add one to account for `this`. It's a bit awkward here, but we don't count |
709 | // `this` in similar places elsewhere. |
710 | auto paramInfos = |
711 | getExtParameterInfosForCall(proto, prefixArgs: numPrefixArgs + 1, totalArgs: args.size()); |
712 | |
713 | // FIXME: Kill copy. |
714 | auto argTypes = getArgTypesForCall(ctx&: Context, args); |
715 | |
716 | FunctionType::ExtInfo info = proto->getExtInfo(); |
717 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(proto->getReturnType()), |
718 | opts: FnInfoOpts::IsInstanceMethod, argTypes, info, |
719 | paramInfos, args: required); |
720 | } |
721 | |
722 | const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { |
723 | return arrangeLLVMFunctionInfo(returnType: getContext().VoidTy, opts: FnInfoOpts::None, |
724 | argTypes: std::nullopt, info: FunctionType::ExtInfo(), paramInfos: {}, |
725 | args: RequiredArgs::All); |
726 | } |
727 | |
728 | const CGFunctionInfo & |
729 | CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, |
730 | const CallArgList &args) { |
731 | assert(signature.arg_size() <= args.size()); |
732 | if (signature.arg_size() == args.size()) |
733 | return signature; |
734 | |
735 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
736 | auto sigParamInfos = signature.getExtParameterInfos(); |
737 | if (!sigParamInfos.empty()) { |
738 | paramInfos.append(in_start: sigParamInfos.begin(), in_end: sigParamInfos.end()); |
739 | paramInfos.resize(N: args.size()); |
740 | } |
741 | |
742 | auto argTypes = getArgTypesForCall(ctx&: Context, args); |
743 | |
744 | assert(signature.getRequiredArgs().allowsOptionalArgs()); |
745 | FnInfoOpts opts = FnInfoOpts::None; |
746 | if (signature.isInstanceMethod()) |
747 | opts |= FnInfoOpts::IsInstanceMethod; |
748 | if (signature.isChainCall()) |
749 | opts |= FnInfoOpts::IsChainCall; |
750 | if (signature.isDelegateCall()) |
751 | opts |= FnInfoOpts::IsDelegateCall; |
752 | return arrangeLLVMFunctionInfo(returnType: signature.getReturnType(), opts, argTypes, |
753 | info: signature.getExtInfo(), paramInfos, |
754 | args: signature.getRequiredArgs()); |
755 | } |
756 | |
757 | namespace clang { |
758 | namespace CodeGen { |
759 | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI); |
760 | } |
761 | } |
762 | |
763 | /// Arrange the argument and result information for an abstract value |
764 | /// of a given function type. This is the method which all of the |
765 | /// above functions ultimately defer to. |
766 | const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo( |
767 | CanQualType resultType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes, |
768 | FunctionType::ExtInfo info, |
769 | ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos, |
770 | RequiredArgs required) { |
771 | assert(llvm::all_of(argTypes, |
772 | [](CanQualType T) { return T.isCanonicalAsParam(); })); |
773 | |
774 | // Lookup or create unique function info. |
775 | llvm::FoldingSetNodeID ID; |
776 | bool isInstanceMethod = |
777 | (opts & FnInfoOpts::IsInstanceMethod) == FnInfoOpts::IsInstanceMethod; |
778 | bool isChainCall = |
779 | (opts & FnInfoOpts::IsChainCall) == FnInfoOpts::IsChainCall; |
780 | bool isDelegateCall = |
781 | (opts & FnInfoOpts::IsDelegateCall) == FnInfoOpts::IsDelegateCall; |
782 | CGFunctionInfo::Profile(ID, InstanceMethod: isInstanceMethod, ChainCall: isChainCall, IsDelegateCall: isDelegateCall, |
783 | info, paramInfos, required, resultType, argTypes); |
784 | |
785 | void *insertPos = nullptr; |
786 | CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos&: insertPos); |
787 | if (FI) |
788 | return *FI; |
789 | |
790 | unsigned CC = ClangCallConvToLLVMCallConv(CC: info.getCC()); |
791 | |
792 | // Construct the function info. We co-allocate the ArgInfos. |
793 | FI = CGFunctionInfo::create(llvmCC: CC, instanceMethod: isInstanceMethod, chainCall: isChainCall, delegateCall: isDelegateCall, |
794 | extInfo: info, paramInfos, resultType, argTypes, required); |
795 | FunctionInfos.InsertNode(N: FI, InsertPos: insertPos); |
796 | |
797 | bool inserted = FunctionsBeingProcessed.insert(Ptr: FI).second; |
798 | (void)inserted; |
799 | assert(inserted && "Recursively being processed?" ); |
800 | |
801 | // Compute ABI information. |
802 | if (CC == llvm::CallingConv::SPIR_KERNEL) { |
803 | // Force target independent argument handling for the host visible |
804 | // kernel functions. |
805 | computeSPIRKernelABIInfo(CGM, FI&: *FI); |
806 | } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) { |
807 | swiftcall::computeABIInfo(CGM, FI&: *FI); |
808 | } else { |
809 | getABIInfo().computeInfo(FI&: *FI); |
810 | } |
811 | |
812 | // Loop over all of the computed argument and return value info. If any of |
813 | // them are direct or extend without a specified coerce type, specify the |
814 | // default now. |
815 | ABIArgInfo &retInfo = FI->getReturnInfo(); |
816 | if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) |
817 | retInfo.setCoerceToType(ConvertType(T: FI->getReturnType())); |
818 | |
819 | for (auto &I : FI->arguments()) |
820 | if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) |
821 | I.info.setCoerceToType(ConvertType(T: I.type)); |
822 | |
823 | bool erased = FunctionsBeingProcessed.erase(Ptr: FI); (void)erased; |
824 | assert(erased && "Not in set?" ); |
825 | |
826 | return *FI; |
827 | } |
828 | |
829 | CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod, |
830 | bool chainCall, bool delegateCall, |
831 | const FunctionType::ExtInfo &info, |
832 | ArrayRef<ExtParameterInfo> paramInfos, |
833 | CanQualType resultType, |
834 | ArrayRef<CanQualType> argTypes, |
835 | RequiredArgs required) { |
836 | assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); |
837 | assert(!required.allowsOptionalArgs() || |
838 | required.getNumRequiredArgs() <= argTypes.size()); |
839 | |
840 | void *buffer = |
841 | operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>( |
842 | Counts: argTypes.size() + 1, Counts: paramInfos.size())); |
843 | |
844 | CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); |
845 | FI->CallingConvention = llvmCC; |
846 | FI->EffectiveCallingConvention = llvmCC; |
847 | FI->ASTCallingConvention = info.getCC(); |
848 | FI->InstanceMethod = instanceMethod; |
849 | FI->ChainCall = chainCall; |
850 | FI->DelegateCall = delegateCall; |
851 | FI->CmseNSCall = info.getCmseNSCall(); |
852 | FI->NoReturn = info.getNoReturn(); |
853 | FI->ReturnsRetained = info.getProducesResult(); |
854 | FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); |
855 | FI->NoCfCheck = info.getNoCfCheck(); |
856 | FI->Required = required; |
857 | FI->HasRegParm = info.getHasRegParm(); |
858 | FI->RegParm = info.getRegParm(); |
859 | FI->ArgStruct = nullptr; |
860 | FI->ArgStructAlign = 0; |
861 | FI->NumArgs = argTypes.size(); |
862 | FI->HasExtParameterInfos = !paramInfos.empty(); |
863 | FI->getArgsBuffer()[0].type = resultType; |
864 | FI->MaxVectorWidth = 0; |
865 | for (unsigned i = 0, e = argTypes.size(); i != e; ++i) |
866 | FI->getArgsBuffer()[i + 1].type = argTypes[i]; |
867 | for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) |
868 | FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; |
869 | return FI; |
870 | } |
871 | |
872 | /***/ |
873 | |
874 | namespace { |
875 | // ABIArgInfo::Expand implementation. |
876 | |
877 | // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. |
878 | struct TypeExpansion { |
879 | enum TypeExpansionKind { |
880 | // Elements of constant arrays are expanded recursively. |
881 | TEK_ConstantArray, |
882 | // Record fields are expanded recursively (but if record is a union, only |
883 | // the field with the largest size is expanded). |
884 | TEK_Record, |
885 | // For complex types, real and imaginary parts are expanded recursively. |
886 | TEK_Complex, |
887 | // All other types are not expandable. |
888 | TEK_None |
889 | }; |
890 | |
891 | const TypeExpansionKind Kind; |
892 | |
893 | TypeExpansion(TypeExpansionKind K) : Kind(K) {} |
894 | virtual ~TypeExpansion() {} |
895 | }; |
896 | |
897 | struct ConstantArrayExpansion : TypeExpansion { |
898 | QualType EltTy; |
899 | uint64_t NumElts; |
900 | |
901 | ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) |
902 | : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} |
903 | static bool classof(const TypeExpansion *TE) { |
904 | return TE->Kind == TEK_ConstantArray; |
905 | } |
906 | }; |
907 | |
908 | struct RecordExpansion : TypeExpansion { |
909 | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
910 | |
911 | SmallVector<const FieldDecl *, 1> Fields; |
912 | |
913 | RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases, |
914 | SmallVector<const FieldDecl *, 1> &&Fields) |
915 | : TypeExpansion(TEK_Record), Bases(std::move(Bases)), |
916 | Fields(std::move(Fields)) {} |
917 | static bool classof(const TypeExpansion *TE) { |
918 | return TE->Kind == TEK_Record; |
919 | } |
920 | }; |
921 | |
922 | struct ComplexExpansion : TypeExpansion { |
923 | QualType EltTy; |
924 | |
925 | ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} |
926 | static bool classof(const TypeExpansion *TE) { |
927 | return TE->Kind == TEK_Complex; |
928 | } |
929 | }; |
930 | |
931 | struct NoExpansion : TypeExpansion { |
932 | NoExpansion() : TypeExpansion(TEK_None) {} |
933 | static bool classof(const TypeExpansion *TE) { |
934 | return TE->Kind == TEK_None; |
935 | } |
936 | }; |
937 | } // namespace |
938 | |
939 | static std::unique_ptr<TypeExpansion> |
940 | getTypeExpansion(QualType Ty, const ASTContext &Context) { |
941 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T: Ty)) { |
942 | return std::make_unique<ConstantArrayExpansion>(AT->getElementType(), |
943 | AT->getZExtSize()); |
944 | } |
945 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
946 | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
947 | SmallVector<const FieldDecl *, 1> Fields; |
948 | const RecordDecl *RD = RT->getDecl(); |
949 | assert(!RD->hasFlexibleArrayMember() && |
950 | "Cannot expand structure with flexible array." ); |
951 | if (RD->isUnion()) { |
952 | // Unions can be here only in degenerative cases - all the fields are same |
953 | // after flattening. Thus we have to use the "largest" field. |
954 | const FieldDecl *LargestFD = nullptr; |
955 | CharUnits UnionSize = CharUnits::Zero(); |
956 | |
957 | for (const auto *FD : RD->fields()) { |
958 | if (FD->isZeroLengthBitField(Ctx: Context)) |
959 | continue; |
960 | assert(!FD->isBitField() && |
961 | "Cannot expand structure with bit-field members." ); |
962 | CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); |
963 | if (UnionSize < FieldSize) { |
964 | UnionSize = FieldSize; |
965 | LargestFD = FD; |
966 | } |
967 | } |
968 | if (LargestFD) |
969 | Fields.push_back(Elt: LargestFD); |
970 | } else { |
971 | if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
972 | assert(!CXXRD->isDynamicClass() && |
973 | "cannot expand vtable pointers in dynamic classes" ); |
974 | llvm::append_range(C&: Bases, R: llvm::make_pointer_range(Range: CXXRD->bases())); |
975 | } |
976 | |
977 | for (const auto *FD : RD->fields()) { |
978 | if (FD->isZeroLengthBitField(Ctx: Context)) |
979 | continue; |
980 | assert(!FD->isBitField() && |
981 | "Cannot expand structure with bit-field members." ); |
982 | Fields.push_back(Elt: FD); |
983 | } |
984 | } |
985 | return std::make_unique<RecordExpansion>(args: std::move(Bases), |
986 | args: std::move(Fields)); |
987 | } |
988 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
989 | return std::make_unique<ComplexExpansion>(args: CT->getElementType()); |
990 | } |
991 | return std::make_unique<NoExpansion>(); |
992 | } |
993 | |
994 | static int getExpansionSize(QualType Ty, const ASTContext &Context) { |
995 | auto Exp = getTypeExpansion(Ty, Context); |
996 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
997 | return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); |
998 | } |
999 | if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1000 | int Res = 0; |
1001 | for (auto BS : RExp->Bases) |
1002 | Res += getExpansionSize(Ty: BS->getType(), Context); |
1003 | for (auto FD : RExp->Fields) |
1004 | Res += getExpansionSize(FD->getType(), Context); |
1005 | return Res; |
1006 | } |
1007 | if (isa<ComplexExpansion>(Val: Exp.get())) |
1008 | return 2; |
1009 | assert(isa<NoExpansion>(Exp.get())); |
1010 | return 1; |
1011 | } |
1012 | |
1013 | void |
1014 | CodeGenTypes::getExpandedTypes(QualType Ty, |
1015 | SmallVectorImpl<llvm::Type *>::iterator &TI) { |
1016 | auto Exp = getTypeExpansion(Ty, Context); |
1017 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
1018 | for (int i = 0, n = CAExp->NumElts; i < n; i++) { |
1019 | getExpandedTypes(Ty: CAExp->EltTy, TI); |
1020 | } |
1021 | } else if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1022 | for (auto BS : RExp->Bases) |
1023 | getExpandedTypes(Ty: BS->getType(), TI); |
1024 | for (auto FD : RExp->Fields) |
1025 | getExpandedTypes(Ty: FD->getType(), TI); |
1026 | } else if (auto CExp = dyn_cast<ComplexExpansion>(Val: Exp.get())) { |
1027 | llvm::Type *EltTy = ConvertType(T: CExp->EltTy); |
1028 | *TI++ = EltTy; |
1029 | *TI++ = EltTy; |
1030 | } else { |
1031 | assert(isa<NoExpansion>(Exp.get())); |
1032 | *TI++ = ConvertType(T: Ty); |
1033 | } |
1034 | } |
1035 | |
1036 | static void forConstantArrayExpansion(CodeGenFunction &CGF, |
1037 | ConstantArrayExpansion *CAE, |
1038 | Address BaseAddr, |
1039 | llvm::function_ref<void(Address)> Fn) { |
1040 | for (int i = 0, n = CAE->NumElts; i < n; i++) { |
1041 | Address EltAddr = CGF.Builder.CreateConstGEP2_32(Addr: BaseAddr, Idx0: 0, Idx1: i); |
1042 | Fn(EltAddr); |
1043 | } |
1044 | } |
1045 | |
1046 | void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, |
1047 | llvm::Function::arg_iterator &AI) { |
1048 | assert(LV.isSimple() && |
1049 | "Unexpected non-simple lvalue during struct expansion." ); |
1050 | |
1051 | auto Exp = getTypeExpansion(Ty, Context: getContext()); |
1052 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
1053 | forConstantArrayExpansion( |
1054 | CGF&: *this, CAE: CAExp, BaseAddr: LV.getAddress(CGF&: *this), Fn: [&](Address EltAddr) { |
1055 | LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); |
1056 | ExpandTypeFromArgs(Ty: CAExp->EltTy, LV, AI); |
1057 | }); |
1058 | } else if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1059 | Address This = LV.getAddress(CGF&: *this); |
1060 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1061 | // Perform a single step derived-to-base conversion. |
1062 | Address Base = |
1063 | GetAddressOfBaseClass(Value: This, Derived: Ty->getAsCXXRecordDecl(), PathBegin: &BS, PathEnd: &BS + 1, |
1064 | /*NullCheckValue=*/false, Loc: SourceLocation()); |
1065 | LValue SubLV = MakeAddrLValue(Addr: Base, T: BS->getType()); |
1066 | |
1067 | // Recurse onto bases. |
1068 | ExpandTypeFromArgs(Ty: BS->getType(), LV: SubLV, AI); |
1069 | } |
1070 | for (auto FD : RExp->Fields) { |
1071 | // FIXME: What are the right qualifiers here? |
1072 | LValue SubLV = EmitLValueForFieldInitialization(Base: LV, Field: FD); |
1073 | ExpandTypeFromArgs(Ty: FD->getType(), LV: SubLV, AI); |
1074 | } |
1075 | } else if (isa<ComplexExpansion>(Val: Exp.get())) { |
1076 | auto realValue = &*AI++; |
1077 | auto imagValue = &*AI++; |
1078 | EmitStoreOfComplex(V: ComplexPairTy(realValue, imagValue), dest: LV, /*init*/ isInit: true); |
1079 | } else { |
1080 | // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a |
1081 | // primitive store. |
1082 | assert(isa<NoExpansion>(Exp.get())); |
1083 | llvm::Value *Arg = &*AI++; |
1084 | if (LV.isBitField()) { |
1085 | EmitStoreThroughLValue(Src: RValue::get(V: Arg), Dst: LV); |
1086 | } else { |
1087 | // TODO: currently there are some places are inconsistent in what LLVM |
1088 | // pointer type they use (see D118744). Once clang uses opaque pointers |
1089 | // all LLVM pointer types will be the same and we can remove this check. |
1090 | if (Arg->getType()->isPointerTy()) { |
1091 | Address Addr = LV.getAddress(CGF&: *this); |
1092 | Arg = Builder.CreateBitCast(V: Arg, DestTy: Addr.getElementType()); |
1093 | } |
1094 | EmitStoreOfScalar(value: Arg, lvalue: LV); |
1095 | } |
1096 | } |
1097 | } |
1098 | |
1099 | void CodeGenFunction::ExpandTypeToArgs( |
1100 | QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, |
1101 | SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { |
1102 | auto Exp = getTypeExpansion(Ty, Context: getContext()); |
1103 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
1104 | Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(CGF&: *this) |
1105 | : Arg.getKnownRValue().getAggregateAddress(); |
1106 | forConstantArrayExpansion( |
1107 | CGF&: *this, CAE: CAExp, BaseAddr: Addr, Fn: [&](Address EltAddr) { |
1108 | CallArg EltArg = CallArg( |
1109 | convertTempToRValue(addr: EltAddr, type: CAExp->EltTy, Loc: SourceLocation()), |
1110 | CAExp->EltTy); |
1111 | ExpandTypeToArgs(Ty: CAExp->EltTy, Arg: EltArg, IRFuncTy, IRCallArgs, |
1112 | IRCallArgPos); |
1113 | }); |
1114 | } else if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1115 | Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(CGF&: *this) |
1116 | : Arg.getKnownRValue().getAggregateAddress(); |
1117 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1118 | // Perform a single step derived-to-base conversion. |
1119 | Address Base = |
1120 | GetAddressOfBaseClass(Value: This, Derived: Ty->getAsCXXRecordDecl(), PathBegin: &BS, PathEnd: &BS + 1, |
1121 | /*NullCheckValue=*/false, Loc: SourceLocation()); |
1122 | CallArg BaseArg = CallArg(RValue::getAggregate(addr: Base), BS->getType()); |
1123 | |
1124 | // Recurse onto bases. |
1125 | ExpandTypeToArgs(Ty: BS->getType(), Arg: BaseArg, IRFuncTy, IRCallArgs, |
1126 | IRCallArgPos); |
1127 | } |
1128 | |
1129 | LValue LV = MakeAddrLValue(Addr: This, T: Ty); |
1130 | for (auto FD : RExp->Fields) { |
1131 | CallArg FldArg = |
1132 | CallArg(EmitRValueForField(LV, FD, Loc: SourceLocation()), FD->getType()); |
1133 | ExpandTypeToArgs(Ty: FD->getType(), Arg: FldArg, IRFuncTy, IRCallArgs, |
1134 | IRCallArgPos); |
1135 | } |
1136 | } else if (isa<ComplexExpansion>(Val: Exp.get())) { |
1137 | ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); |
1138 | IRCallArgs[IRCallArgPos++] = CV.first; |
1139 | IRCallArgs[IRCallArgPos++] = CV.second; |
1140 | } else { |
1141 | assert(isa<NoExpansion>(Exp.get())); |
1142 | auto RV = Arg.getKnownRValue(); |
1143 | assert(RV.isScalar() && |
1144 | "Unexpected non-scalar rvalue during struct expansion." ); |
1145 | |
1146 | // Insert a bitcast as needed. |
1147 | llvm::Value *V = RV.getScalarVal(); |
1148 | if (IRCallArgPos < IRFuncTy->getNumParams() && |
1149 | V->getType() != IRFuncTy->getParamType(i: IRCallArgPos)) |
1150 | V = Builder.CreateBitCast(V, DestTy: IRFuncTy->getParamType(i: IRCallArgPos)); |
1151 | |
1152 | IRCallArgs[IRCallArgPos++] = V; |
1153 | } |
1154 | } |
1155 | |
1156 | /// Create a temporary allocation for the purposes of coercion. |
1157 | static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, |
1158 | llvm::Type *Ty, |
1159 | CharUnits MinAlign, |
1160 | const Twine &Name = "tmp" ) { |
1161 | // Don't use an alignment that's worse than what LLVM would prefer. |
1162 | auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty); |
1163 | CharUnits Align = std::max(a: MinAlign, b: CharUnits::fromQuantity(Quantity: PrefAlign)); |
1164 | |
1165 | return CGF.CreateTempAlloca(Ty, align: Align, Name: Name + ".coerce" ); |
1166 | } |
1167 | |
1168 | /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are |
1169 | /// accessing some number of bytes out of it, try to gep into the struct to get |
1170 | /// at its inner goodness. Dive as deep as possible without entering an element |
1171 | /// with an in-memory size smaller than DstSize. |
1172 | static Address |
1173 | EnterStructPointerForCoercedAccess(Address SrcPtr, |
1174 | llvm::StructType *SrcSTy, |
1175 | uint64_t DstSize, CodeGenFunction &CGF) { |
1176 | // We can't dive into a zero-element struct. |
1177 | if (SrcSTy->getNumElements() == 0) return SrcPtr; |
1178 | |
1179 | llvm::Type *FirstElt = SrcSTy->getElementType(N: 0); |
1180 | |
1181 | // If the first elt is at least as large as what we're looking for, or if the |
1182 | // first element is the same size as the whole struct, we can enter it. The |
1183 | // comparison must be made on the store size and not the alloca size. Using |
1184 | // the alloca size may overstate the size of the load. |
1185 | uint64_t FirstEltSize = |
1186 | CGF.CGM.getDataLayout().getTypeStoreSize(Ty: FirstElt); |
1187 | if (FirstEltSize < DstSize && |
1188 | FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(Ty: SrcSTy)) |
1189 | return SrcPtr; |
1190 | |
1191 | // GEP into the first element. |
1192 | SrcPtr = CGF.Builder.CreateStructGEP(Addr: SrcPtr, Index: 0, Name: "coerce.dive" ); |
1193 | |
1194 | // If the first element is a struct, recurse. |
1195 | llvm::Type *SrcTy = SrcPtr.getElementType(); |
1196 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(Val: SrcTy)) |
1197 | return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); |
1198 | |
1199 | return SrcPtr; |
1200 | } |
1201 | |
1202 | /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both |
1203 | /// are either integers or pointers. This does a truncation of the value if it |
1204 | /// is too large or a zero extension if it is too small. |
1205 | /// |
1206 | /// This behaves as if the value were coerced through memory, so on big-endian |
1207 | /// targets the high bits are preserved in a truncation, while little-endian |
1208 | /// targets preserve the low bits. |
1209 | static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, |
1210 | llvm::Type *Ty, |
1211 | CodeGenFunction &CGF) { |
1212 | if (Val->getType() == Ty) |
1213 | return Val; |
1214 | |
1215 | if (isa<llvm::PointerType>(Val: Val->getType())) { |
1216 | // If this is Pointer->Pointer avoid conversion to and from int. |
1217 | if (isa<llvm::PointerType>(Val: Ty)) |
1218 | return CGF.Builder.CreateBitCast(V: Val, DestTy: Ty, Name: "coerce.val" ); |
1219 | |
1220 | // Convert the pointer to an integer so we can play with its width. |
1221 | Val = CGF.Builder.CreatePtrToInt(V: Val, DestTy: CGF.IntPtrTy, Name: "coerce.val.pi" ); |
1222 | } |
1223 | |
1224 | llvm::Type *DestIntTy = Ty; |
1225 | if (isa<llvm::PointerType>(Val: DestIntTy)) |
1226 | DestIntTy = CGF.IntPtrTy; |
1227 | |
1228 | if (Val->getType() != DestIntTy) { |
1229 | const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); |
1230 | if (DL.isBigEndian()) { |
1231 | // Preserve the high bits on big-endian targets. |
1232 | // That is what memory coercion does. |
1233 | uint64_t SrcSize = DL.getTypeSizeInBits(Ty: Val->getType()); |
1234 | uint64_t DstSize = DL.getTypeSizeInBits(Ty: DestIntTy); |
1235 | |
1236 | if (SrcSize > DstSize) { |
1237 | Val = CGF.Builder.CreateLShr(LHS: Val, RHS: SrcSize - DstSize, Name: "coerce.highbits" ); |
1238 | Val = CGF.Builder.CreateTrunc(V: Val, DestTy: DestIntTy, Name: "coerce.val.ii" ); |
1239 | } else { |
1240 | Val = CGF.Builder.CreateZExt(V: Val, DestTy: DestIntTy, Name: "coerce.val.ii" ); |
1241 | Val = CGF.Builder.CreateShl(LHS: Val, RHS: DstSize - SrcSize, Name: "coerce.highbits" ); |
1242 | } |
1243 | } else { |
1244 | // Little-endian targets preserve the low bits. No shifts required. |
1245 | Val = CGF.Builder.CreateIntCast(V: Val, DestTy: DestIntTy, isSigned: false, Name: "coerce.val.ii" ); |
1246 | } |
1247 | } |
1248 | |
1249 | if (isa<llvm::PointerType>(Val: Ty)) |
1250 | Val = CGF.Builder.CreateIntToPtr(V: Val, DestTy: Ty, Name: "coerce.val.ip" ); |
1251 | return Val; |
1252 | } |
1253 | |
1254 | |
1255 | |
1256 | /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as |
1257 | /// a pointer to an object of type \arg Ty, known to be aligned to |
1258 | /// \arg SrcAlign bytes. |
1259 | /// |
1260 | /// This safely handles the case when the src type is smaller than the |
1261 | /// destination type; in this situation the values of bits which not |
1262 | /// present in the src are undefined. |
1263 | static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, |
1264 | CodeGenFunction &CGF) { |
1265 | llvm::Type *SrcTy = Src.getElementType(); |
1266 | |
1267 | // If SrcTy and Ty are the same, just do a load. |
1268 | if (SrcTy == Ty) |
1269 | return CGF.Builder.CreateLoad(Addr: Src); |
1270 | |
1271 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); |
1272 | |
1273 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(Val: SrcTy)) { |
1274 | Src = EnterStructPointerForCoercedAccess(SrcPtr: Src, SrcSTy, |
1275 | DstSize: DstSize.getFixedValue(), CGF); |
1276 | SrcTy = Src.getElementType(); |
1277 | } |
1278 | |
1279 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
1280 | |
1281 | // If the source and destination are integer or pointer types, just do an |
1282 | // extension or truncation to the desired type. |
1283 | if ((isa<llvm::IntegerType>(Val: Ty) || isa<llvm::PointerType>(Val: Ty)) && |
1284 | (isa<llvm::IntegerType>(Val: SrcTy) || isa<llvm::PointerType>(Val: SrcTy))) { |
1285 | llvm::Value *Load = CGF.Builder.CreateLoad(Addr: Src); |
1286 | return CoerceIntOrPtrToIntOrPtr(Val: Load, Ty, CGF); |
1287 | } |
1288 | |
1289 | // If load is legal, just bitcast the src pointer. |
1290 | if (!SrcSize.isScalable() && !DstSize.isScalable() && |
1291 | SrcSize.getFixedValue() >= DstSize.getFixedValue()) { |
1292 | // Generally SrcSize is never greater than DstSize, since this means we are |
1293 | // losing bits. However, this can happen in cases where the structure has |
1294 | // additional padding, for example due to a user specified alignment. |
1295 | // |
1296 | // FIXME: Assert that we aren't truncating non-padding bits when have access |
1297 | // to that information. |
1298 | Src = Src.withElementType(ElemTy: Ty); |
1299 | return CGF.Builder.CreateLoad(Addr: Src); |
1300 | } |
1301 | |
1302 | // If coercing a fixed vector to a scalable vector for ABI compatibility, and |
1303 | // the types match, use the llvm.vector.insert intrinsic to perform the |
1304 | // conversion. |
1305 | if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Val: Ty)) { |
1306 | if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) { |
1307 | // If we are casting a fixed i8 vector to a scalable i1 predicate |
1308 | // vector, use a vector insert and bitcast the result. |
1309 | if (ScalableDstTy->getElementType()->isIntegerTy(Bitwidth: 1) && |
1310 | ScalableDstTy->getElementCount().isKnownMultipleOf(RHS: 8) && |
1311 | FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) { |
1312 | ScalableDstTy = llvm::ScalableVectorType::get( |
1313 | ElementType: FixedSrcTy->getElementType(), |
1314 | MinNumElts: ScalableDstTy->getElementCount().getKnownMinValue() / 8); |
1315 | } |
1316 | if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) { |
1317 | auto *Load = CGF.Builder.CreateLoad(Addr: Src); |
1318 | auto *UndefVec = llvm::UndefValue::get(T: ScalableDstTy); |
1319 | auto *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty); |
1320 | llvm::Value *Result = CGF.Builder.CreateInsertVector( |
1321 | DstType: ScalableDstTy, SrcVec: UndefVec, SubVec: Load, Idx: Zero, Name: "cast.scalable" ); |
1322 | if (ScalableDstTy != Ty) |
1323 | Result = CGF.Builder.CreateBitCast(V: Result, DestTy: Ty); |
1324 | return Result; |
1325 | } |
1326 | } |
1327 | } |
1328 | |
1329 | // Otherwise do coercion through memory. This is stupid, but simple. |
1330 | RawAddress Tmp = |
1331 | CreateTempAllocaForCoercion(CGF, Ty, MinAlign: Src.getAlignment(), Name: Src.getName()); |
1332 | CGF.Builder.CreateMemCpy( |
1333 | Dst: Tmp.getPointer(), DstAlign: Tmp.getAlignment().getAsAlign(), |
1334 | Src: Src.emitRawPointer(CGF), SrcAlign: Src.getAlignment().getAsAlign(), |
1335 | Size: llvm::ConstantInt::get(Ty: CGF.IntPtrTy, V: SrcSize.getKnownMinValue())); |
1336 | return CGF.Builder.CreateLoad(Addr: Tmp); |
1337 | } |
1338 | |
1339 | // Function to store a first-class aggregate into memory. We prefer to |
1340 | // store the elements rather than the aggregate to be more friendly to |
1341 | // fast-isel. |
1342 | // FIXME: Do we need to recurse here? |
1343 | void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest, |
1344 | bool DestIsVolatile) { |
1345 | // Prefer scalar stores to first-class aggregate stores. |
1346 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val: Val->getType())) { |
1347 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
1348 | Address EltPtr = Builder.CreateStructGEP(Addr: Dest, Index: i); |
1349 | llvm::Value *Elt = Builder.CreateExtractValue(Agg: Val, Idxs: i); |
1350 | Builder.CreateStore(Val: Elt, Addr: EltPtr, IsVolatile: DestIsVolatile); |
1351 | } |
1352 | } else { |
1353 | Builder.CreateStore(Val, Addr: Dest, IsVolatile: DestIsVolatile); |
1354 | } |
1355 | } |
1356 | |
1357 | /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, |
1358 | /// where the source and destination may have different types. The |
1359 | /// destination is known to be aligned to \arg DstAlign bytes. |
1360 | /// |
1361 | /// This safely handles the case when the src type is larger than the |
1362 | /// destination type; the upper bits of the src will be lost. |
1363 | static void CreateCoercedStore(llvm::Value *Src, |
1364 | Address Dst, |
1365 | bool DstIsVolatile, |
1366 | CodeGenFunction &CGF) { |
1367 | llvm::Type *SrcTy = Src->getType(); |
1368 | llvm::Type *DstTy = Dst.getElementType(); |
1369 | if (SrcTy == DstTy) { |
1370 | CGF.Builder.CreateStore(Val: Src, Addr: Dst, IsVolatile: DstIsVolatile); |
1371 | return; |
1372 | } |
1373 | |
1374 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
1375 | |
1376 | if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(Val: DstTy)) { |
1377 | Dst = EnterStructPointerForCoercedAccess(SrcPtr: Dst, SrcSTy: DstSTy, |
1378 | DstSize: SrcSize.getFixedValue(), CGF); |
1379 | DstTy = Dst.getElementType(); |
1380 | } |
1381 | |
1382 | llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(Val: SrcTy); |
1383 | llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(Val: DstTy); |
1384 | if (SrcPtrTy && DstPtrTy && |
1385 | SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { |
1386 | Src = CGF.Builder.CreateAddrSpaceCast(V: Src, DestTy: DstTy); |
1387 | CGF.Builder.CreateStore(Val: Src, Addr: Dst, IsVolatile: DstIsVolatile); |
1388 | return; |
1389 | } |
1390 | |
1391 | // If the source and destination are integer or pointer types, just do an |
1392 | // extension or truncation to the desired type. |
1393 | if ((isa<llvm::IntegerType>(Val: SrcTy) || isa<llvm::PointerType>(Val: SrcTy)) && |
1394 | (isa<llvm::IntegerType>(Val: DstTy) || isa<llvm::PointerType>(Val: DstTy))) { |
1395 | Src = CoerceIntOrPtrToIntOrPtr(Val: Src, Ty: DstTy, CGF); |
1396 | CGF.Builder.CreateStore(Val: Src, Addr: Dst, IsVolatile: DstIsVolatile); |
1397 | return; |
1398 | } |
1399 | |
1400 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty: DstTy); |
1401 | |
1402 | // If store is legal, just bitcast the src pointer. |
1403 | if (isa<llvm::ScalableVectorType>(Val: SrcTy) || |
1404 | isa<llvm::ScalableVectorType>(Val: DstTy) || |
1405 | SrcSize.getFixedValue() <= DstSize.getFixedValue()) { |
1406 | Dst = Dst.withElementType(ElemTy: SrcTy); |
1407 | CGF.EmitAggregateStore(Val: Src, Dest: Dst, DestIsVolatile: DstIsVolatile); |
1408 | } else { |
1409 | // Otherwise do coercion through memory. This is stupid, but |
1410 | // simple. |
1411 | |
1412 | // Generally SrcSize is never greater than DstSize, since this means we are |
1413 | // losing bits. However, this can happen in cases where the structure has |
1414 | // additional padding, for example due to a user specified alignment. |
1415 | // |
1416 | // FIXME: Assert that we aren't truncating non-padding bits when have access |
1417 | // to that information. |
1418 | RawAddress Tmp = |
1419 | CreateTempAllocaForCoercion(CGF, Ty: SrcTy, MinAlign: Dst.getAlignment()); |
1420 | CGF.Builder.CreateStore(Val: Src, Addr: Tmp); |
1421 | CGF.Builder.CreateMemCpy( |
1422 | Dst: Dst.emitRawPointer(CGF), DstAlign: Dst.getAlignment().getAsAlign(), |
1423 | Src: Tmp.getPointer(), SrcAlign: Tmp.getAlignment().getAsAlign(), |
1424 | Size: llvm::ConstantInt::get(Ty: CGF.IntPtrTy, V: DstSize.getFixedValue())); |
1425 | } |
1426 | } |
1427 | |
1428 | static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, |
1429 | const ABIArgInfo &info) { |
1430 | if (unsigned offset = info.getDirectOffset()) { |
1431 | addr = addr.withElementType(ElemTy: CGF.Int8Ty); |
1432 | addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: addr, |
1433 | Offset: CharUnits::fromQuantity(Quantity: offset)); |
1434 | addr = addr.withElementType(ElemTy: info.getCoerceToType()); |
1435 | } |
1436 | return addr; |
1437 | } |
1438 | |
1439 | namespace { |
1440 | |
1441 | /// Encapsulates information about the way function arguments from |
1442 | /// CGFunctionInfo should be passed to actual LLVM IR function. |
1443 | class ClangToLLVMArgMapping { |
1444 | static const unsigned InvalidIndex = ~0U; |
1445 | unsigned InallocaArgNo; |
1446 | unsigned SRetArgNo; |
1447 | unsigned TotalIRArgs; |
1448 | |
1449 | /// Arguments of LLVM IR function corresponding to single Clang argument. |
1450 | struct IRArgs { |
1451 | unsigned PaddingArgIndex; |
1452 | // Argument is expanded to IR arguments at positions |
1453 | // [FirstArgIndex, FirstArgIndex + NumberOfArgs). |
1454 | unsigned FirstArgIndex; |
1455 | unsigned NumberOfArgs; |
1456 | |
1457 | IRArgs() |
1458 | : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), |
1459 | NumberOfArgs(0) {} |
1460 | }; |
1461 | |
1462 | SmallVector<IRArgs, 8> ArgInfo; |
1463 | |
1464 | public: |
1465 | ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, |
1466 | bool OnlyRequiredArgs = false) |
1467 | : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), |
1468 | ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { |
1469 | construct(Context, FI, OnlyRequiredArgs); |
1470 | } |
1471 | |
1472 | bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } |
1473 | unsigned getInallocaArgNo() const { |
1474 | assert(hasInallocaArg()); |
1475 | return InallocaArgNo; |
1476 | } |
1477 | |
1478 | bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } |
1479 | unsigned getSRetArgNo() const { |
1480 | assert(hasSRetArg()); |
1481 | return SRetArgNo; |
1482 | } |
1483 | |
1484 | unsigned totalIRArgs() const { return TotalIRArgs; } |
1485 | |
1486 | bool hasPaddingArg(unsigned ArgNo) const { |
1487 | assert(ArgNo < ArgInfo.size()); |
1488 | return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; |
1489 | } |
1490 | unsigned getPaddingArgNo(unsigned ArgNo) const { |
1491 | assert(hasPaddingArg(ArgNo)); |
1492 | return ArgInfo[ArgNo].PaddingArgIndex; |
1493 | } |
1494 | |
1495 | /// Returns index of first IR argument corresponding to ArgNo, and their |
1496 | /// quantity. |
1497 | std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { |
1498 | assert(ArgNo < ArgInfo.size()); |
1499 | return std::make_pair(x: ArgInfo[ArgNo].FirstArgIndex, |
1500 | y: ArgInfo[ArgNo].NumberOfArgs); |
1501 | } |
1502 | |
1503 | private: |
1504 | void construct(const ASTContext &Context, const CGFunctionInfo &FI, |
1505 | bool OnlyRequiredArgs); |
1506 | }; |
1507 | |
1508 | void ClangToLLVMArgMapping::construct(const ASTContext &Context, |
1509 | const CGFunctionInfo &FI, |
1510 | bool OnlyRequiredArgs) { |
1511 | unsigned IRArgNo = 0; |
1512 | bool SwapThisWithSRet = false; |
1513 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
1514 | |
1515 | if (RetAI.getKind() == ABIArgInfo::Indirect) { |
1516 | SwapThisWithSRet = RetAI.isSRetAfterThis(); |
1517 | SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; |
1518 | } |
1519 | |
1520 | unsigned ArgNo = 0; |
1521 | unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); |
1522 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; |
1523 | ++I, ++ArgNo) { |
1524 | assert(I != FI.arg_end()); |
1525 | QualType ArgType = I->type; |
1526 | const ABIArgInfo &AI = I->info; |
1527 | // Collect data about IR arguments corresponding to Clang argument ArgNo. |
1528 | auto &IRArgs = ArgInfo[ArgNo]; |
1529 | |
1530 | if (AI.getPaddingType()) |
1531 | IRArgs.PaddingArgIndex = IRArgNo++; |
1532 | |
1533 | switch (AI.getKind()) { |
1534 | case ABIArgInfo::Extend: |
1535 | case ABIArgInfo::Direct: { |
1536 | // FIXME: handle sseregparm someday... |
1537 | llvm::StructType *STy = dyn_cast<llvm::StructType>(Val: AI.getCoerceToType()); |
1538 | if (AI.isDirect() && AI.getCanBeFlattened() && STy) { |
1539 | IRArgs.NumberOfArgs = STy->getNumElements(); |
1540 | } else { |
1541 | IRArgs.NumberOfArgs = 1; |
1542 | } |
1543 | break; |
1544 | } |
1545 | case ABIArgInfo::Indirect: |
1546 | case ABIArgInfo::IndirectAliased: |
1547 | IRArgs.NumberOfArgs = 1; |
1548 | break; |
1549 | case ABIArgInfo::Ignore: |
1550 | case ABIArgInfo::InAlloca: |
1551 | // ignore and inalloca doesn't have matching LLVM parameters. |
1552 | IRArgs.NumberOfArgs = 0; |
1553 | break; |
1554 | case ABIArgInfo::CoerceAndExpand: |
1555 | IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); |
1556 | break; |
1557 | case ABIArgInfo::Expand: |
1558 | IRArgs.NumberOfArgs = getExpansionSize(Ty: ArgType, Context); |
1559 | break; |
1560 | } |
1561 | |
1562 | if (IRArgs.NumberOfArgs > 0) { |
1563 | IRArgs.FirstArgIndex = IRArgNo; |
1564 | IRArgNo += IRArgs.NumberOfArgs; |
1565 | } |
1566 | |
1567 | // Skip over the sret parameter when it comes second. We already handled it |
1568 | // above. |
1569 | if (IRArgNo == 1 && SwapThisWithSRet) |
1570 | IRArgNo++; |
1571 | } |
1572 | assert(ArgNo == ArgInfo.size()); |
1573 | |
1574 | if (FI.usesInAlloca()) |
1575 | InallocaArgNo = IRArgNo++; |
1576 | |
1577 | TotalIRArgs = IRArgNo; |
1578 | } |
1579 | } // namespace |
1580 | |
1581 | /***/ |
1582 | |
1583 | bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { |
1584 | const auto &RI = FI.getReturnInfo(); |
1585 | return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); |
1586 | } |
1587 | |
1588 | bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { |
1589 | return ReturnTypeUsesSRet(FI) && |
1590 | getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); |
1591 | } |
1592 | |
1593 | bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { |
1594 | if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { |
1595 | switch (BT->getKind()) { |
1596 | default: |
1597 | return false; |
1598 | case BuiltinType::Float: |
1599 | return getTarget().useObjCFPRetForRealType(T: FloatModeKind::Float); |
1600 | case BuiltinType::Double: |
1601 | return getTarget().useObjCFPRetForRealType(T: FloatModeKind::Double); |
1602 | case BuiltinType::LongDouble: |
1603 | return getTarget().useObjCFPRetForRealType(T: FloatModeKind::LongDouble); |
1604 | } |
1605 | } |
1606 | |
1607 | return false; |
1608 | } |
1609 | |
1610 | bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { |
1611 | if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { |
1612 | if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { |
1613 | if (BT->getKind() == BuiltinType::LongDouble) |
1614 | return getTarget().useObjCFP2RetForComplexLongDouble(); |
1615 | } |
1616 | } |
1617 | |
1618 | return false; |
1619 | } |
1620 | |
1621 | llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { |
1622 | const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); |
1623 | return GetFunctionType(Info: FI); |
1624 | } |
1625 | |
1626 | llvm::FunctionType * |
1627 | CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { |
1628 | |
1629 | bool Inserted = FunctionsBeingProcessed.insert(Ptr: &FI).second; |
1630 | (void)Inserted; |
1631 | assert(Inserted && "Recursively being processed?" ); |
1632 | |
1633 | llvm::Type *resultType = nullptr; |
1634 | const ABIArgInfo &retAI = FI.getReturnInfo(); |
1635 | switch (retAI.getKind()) { |
1636 | case ABIArgInfo::Expand: |
1637 | case ABIArgInfo::IndirectAliased: |
1638 | llvm_unreachable("Invalid ABI kind for return argument" ); |
1639 | |
1640 | case ABIArgInfo::Extend: |
1641 | case ABIArgInfo::Direct: |
1642 | resultType = retAI.getCoerceToType(); |
1643 | break; |
1644 | |
1645 | case ABIArgInfo::InAlloca: |
1646 | if (retAI.getInAllocaSRet()) { |
1647 | // sret things on win32 aren't void, they return the sret pointer. |
1648 | QualType ret = FI.getReturnType(); |
1649 | unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(T: ret); |
1650 | resultType = llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: addressSpace); |
1651 | } else { |
1652 | resultType = llvm::Type::getVoidTy(C&: getLLVMContext()); |
1653 | } |
1654 | break; |
1655 | |
1656 | case ABIArgInfo::Indirect: |
1657 | case ABIArgInfo::Ignore: |
1658 | resultType = llvm::Type::getVoidTy(C&: getLLVMContext()); |
1659 | break; |
1660 | |
1661 | case ABIArgInfo::CoerceAndExpand: |
1662 | resultType = retAI.getUnpaddedCoerceAndExpandType(); |
1663 | break; |
1664 | } |
1665 | |
1666 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); |
1667 | SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); |
1668 | |
1669 | // Add type for sret argument. |
1670 | if (IRFunctionArgs.hasSRetArg()) { |
1671 | QualType Ret = FI.getReturnType(); |
1672 | unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(T: Ret); |
1673 | ArgTypes[IRFunctionArgs.getSRetArgNo()] = |
1674 | llvm::PointerType::get(C&: getLLVMContext(), AddressSpace); |
1675 | } |
1676 | |
1677 | // Add type for inalloca argument. |
1678 | if (IRFunctionArgs.hasInallocaArg()) |
1679 | ArgTypes[IRFunctionArgs.getInallocaArgNo()] = |
1680 | llvm::PointerType::getUnqual(C&: getLLVMContext()); |
1681 | |
1682 | // Add in all of the required arguments. |
1683 | unsigned ArgNo = 0; |
1684 | CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), |
1685 | ie = it + FI.getNumRequiredArgs(); |
1686 | for (; it != ie; ++it, ++ArgNo) { |
1687 | const ABIArgInfo &ArgInfo = it->info; |
1688 | |
1689 | // Insert a padding type to ensure proper alignment. |
1690 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) |
1691 | ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
1692 | ArgInfo.getPaddingType(); |
1693 | |
1694 | unsigned FirstIRArg, NumIRArgs; |
1695 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
1696 | |
1697 | switch (ArgInfo.getKind()) { |
1698 | case ABIArgInfo::Ignore: |
1699 | case ABIArgInfo::InAlloca: |
1700 | assert(NumIRArgs == 0); |
1701 | break; |
1702 | |
1703 | case ABIArgInfo::Indirect: |
1704 | assert(NumIRArgs == 1); |
1705 | // indirect arguments are always on the stack, which is alloca addr space. |
1706 | ArgTypes[FirstIRArg] = llvm::PointerType::get( |
1707 | C&: getLLVMContext(), AddressSpace: CGM.getDataLayout().getAllocaAddrSpace()); |
1708 | break; |
1709 | case ABIArgInfo::IndirectAliased: |
1710 | assert(NumIRArgs == 1); |
1711 | ArgTypes[FirstIRArg] = llvm::PointerType::get( |
1712 | C&: getLLVMContext(), AddressSpace: ArgInfo.getIndirectAddrSpace()); |
1713 | break; |
1714 | case ABIArgInfo::Extend: |
1715 | case ABIArgInfo::Direct: { |
1716 | // Fast-isel and the optimizer generally like scalar values better than |
1717 | // FCAs, so we flatten them if this is safe to do for this argument. |
1718 | llvm::Type *argType = ArgInfo.getCoerceToType(); |
1719 | llvm::StructType *st = dyn_cast<llvm::StructType>(Val: argType); |
1720 | if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { |
1721 | assert(NumIRArgs == st->getNumElements()); |
1722 | for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) |
1723 | ArgTypes[FirstIRArg + i] = st->getElementType(N: i); |
1724 | } else { |
1725 | assert(NumIRArgs == 1); |
1726 | ArgTypes[FirstIRArg] = argType; |
1727 | } |
1728 | break; |
1729 | } |
1730 | |
1731 | case ABIArgInfo::CoerceAndExpand: { |
1732 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1733 | for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { |
1734 | *ArgTypesIter++ = EltTy; |
1735 | } |
1736 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1737 | break; |
1738 | } |
1739 | |
1740 | case ABIArgInfo::Expand: |
1741 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1742 | getExpandedTypes(Ty: it->type, TI&: ArgTypesIter); |
1743 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1744 | break; |
1745 | } |
1746 | } |
1747 | |
1748 | bool Erased = FunctionsBeingProcessed.erase(Ptr: &FI); (void)Erased; |
1749 | assert(Erased && "Not in set?" ); |
1750 | |
1751 | return llvm::FunctionType::get(Result: resultType, Params: ArgTypes, isVarArg: FI.isVariadic()); |
1752 | } |
1753 | |
1754 | llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { |
1755 | const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl()); |
1756 | const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); |
1757 | |
1758 | if (!isFuncTypeConvertible(FPT)) |
1759 | return llvm::StructType::get(Context&: getLLVMContext()); |
1760 | |
1761 | return GetFunctionType(GD); |
1762 | } |
1763 | |
1764 | static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, |
1765 | llvm::AttrBuilder &FuncAttrs, |
1766 | const FunctionProtoType *FPT) { |
1767 | if (!FPT) |
1768 | return; |
1769 | |
1770 | if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && |
1771 | FPT->isNothrow()) |
1772 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
1773 | |
1774 | unsigned SMEBits = FPT->getAArch64SMEAttributes(); |
1775 | if (SMEBits & FunctionType::SME_PStateSMEnabledMask) |
1776 | FuncAttrs.addAttribute(A: "aarch64_pstate_sm_enabled" ); |
1777 | if (SMEBits & FunctionType::SME_PStateSMCompatibleMask) |
1778 | FuncAttrs.addAttribute(A: "aarch64_pstate_sm_compatible" ); |
1779 | |
1780 | // ZA |
1781 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_Preserves) |
1782 | FuncAttrs.addAttribute(A: "aarch64_preserves_za" ); |
1783 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_In) |
1784 | FuncAttrs.addAttribute(A: "aarch64_in_za" ); |
1785 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_Out) |
1786 | FuncAttrs.addAttribute(A: "aarch64_out_za" ); |
1787 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_InOut) |
1788 | FuncAttrs.addAttribute(A: "aarch64_inout_za" ); |
1789 | |
1790 | // ZT0 |
1791 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_Preserves) |
1792 | FuncAttrs.addAttribute(A: "aarch64_preserves_zt0" ); |
1793 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_In) |
1794 | FuncAttrs.addAttribute(A: "aarch64_in_zt0" ); |
1795 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_Out) |
1796 | FuncAttrs.addAttribute(A: "aarch64_out_zt0" ); |
1797 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_InOut) |
1798 | FuncAttrs.addAttribute(A: "aarch64_inout_zt0" ); |
1799 | } |
1800 | |
1801 | static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, |
1802 | const Decl *Callee) { |
1803 | if (!Callee) |
1804 | return; |
1805 | |
1806 | SmallVector<StringRef, 4> Attrs; |
1807 | |
1808 | for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>()) |
1809 | AA->getAssumption().split(Attrs, "," ); |
1810 | |
1811 | if (!Attrs.empty()) |
1812 | FuncAttrs.addAttribute(A: llvm::AssumptionAttrKey, |
1813 | V: llvm::join(Begin: Attrs.begin(), End: Attrs.end(), Separator: "," )); |
1814 | } |
1815 | |
1816 | bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, |
1817 | QualType ReturnType) const { |
1818 | // We can't just discard the return value for a record type with a |
1819 | // complex destructor or a non-trivially copyable type. |
1820 | if (const RecordType *RT = |
1821 | ReturnType.getCanonicalType()->getAs<RecordType>()) { |
1822 | if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) |
1823 | return ClassDecl->hasTrivialDestructor(); |
1824 | } |
1825 | return ReturnType.isTriviallyCopyableType(Context); |
1826 | } |
1827 | |
1828 | static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, |
1829 | const Decl *TargetDecl) { |
1830 | // As-is msan can not tolerate noundef mismatch between caller and |
1831 | // implementation. Mismatch is possible for e.g. indirect calls from C-caller |
1832 | // into C++. Such mismatches lead to confusing false reports. To avoid |
1833 | // expensive workaround on msan we enforce initialization event in uncommon |
1834 | // cases where it's allowed. |
1835 | if (Module.getLangOpts().Sanitize.has(K: SanitizerKind::Memory)) |
1836 | return true; |
1837 | // C++ explicitly makes returning undefined values UB. C's rule only applies |
1838 | // to used values, so we never mark them noundef for now. |
1839 | if (!Module.getLangOpts().CPlusPlus) |
1840 | return false; |
1841 | if (TargetDecl) { |
1842 | if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(Val: TargetDecl)) { |
1843 | if (FDecl->isExternC()) |
1844 | return false; |
1845 | } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(Val: TargetDecl)) { |
1846 | // Function pointer. |
1847 | if (VDecl->isExternC()) |
1848 | return false; |
1849 | } |
1850 | } |
1851 | |
1852 | // We don't want to be too aggressive with the return checking, unless |
1853 | // it's explicit in the code opts or we're using an appropriate sanitizer. |
1854 | // Try to respect what the programmer intended. |
1855 | return Module.getCodeGenOpts().StrictReturn || |
1856 | !Module.MayDropFunctionReturn(Context: Module.getContext(), ReturnType: RetTy) || |
1857 | Module.getLangOpts().Sanitize.has(K: SanitizerKind::Return); |
1858 | } |
1859 | |
1860 | /// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the |
1861 | /// requested denormal behavior, accounting for the overriding behavior of the |
1862 | /// -f32 case. |
1863 | static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, |
1864 | llvm::DenormalMode FP32DenormalMode, |
1865 | llvm::AttrBuilder &FuncAttrs) { |
1866 | if (FPDenormalMode != llvm::DenormalMode::getDefault()) |
1867 | FuncAttrs.addAttribute(A: "denormal-fp-math" , V: FPDenormalMode.str()); |
1868 | |
1869 | if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid()) |
1870 | FuncAttrs.addAttribute(A: "denormal-fp-math-f32" , V: FP32DenormalMode.str()); |
1871 | } |
1872 | |
1873 | /// Add default attributes to a function, which have merge semantics under |
1874 | /// -mlink-builtin-bitcode and should not simply overwrite any existing |
1875 | /// attributes in the linked library. |
1876 | static void |
1877 | addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, |
1878 | llvm::AttrBuilder &FuncAttrs) { |
1879 | addDenormalModeAttrs(FPDenormalMode: CodeGenOpts.FPDenormalMode, FP32DenormalMode: CodeGenOpts.FP32DenormalMode, |
1880 | FuncAttrs); |
1881 | } |
1882 | |
1883 | static void getTrivialDefaultFunctionAttributes( |
1884 | StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, |
1885 | const LangOptions &LangOpts, bool AttrOnCallSite, |
1886 | llvm::AttrBuilder &FuncAttrs) { |
1887 | // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. |
1888 | if (!HasOptnone) { |
1889 | if (CodeGenOpts.OptimizeSize) |
1890 | FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); |
1891 | if (CodeGenOpts.OptimizeSize == 2) |
1892 | FuncAttrs.addAttribute(llvm::Attribute::MinSize); |
1893 | } |
1894 | |
1895 | if (CodeGenOpts.DisableRedZone) |
1896 | FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); |
1897 | if (CodeGenOpts.IndirectTlsSegRefs) |
1898 | FuncAttrs.addAttribute(A: "indirect-tls-seg-refs" ); |
1899 | if (CodeGenOpts.NoImplicitFloat) |
1900 | FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); |
1901 | |
1902 | if (AttrOnCallSite) { |
1903 | // Attributes that should go on the call site only. |
1904 | // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking |
1905 | // the -fno-builtin-foo list. |
1906 | if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name)) |
1907 | FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); |
1908 | if (!CodeGenOpts.TrapFuncName.empty()) |
1909 | FuncAttrs.addAttribute(A: "trap-func-name" , V: CodeGenOpts.TrapFuncName); |
1910 | } else { |
1911 | switch (CodeGenOpts.getFramePointer()) { |
1912 | case CodeGenOptions::FramePointerKind::None: |
1913 | // This is the default behavior. |
1914 | break; |
1915 | case CodeGenOptions::FramePointerKind::NonLeaf: |
1916 | case CodeGenOptions::FramePointerKind::All: |
1917 | FuncAttrs.addAttribute(A: "frame-pointer" , |
1918 | V: CodeGenOptions::getFramePointerKindName( |
1919 | Kind: CodeGenOpts.getFramePointer())); |
1920 | } |
1921 | |
1922 | if (CodeGenOpts.LessPreciseFPMAD) |
1923 | FuncAttrs.addAttribute(A: "less-precise-fpmad" , V: "true" ); |
1924 | |
1925 | if (CodeGenOpts.NullPointerIsValid) |
1926 | FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); |
1927 | |
1928 | if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore) |
1929 | FuncAttrs.addAttribute(A: "no-trapping-math" , V: "true" ); |
1930 | |
1931 | // TODO: Are these all needed? |
1932 | // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. |
1933 | if (LangOpts.NoHonorInfs) |
1934 | FuncAttrs.addAttribute(A: "no-infs-fp-math" , V: "true" ); |
1935 | if (LangOpts.NoHonorNaNs) |
1936 | FuncAttrs.addAttribute(A: "no-nans-fp-math" , V: "true" ); |
1937 | if (LangOpts.ApproxFunc) |
1938 | FuncAttrs.addAttribute(A: "approx-func-fp-math" , V: "true" ); |
1939 | if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip && |
1940 | LangOpts.NoSignedZero && LangOpts.ApproxFunc && |
1941 | (LangOpts.getDefaultFPContractMode() == |
1942 | LangOptions::FPModeKind::FPM_Fast || |
1943 | LangOpts.getDefaultFPContractMode() == |
1944 | LangOptions::FPModeKind::FPM_FastHonorPragmas)) |
1945 | FuncAttrs.addAttribute(A: "unsafe-fp-math" , V: "true" ); |
1946 | if (CodeGenOpts.SoftFloat) |
1947 | FuncAttrs.addAttribute(A: "use-soft-float" , V: "true" ); |
1948 | FuncAttrs.addAttribute(A: "stack-protector-buffer-size" , |
1949 | V: llvm::utostr(X: CodeGenOpts.SSPBufferSize)); |
1950 | if (LangOpts.NoSignedZero) |
1951 | FuncAttrs.addAttribute(A: "no-signed-zeros-fp-math" , V: "true" ); |
1952 | |
1953 | // TODO: Reciprocal estimate codegen options should apply to instructions? |
1954 | const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; |
1955 | if (!Recips.empty()) |
1956 | FuncAttrs.addAttribute(A: "reciprocal-estimates" , |
1957 | V: llvm::join(R: Recips, Separator: "," )); |
1958 | |
1959 | if (!CodeGenOpts.PreferVectorWidth.empty() && |
1960 | CodeGenOpts.PreferVectorWidth != "none" ) |
1961 | FuncAttrs.addAttribute(A: "prefer-vector-width" , |
1962 | V: CodeGenOpts.PreferVectorWidth); |
1963 | |
1964 | if (CodeGenOpts.StackRealignment) |
1965 | FuncAttrs.addAttribute(A: "stackrealign" ); |
1966 | if (CodeGenOpts.Backchain) |
1967 | FuncAttrs.addAttribute(A: "backchain" ); |
1968 | if (CodeGenOpts.EnableSegmentedStacks) |
1969 | FuncAttrs.addAttribute(A: "split-stack" ); |
1970 | |
1971 | if (CodeGenOpts.SpeculativeLoadHardening) |
1972 | FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); |
1973 | |
1974 | // Add zero-call-used-regs attribute. |
1975 | switch (CodeGenOpts.getZeroCallUsedRegs()) { |
1976 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip: |
1977 | FuncAttrs.removeAttribute(A: "zero-call-used-regs" ); |
1978 | break; |
1979 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg: |
1980 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used-gpr-arg" ); |
1981 | break; |
1982 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR: |
1983 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used-gpr" ); |
1984 | break; |
1985 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg: |
1986 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used-arg" ); |
1987 | break; |
1988 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used: |
1989 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used" ); |
1990 | break; |
1991 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg: |
1992 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all-gpr-arg" ); |
1993 | break; |
1994 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR: |
1995 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all-gpr" ); |
1996 | break; |
1997 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg: |
1998 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all-arg" ); |
1999 | break; |
2000 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All: |
2001 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all" ); |
2002 | break; |
2003 | } |
2004 | } |
2005 | |
2006 | if (LangOpts.assumeFunctionsAreConvergent()) { |
2007 | // Conservatively, mark all functions and calls in CUDA and OpenCL as |
2008 | // convergent (meaning, they may call an intrinsically convergent op, such |
2009 | // as __syncthreads() / barrier(), and so can't have certain optimizations |
2010 | // applied around them). LLVM will remove this attribute where it safely |
2011 | // can. |
2012 | FuncAttrs.addAttribute(llvm::Attribute::Convergent); |
2013 | } |
2014 | |
2015 | // TODO: NoUnwind attribute should be added for other GPU modes HIP, |
2016 | // OpenMP offload. AFAIK, neither of them support exceptions in device code. |
2017 | if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL || |
2018 | LangOpts.SYCLIsDevice) { |
2019 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2020 | } |
2021 | |
2022 | for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { |
2023 | StringRef Var, Value; |
2024 | std::tie(args&: Var, args&: Value) = Attr.split(Separator: '='); |
2025 | FuncAttrs.addAttribute(A: Var, V: Value); |
2026 | } |
2027 | } |
2028 | |
2029 | /// Merges `target-features` from \TargetOpts and \F, and sets the result in |
2030 | /// \FuncAttr |
2031 | /// * features from \F are always kept |
2032 | /// * a feature from \TargetOpts is kept if itself and its opposite are absent |
2033 | /// from \F |
2034 | static void |
2035 | overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, |
2036 | const llvm::Function &F, |
2037 | const TargetOptions &TargetOpts) { |
2038 | auto FFeatures = F.getFnAttribute(Kind: "target-features" ); |
2039 | |
2040 | llvm::StringSet<> MergedNames; |
2041 | SmallVector<StringRef> MergedFeatures; |
2042 | MergedFeatures.reserve(N: TargetOpts.Features.size()); |
2043 | |
2044 | auto AddUnmergedFeatures = [&](auto &&FeatureRange) { |
2045 | for (StringRef Feature : FeatureRange) { |
2046 | if (Feature.empty()) |
2047 | continue; |
2048 | assert(Feature[0] == '+' || Feature[0] == '-'); |
2049 | StringRef Name = Feature.drop_front(N: 1); |
2050 | bool Merged = !MergedNames.insert(key: Name).second; |
2051 | if (!Merged) |
2052 | MergedFeatures.push_back(Elt: Feature); |
2053 | } |
2054 | }; |
2055 | |
2056 | if (FFeatures.isValid()) |
2057 | AddUnmergedFeatures(llvm::split(Str: FFeatures.getValueAsString(), Separator: ',')); |
2058 | AddUnmergedFeatures(TargetOpts.Features); |
2059 | |
2060 | if (!MergedFeatures.empty()) { |
2061 | llvm::sort(C&: MergedFeatures); |
2062 | FuncAttr.addAttribute(A: "target-features" , V: llvm::join(R&: MergedFeatures, Separator: "," )); |
2063 | } |
2064 | } |
2065 | |
2066 | void CodeGen::mergeDefaultFunctionDefinitionAttributes( |
2067 | llvm::Function &F, const CodeGenOptions &CodeGenOpts, |
2068 | const LangOptions &LangOpts, const TargetOptions &TargetOpts, |
2069 | bool WillInternalize) { |
2070 | |
2071 | llvm::AttrBuilder FuncAttrs(F.getContext()); |
2072 | // Here we only extract the options that are relevant compared to the version |
2073 | // from GetCPUAndFeaturesAttributes. |
2074 | if (!TargetOpts.CPU.empty()) |
2075 | FuncAttrs.addAttribute(A: "target-cpu" , V: TargetOpts.CPU); |
2076 | if (!TargetOpts.TuneCPU.empty()) |
2077 | FuncAttrs.addAttribute(A: "tune-cpu" , V: TargetOpts.TuneCPU); |
2078 | |
2079 | ::getTrivialDefaultFunctionAttributes(Name: F.getName(), HasOptnone: F.hasOptNone(), |
2080 | CodeGenOpts, LangOpts, |
2081 | /*AttrOnCallSite=*/false, FuncAttrs); |
2082 | |
2083 | if (!WillInternalize && F.isInterposable()) { |
2084 | // Do not promote "dynamic" denormal-fp-math to this translation unit's |
2085 | // setting for weak functions that won't be internalized. The user has no |
2086 | // real control for how builtin bitcode is linked, so we shouldn't assume |
2087 | // later copies will use a consistent mode. |
2088 | F.addFnAttrs(Attrs: FuncAttrs); |
2089 | return; |
2090 | } |
2091 | |
2092 | llvm::AttributeMask AttrsToRemove; |
2093 | |
2094 | llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw(); |
2095 | llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw(); |
2096 | llvm::DenormalMode Merged = |
2097 | CodeGenOpts.FPDenormalMode.mergeCalleeMode(Callee: DenormModeToMerge); |
2098 | llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode; |
2099 | |
2100 | if (DenormModeToMergeF32.isValid()) { |
2101 | MergedF32 = |
2102 | CodeGenOpts.FP32DenormalMode.mergeCalleeMode(Callee: DenormModeToMergeF32); |
2103 | } |
2104 | |
2105 | if (Merged == llvm::DenormalMode::getDefault()) { |
2106 | AttrsToRemove.addAttribute(A: "denormal-fp-math" ); |
2107 | } else if (Merged != DenormModeToMerge) { |
2108 | // Overwrite existing attribute |
2109 | FuncAttrs.addAttribute(A: "denormal-fp-math" , |
2110 | V: CodeGenOpts.FPDenormalMode.str()); |
2111 | } |
2112 | |
2113 | if (MergedF32 == llvm::DenormalMode::getDefault()) { |
2114 | AttrsToRemove.addAttribute(A: "denormal-fp-math-f32" ); |
2115 | } else if (MergedF32 != DenormModeToMergeF32) { |
2116 | // Overwrite existing attribute |
2117 | FuncAttrs.addAttribute(A: "denormal-fp-math-f32" , |
2118 | V: CodeGenOpts.FP32DenormalMode.str()); |
2119 | } |
2120 | |
2121 | F.removeFnAttrs(Attrs: AttrsToRemove); |
2122 | addDenormalModeAttrs(FPDenormalMode: Merged, FP32DenormalMode: MergedF32, FuncAttrs); |
2123 | |
2124 | overrideFunctionFeaturesWithTargetFeatures(FuncAttr&: FuncAttrs, F, TargetOpts); |
2125 | |
2126 | F.addFnAttrs(Attrs: FuncAttrs); |
2127 | } |
2128 | |
2129 | void CodeGenModule::getTrivialDefaultFunctionAttributes( |
2130 | StringRef Name, bool HasOptnone, bool AttrOnCallSite, |
2131 | llvm::AttrBuilder &FuncAttrs) { |
2132 | ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, CodeGenOpts: getCodeGenOpts(), |
2133 | LangOpts: getLangOpts(), AttrOnCallSite, |
2134 | FuncAttrs); |
2135 | } |
2136 | |
2137 | void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, |
2138 | bool HasOptnone, |
2139 | bool AttrOnCallSite, |
2140 | llvm::AttrBuilder &FuncAttrs) { |
2141 | getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, |
2142 | FuncAttrs); |
2143 | // If we're just getting the default, get the default values for mergeable |
2144 | // attributes. |
2145 | if (!AttrOnCallSite) |
2146 | addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs); |
2147 | } |
2148 | |
2149 | void CodeGenModule::addDefaultFunctionDefinitionAttributes( |
2150 | llvm::AttrBuilder &attrs) { |
2151 | getDefaultFunctionAttributes(/*function name*/ Name: "" , /*optnone*/ HasOptnone: false, |
2152 | /*for call*/ AttrOnCallSite: false, FuncAttrs&: attrs); |
2153 | GetCPUAndFeaturesAttributes(GD: GlobalDecl(), AttrBuilder&: attrs); |
2154 | } |
2155 | |
2156 | static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, |
2157 | const LangOptions &LangOpts, |
2158 | const NoBuiltinAttr *NBA = nullptr) { |
2159 | auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { |
2160 | SmallString<32> AttributeName; |
2161 | AttributeName += "no-builtin-" ; |
2162 | AttributeName += BuiltinName; |
2163 | FuncAttrs.addAttribute(A: AttributeName); |
2164 | }; |
2165 | |
2166 | // First, handle the language options passed through -fno-builtin. |
2167 | if (LangOpts.NoBuiltin) { |
2168 | // -fno-builtin disables them all. |
2169 | FuncAttrs.addAttribute(A: "no-builtins" ); |
2170 | return; |
2171 | } |
2172 | |
2173 | // Then, add attributes for builtins specified through -fno-builtin-<name>. |
2174 | llvm::for_each(Range: LangOpts.NoBuiltinFuncs, F: AddNoBuiltinAttr); |
2175 | |
2176 | // Now, let's check the __attribute__((no_builtin("...")) attribute added to |
2177 | // the source. |
2178 | if (!NBA) |
2179 | return; |
2180 | |
2181 | // If there is a wildcard in the builtin names specified through the |
2182 | // attribute, disable them all. |
2183 | if (llvm::is_contained(NBA->builtinNames(), "*" )) { |
2184 | FuncAttrs.addAttribute(A: "no-builtins" ); |
2185 | return; |
2186 | } |
2187 | |
2188 | // And last, add the rest of the builtin names. |
2189 | llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); |
2190 | } |
2191 | |
2192 | static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, |
2193 | const llvm::DataLayout &DL, const ABIArgInfo &AI, |
2194 | bool CheckCoerce = true) { |
2195 | llvm::Type *Ty = Types.ConvertTypeForMem(T: QTy); |
2196 | if (AI.getKind() == ABIArgInfo::Indirect || |
2197 | AI.getKind() == ABIArgInfo::IndirectAliased) |
2198 | return true; |
2199 | if (AI.getKind() == ABIArgInfo::Extend) |
2200 | return true; |
2201 | if (!DL.typeSizeEqualsStoreSize(Ty)) |
2202 | // TODO: This will result in a modest amount of values not marked noundef |
2203 | // when they could be. We care about values that *invisibly* contain undef |
2204 | // bits from the perspective of LLVM IR. |
2205 | return false; |
2206 | if (CheckCoerce && AI.canHaveCoerceToType()) { |
2207 | llvm::Type *CoerceTy = AI.getCoerceToType(); |
2208 | if (llvm::TypeSize::isKnownGT(LHS: DL.getTypeSizeInBits(Ty: CoerceTy), |
2209 | RHS: DL.getTypeSizeInBits(Ty))) |
2210 | // If we're coercing to a type with a greater size than the canonical one, |
2211 | // we're introducing new undef bits. |
2212 | // Coercing to a type of smaller or equal size is ok, as we know that |
2213 | // there's no internal padding (typeSizeEqualsStoreSize). |
2214 | return false; |
2215 | } |
2216 | if (QTy->isBitIntType()) |
2217 | return true; |
2218 | if (QTy->isReferenceType()) |
2219 | return true; |
2220 | if (QTy->isNullPtrType()) |
2221 | return false; |
2222 | if (QTy->isMemberPointerType()) |
2223 | // TODO: Some member pointers are `noundef`, but it depends on the ABI. For |
2224 | // now, never mark them. |
2225 | return false; |
2226 | if (QTy->isScalarType()) { |
2227 | if (const ComplexType *Complex = dyn_cast<ComplexType>(Val&: QTy)) |
2228 | return DetermineNoUndef(QTy: Complex->getElementType(), Types, DL, AI, CheckCoerce: false); |
2229 | return true; |
2230 | } |
2231 | if (const VectorType *Vector = dyn_cast<VectorType>(Val&: QTy)) |
2232 | return DetermineNoUndef(QTy: Vector->getElementType(), Types, DL, AI, CheckCoerce: false); |
2233 | if (const MatrixType *Matrix = dyn_cast<MatrixType>(Val&: QTy)) |
2234 | return DetermineNoUndef(QTy: Matrix->getElementType(), Types, DL, AI, CheckCoerce: false); |
2235 | if (const ArrayType *Array = dyn_cast<ArrayType>(Val&: QTy)) |
2236 | return DetermineNoUndef(QTy: Array->getElementType(), Types, DL, AI, CheckCoerce: false); |
2237 | |
2238 | // TODO: Some structs may be `noundef`, in specific situations. |
2239 | return false; |
2240 | } |
2241 | |
2242 | /// Check if the argument of a function has maybe_undef attribute. |
2243 | static bool IsArgumentMaybeUndef(const Decl *TargetDecl, |
2244 | unsigned NumRequiredArgs, unsigned ArgNo) { |
2245 | const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl); |
2246 | if (!FD) |
2247 | return false; |
2248 | |
2249 | // Assume variadic arguments do not have maybe_undef attribute. |
2250 | if (ArgNo >= NumRequiredArgs) |
2251 | return false; |
2252 | |
2253 | // Check if argument has maybe_undef attribute. |
2254 | if (ArgNo < FD->getNumParams()) { |
2255 | const ParmVarDecl *Param = FD->getParamDecl(i: ArgNo); |
2256 | if (Param && Param->hasAttr<MaybeUndefAttr>()) |
2257 | return true; |
2258 | } |
2259 | |
2260 | return false; |
2261 | } |
2262 | |
2263 | /// Test if it's legal to apply nofpclass for the given parameter type and it's |
2264 | /// lowered IR type. |
2265 | static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, |
2266 | bool IsReturn) { |
2267 | // Should only apply to FP types in the source, not ABI promoted. |
2268 | if (!ParamType->hasFloatingRepresentation()) |
2269 | return false; |
2270 | |
2271 | // The promoted-to IR type also needs to support nofpclass. |
2272 | llvm::Type *IRTy = AI.getCoerceToType(); |
2273 | if (llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty: IRTy)) |
2274 | return true; |
2275 | |
2276 | if (llvm::StructType *ST = dyn_cast<llvm::StructType>(Val: IRTy)) { |
2277 | return !IsReturn && AI.getCanBeFlattened() && |
2278 | llvm::all_of(Range: ST->elements(), P: [](llvm::Type *Ty) { |
2279 | return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty); |
2280 | }); |
2281 | } |
2282 | |
2283 | return false; |
2284 | } |
2285 | |
2286 | /// Return the nofpclass mask that can be applied to floating-point parameters. |
2287 | static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) { |
2288 | llvm::FPClassTest Mask = llvm::fcNone; |
2289 | if (LangOpts.NoHonorInfs) |
2290 | Mask |= llvm::fcInf; |
2291 | if (LangOpts.NoHonorNaNs) |
2292 | Mask |= llvm::fcNan; |
2293 | return Mask; |
2294 | } |
2295 | |
2296 | void CodeGenModule::AdjustMemoryAttribute(StringRef Name, |
2297 | CGCalleeInfo CalleeInfo, |
2298 | llvm::AttributeList &Attrs) { |
2299 | if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) { |
2300 | Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory); |
2301 | llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects( |
2302 | Context&: getLLVMContext(), ME: llvm::MemoryEffects::writeOnly()); |
2303 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Attr: MemoryAttr); |
2304 | } |
2305 | } |
2306 | |
2307 | /// Construct the IR attribute list of a function or call. |
2308 | /// |
2309 | /// When adding an attribute, please consider where it should be handled: |
2310 | /// |
2311 | /// - getDefaultFunctionAttributes is for attributes that are essentially |
2312 | /// part of the global target configuration (but perhaps can be |
2313 | /// overridden on a per-function basis). Adding attributes there |
2314 | /// will cause them to also be set in frontends that build on Clang's |
2315 | /// target-configuration logic, as well as for code defined in library |
2316 | /// modules such as CUDA's libdevice. |
2317 | /// |
2318 | /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes |
2319 | /// and adds declaration-specific, convention-specific, and |
2320 | /// frontend-specific logic. The last is of particular importance: |
2321 | /// attributes that restrict how the frontend generates code must be |
2322 | /// added here rather than getDefaultFunctionAttributes. |
2323 | /// |
2324 | void CodeGenModule::ConstructAttributeList(StringRef Name, |
2325 | const CGFunctionInfo &FI, |
2326 | CGCalleeInfo CalleeInfo, |
2327 | llvm::AttributeList &AttrList, |
2328 | unsigned &CallingConv, |
2329 | bool AttrOnCallSite, bool IsThunk) { |
2330 | llvm::AttrBuilder FuncAttrs(getLLVMContext()); |
2331 | llvm::AttrBuilder RetAttrs(getLLVMContext()); |
2332 | |
2333 | // Collect function IR attributes from the CC lowering. |
2334 | // We'll collect the paramete and result attributes later. |
2335 | CallingConv = FI.getEffectiveCallingConvention(); |
2336 | if (FI.isNoReturn()) |
2337 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); |
2338 | if (FI.isCmseNSCall()) |
2339 | FuncAttrs.addAttribute(A: "cmse_nonsecure_call" ); |
2340 | |
2341 | // Collect function IR attributes from the callee prototype if we have one. |
2342 | AddAttributesFromFunctionProtoType(Ctx&: getContext(), FuncAttrs, |
2343 | FPT: CalleeInfo.getCalleeFunctionProtoType()); |
2344 | |
2345 | const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); |
2346 | |
2347 | // Attach assumption attributes to the declaration. If this is a call |
2348 | // site, attach assumptions from the caller to the call as well. |
2349 | AddAttributesFromOMPAssumes(FuncAttrs, Callee: TargetDecl); |
2350 | |
2351 | bool HasOptnone = false; |
2352 | // The NoBuiltinAttr attached to the target FunctionDecl. |
2353 | const NoBuiltinAttr *NBA = nullptr; |
2354 | |
2355 | // Some ABIs may result in additional accesses to arguments that may |
2356 | // otherwise not be present. |
2357 | auto AddPotentialArgAccess = [&]() { |
2358 | llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory); |
2359 | if (A.isValid()) |
2360 | FuncAttrs.addMemoryAttr(ME: A.getMemoryEffects() | |
2361 | llvm::MemoryEffects::argMemOnly()); |
2362 | }; |
2363 | |
2364 | // Collect function IR attributes based on declaration-specific |
2365 | // information. |
2366 | // FIXME: handle sseregparm someday... |
2367 | if (TargetDecl) { |
2368 | if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) |
2369 | FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); |
2370 | if (TargetDecl->hasAttr<NoThrowAttr>()) |
2371 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2372 | if (TargetDecl->hasAttr<NoReturnAttr>()) |
2373 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); |
2374 | if (TargetDecl->hasAttr<ColdAttr>()) |
2375 | FuncAttrs.addAttribute(llvm::Attribute::Cold); |
2376 | if (TargetDecl->hasAttr<HotAttr>()) |
2377 | FuncAttrs.addAttribute(llvm::Attribute::Hot); |
2378 | if (TargetDecl->hasAttr<NoDuplicateAttr>()) |
2379 | FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); |
2380 | if (TargetDecl->hasAttr<ConvergentAttr>()) |
2381 | FuncAttrs.addAttribute(llvm::Attribute::Convergent); |
2382 | |
2383 | if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(Val: TargetDecl)) { |
2384 | AddAttributesFromFunctionProtoType( |
2385 | getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); |
2386 | if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { |
2387 | // A sane operator new returns a non-aliasing pointer. |
2388 | auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); |
2389 | if (getCodeGenOpts().AssumeSaneOperatorNew && |
2390 | (Kind == OO_New || Kind == OO_Array_New)) |
2391 | RetAttrs.addAttribute(llvm::Attribute::NoAlias); |
2392 | } |
2393 | const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: Fn); |
2394 | const bool IsVirtualCall = MD && MD->isVirtual(); |
2395 | // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a |
2396 | // virtual function. These attributes are not inherited by overloads. |
2397 | if (!(AttrOnCallSite && IsVirtualCall)) { |
2398 | if (Fn->isNoReturn()) |
2399 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); |
2400 | NBA = Fn->getAttr<NoBuiltinAttr>(); |
2401 | } |
2402 | } |
2403 | |
2404 | if (isa<FunctionDecl>(Val: TargetDecl) || isa<VarDecl>(Val: TargetDecl)) { |
2405 | // Only place nomerge attribute on call sites, never functions. This |
2406 | // allows it to work on indirect virtual function calls. |
2407 | if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>()) |
2408 | FuncAttrs.addAttribute(llvm::Attribute::NoMerge); |
2409 | } |
2410 | |
2411 | // 'const', 'pure' and 'noalias' attributed functions are also nounwind. |
2412 | if (TargetDecl->hasAttr<ConstAttr>()) { |
2413 | FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::none()); |
2414 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2415 | // gcc specifies that 'const' functions have greater restrictions than |
2416 | // 'pure' functions, so they also cannot have infinite loops. |
2417 | FuncAttrs.addAttribute(llvm::Attribute::WillReturn); |
2418 | } else if (TargetDecl->hasAttr<PureAttr>()) { |
2419 | FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::readOnly()); |
2420 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2421 | // gcc specifies that 'pure' functions cannot have infinite loops. |
2422 | FuncAttrs.addAttribute(llvm::Attribute::WillReturn); |
2423 | } else if (TargetDecl->hasAttr<NoAliasAttr>()) { |
2424 | FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::inaccessibleOrArgMemOnly()); |
2425 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2426 | } |
2427 | if (TargetDecl->hasAttr<RestrictAttr>()) |
2428 | RetAttrs.addAttribute(llvm::Attribute::NoAlias); |
2429 | if (TargetDecl->hasAttr<ReturnsNonNullAttr>() && |
2430 | !CodeGenOpts.NullPointerIsValid) |
2431 | RetAttrs.addAttribute(llvm::Attribute::NonNull); |
2432 | if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) |
2433 | FuncAttrs.addAttribute(A: "no_caller_saved_registers" ); |
2434 | if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>()) |
2435 | FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck); |
2436 | if (TargetDecl->hasAttr<LeafAttr>()) |
2437 | FuncAttrs.addAttribute(llvm::Attribute::NoCallback); |
2438 | |
2439 | HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); |
2440 | if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { |
2441 | std::optional<unsigned> NumElemsParam; |
2442 | if (AllocSize->getNumElemsParam().isValid()) |
2443 | NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); |
2444 | FuncAttrs.addAllocSizeAttr(ElemSizeArg: AllocSize->getElemSizeParam().getLLVMIndex(), |
2445 | NumElemsArg: NumElemsParam); |
2446 | } |
2447 | |
2448 | if (TargetDecl->hasAttr<OpenCLKernelAttr>()) { |
2449 | if (getLangOpts().OpenCLVersion <= 120) { |
2450 | // OpenCL v1.2 Work groups are always uniform |
2451 | FuncAttrs.addAttribute(A: "uniform-work-group-size" , V: "true" ); |
2452 | } else { |
2453 | // OpenCL v2.0 Work groups may be whether uniform or not. |
2454 | // '-cl-uniform-work-group-size' compile option gets a hint |
2455 | // to the compiler that the global work-size be a multiple of |
2456 | // the work-group size specified to clEnqueueNDRangeKernel |
2457 | // (i.e. work groups are uniform). |
2458 | FuncAttrs.addAttribute( |
2459 | A: "uniform-work-group-size" , |
2460 | V: llvm::toStringRef(B: getLangOpts().OffloadUniformBlock)); |
2461 | } |
2462 | } |
2463 | |
2464 | if (TargetDecl->hasAttr<CUDAGlobalAttr>() && |
2465 | getLangOpts().OffloadUniformBlock) |
2466 | FuncAttrs.addAttribute(A: "uniform-work-group-size" , V: "true" ); |
2467 | |
2468 | if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>()) |
2469 | FuncAttrs.addAttribute(A: "aarch64_pstate_sm_body" ); |
2470 | } |
2471 | |
2472 | // Attach "no-builtins" attributes to: |
2473 | // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>". |
2474 | // * definitions: "no-builtins" or "no-builtin-<name>" only. |
2475 | // The attributes can come from: |
2476 | // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name> |
2477 | // * FunctionDecl attributes: __attribute__((no_builtin(...))) |
2478 | addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA); |
2479 | |
2480 | // Collect function IR attributes based on global settiings. |
2481 | getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs); |
2482 | |
2483 | // Override some default IR attributes based on declaration-specific |
2484 | // information. |
2485 | if (TargetDecl) { |
2486 | if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>()) |
2487 | FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening); |
2488 | if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>()) |
2489 | FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); |
2490 | if (TargetDecl->hasAttr<NoSplitStackAttr>()) |
2491 | FuncAttrs.removeAttribute(A: "split-stack" ); |
2492 | if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) { |
2493 | // A function "__attribute__((...))" overrides the command-line flag. |
2494 | auto Kind = |
2495 | TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs(); |
2496 | FuncAttrs.removeAttribute(A: "zero-call-used-regs" ); |
2497 | FuncAttrs.addAttribute( |
2498 | "zero-call-used-regs" , |
2499 | ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind)); |
2500 | } |
2501 | |
2502 | // Add NonLazyBind attribute to function declarations when -fno-plt |
2503 | // is used. |
2504 | // FIXME: what if we just haven't processed the function definition |
2505 | // yet, or if it's an external definition like C99 inline? |
2506 | if (CodeGenOpts.NoPLT) { |
2507 | if (auto *Fn = dyn_cast<FunctionDecl>(Val: TargetDecl)) { |
2508 | if (!Fn->isDefined() && !AttrOnCallSite) { |
2509 | FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); |
2510 | } |
2511 | } |
2512 | } |
2513 | } |
2514 | |
2515 | // Add "sample-profile-suffix-elision-policy" attribute for internal linkage |
2516 | // functions with -funique-internal-linkage-names. |
2517 | if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) { |
2518 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl)) { |
2519 | if (!FD->isExternallyVisible()) |
2520 | FuncAttrs.addAttribute(A: "sample-profile-suffix-elision-policy" , |
2521 | V: "selected" ); |
2522 | } |
2523 | } |
2524 | |
2525 | // Collect non-call-site function IR attributes from declaration-specific |
2526 | // information. |
2527 | if (!AttrOnCallSite) { |
2528 | if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) |
2529 | FuncAttrs.addAttribute(A: "cmse_nonsecure_entry" ); |
2530 | |
2531 | // Whether tail calls are enabled. |
2532 | auto shouldDisableTailCalls = [&] { |
2533 | // Should this be honored in getDefaultFunctionAttributes? |
2534 | if (CodeGenOpts.DisableTailCalls) |
2535 | return true; |
2536 | |
2537 | if (!TargetDecl) |
2538 | return false; |
2539 | |
2540 | if (TargetDecl->hasAttr<DisableTailCallsAttr>() || |
2541 | TargetDecl->hasAttr<AnyX86InterruptAttr>()) |
2542 | return true; |
2543 | |
2544 | if (CodeGenOpts.NoEscapingBlockTailCalls) { |
2545 | if (const auto *BD = dyn_cast<BlockDecl>(Val: TargetDecl)) |
2546 | if (!BD->doesNotEscape()) |
2547 | return true; |
2548 | } |
2549 | |
2550 | return false; |
2551 | }; |
2552 | if (shouldDisableTailCalls()) |
2553 | FuncAttrs.addAttribute(A: "disable-tail-calls" , V: "true" ); |
2554 | |
2555 | // CPU/feature overrides. addDefaultFunctionDefinitionAttributes |
2556 | // handles these separately to set them based on the global defaults. |
2557 | GetCPUAndFeaturesAttributes(GD: CalleeInfo.getCalleeDecl(), AttrBuilder&: FuncAttrs); |
2558 | } |
2559 | |
2560 | // Collect attributes from arguments and return values. |
2561 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); |
2562 | |
2563 | QualType RetTy = FI.getReturnType(); |
2564 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
2565 | const llvm::DataLayout &DL = getDataLayout(); |
2566 | |
2567 | // Determine if the return type could be partially undef |
2568 | if (CodeGenOpts.EnableNoundefAttrs && |
2569 | HasStrictReturn(Module: *this, RetTy, TargetDecl)) { |
2570 | if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect && |
2571 | DetermineNoUndef(RetTy, getTypes(), DL, RetAI)) |
2572 | RetAttrs.addAttribute(llvm::Attribute::NoUndef); |
2573 | } |
2574 | |
2575 | switch (RetAI.getKind()) { |
2576 | case ABIArgInfo::Extend: |
2577 | if (RetAI.isSignExt()) |
2578 | RetAttrs.addAttribute(llvm::Attribute::SExt); |
2579 | else |
2580 | RetAttrs.addAttribute(llvm::Attribute::ZExt); |
2581 | [[fallthrough]]; |
2582 | case ABIArgInfo::Direct: |
2583 | if (RetAI.getInReg()) |
2584 | RetAttrs.addAttribute(llvm::Attribute::InReg); |
2585 | |
2586 | if (canApplyNoFPClass(AI: RetAI, ParamType: RetTy, IsReturn: true)) |
2587 | RetAttrs.addNoFPClassAttr(NoFPClassMask: getNoFPClassTestMask(LangOpts: getLangOpts())); |
2588 | |
2589 | break; |
2590 | case ABIArgInfo::Ignore: |
2591 | break; |
2592 | |
2593 | case ABIArgInfo::InAlloca: |
2594 | case ABIArgInfo::Indirect: { |
2595 | // inalloca and sret disable readnone and readonly |
2596 | AddPotentialArgAccess(); |
2597 | break; |
2598 | } |
2599 | |
2600 | case ABIArgInfo::CoerceAndExpand: |
2601 | break; |
2602 | |
2603 | case ABIArgInfo::Expand: |
2604 | case ABIArgInfo::IndirectAliased: |
2605 | llvm_unreachable("Invalid ABI kind for return argument" ); |
2606 | } |
2607 | |
2608 | if (!IsThunk) { |
2609 | // FIXME: fix this properly, https://reviews.llvm.org/D100388 |
2610 | if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { |
2611 | QualType PTy = RefTy->getPointeeType(); |
2612 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) |
2613 | RetAttrs.addDereferenceableAttr( |
2614 | Bytes: getMinimumObjectSize(Ty: PTy).getQuantity()); |
2615 | if (getTypes().getTargetAddressSpace(PTy) == 0 && |
2616 | !CodeGenOpts.NullPointerIsValid) |
2617 | RetAttrs.addAttribute(llvm::Attribute::NonNull); |
2618 | if (PTy->isObjectType()) { |
2619 | llvm::Align Alignment = |
2620 | getNaturalPointeeTypeAlignment(T: RetTy).getAsAlign(); |
2621 | RetAttrs.addAlignmentAttr(Align: Alignment); |
2622 | } |
2623 | } |
2624 | } |
2625 | |
2626 | bool hasUsedSRet = false; |
2627 | SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); |
2628 | |
2629 | // Attach attributes to sret. |
2630 | if (IRFunctionArgs.hasSRetArg()) { |
2631 | llvm::AttrBuilder SRETAttrs(getLLVMContext()); |
2632 | SRETAttrs.addStructRetAttr(Ty: getTypes().ConvertTypeForMem(T: RetTy)); |
2633 | SRETAttrs.addAttribute(llvm::Attribute::Writable); |
2634 | SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind); |
2635 | hasUsedSRet = true; |
2636 | if (RetAI.getInReg()) |
2637 | SRETAttrs.addAttribute(llvm::Attribute::InReg); |
2638 | SRETAttrs.addAlignmentAttr(Align: RetAI.getIndirectAlign().getQuantity()); |
2639 | ArgAttrs[IRFunctionArgs.getSRetArgNo()] = |
2640 | llvm::AttributeSet::get(C&: getLLVMContext(), B: SRETAttrs); |
2641 | } |
2642 | |
2643 | // Attach attributes to inalloca argument. |
2644 | if (IRFunctionArgs.hasInallocaArg()) { |
2645 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2646 | Attrs.addInAllocaAttr(Ty: FI.getArgStruct()); |
2647 | ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = |
2648 | llvm::AttributeSet::get(C&: getLLVMContext(), B: Attrs); |
2649 | } |
2650 | |
2651 | // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument, |
2652 | // unless this is a thunk function. |
2653 | // FIXME: fix this properly, https://reviews.llvm.org/D100388 |
2654 | if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && |
2655 | !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { |
2656 | auto IRArgs = IRFunctionArgs.getIRArgs(ArgNo: 0); |
2657 | |
2658 | assert(IRArgs.second == 1 && "Expected only a single `this` pointer." ); |
2659 | |
2660 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2661 | |
2662 | QualType ThisTy = |
2663 | FI.arg_begin()->type.getTypePtr()->getPointeeType(); |
2664 | |
2665 | if (!CodeGenOpts.NullPointerIsValid && |
2666 | getTypes().getTargetAddressSpace(T: FI.arg_begin()->type) == 0) { |
2667 | Attrs.addAttribute(llvm::Attribute::NonNull); |
2668 | Attrs.addDereferenceableAttr(Bytes: getMinimumObjectSize(Ty: ThisTy).getQuantity()); |
2669 | } else { |
2670 | // FIXME dereferenceable should be correct here, regardless of |
2671 | // NullPointerIsValid. However, dereferenceable currently does not always |
2672 | // respect NullPointerIsValid and may imply nonnull and break the program. |
2673 | // See https://reviews.llvm.org/D66618 for discussions. |
2674 | Attrs.addDereferenceableOrNullAttr( |
2675 | Bytes: getMinimumObjectSize( |
2676 | Ty: FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) |
2677 | .getQuantity()); |
2678 | } |
2679 | |
2680 | llvm::Align Alignment = |
2681 | getNaturalTypeAlignment(T: ThisTy, /*BaseInfo=*/nullptr, |
2682 | /*TBAAInfo=*/nullptr, /*forPointeeType=*/true) |
2683 | .getAsAlign(); |
2684 | Attrs.addAlignmentAttr(Align: Alignment); |
2685 | |
2686 | ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(C&: getLLVMContext(), B: Attrs); |
2687 | } |
2688 | |
2689 | unsigned ArgNo = 0; |
2690 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), |
2691 | E = FI.arg_end(); |
2692 | I != E; ++I, ++ArgNo) { |
2693 | QualType ParamType = I->type; |
2694 | const ABIArgInfo &AI = I->info; |
2695 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2696 | |
2697 | // Add attribute for padding argument, if necessary. |
2698 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) { |
2699 | if (AI.getPaddingInReg()) { |
2700 | ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
2701 | llvm::AttributeSet::get( |
2702 | getLLVMContext(), |
2703 | llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg)); |
2704 | } |
2705 | } |
2706 | |
2707 | // Decide whether the argument we're handling could be partially undef |
2708 | if (CodeGenOpts.EnableNoundefAttrs && |
2709 | DetermineNoUndef(QTy: ParamType, Types&: getTypes(), DL, AI)) { |
2710 | Attrs.addAttribute(llvm::Attribute::NoUndef); |
2711 | } |
2712 | |
2713 | // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we |
2714 | // have the corresponding parameter variable. It doesn't make |
2715 | // sense to do it here because parameters are so messed up. |
2716 | switch (AI.getKind()) { |
2717 | case ABIArgInfo::Extend: |
2718 | if (AI.isSignExt()) |
2719 | Attrs.addAttribute(llvm::Attribute::SExt); |
2720 | else |
2721 | Attrs.addAttribute(llvm::Attribute::ZExt); |
2722 | [[fallthrough]]; |
2723 | case ABIArgInfo::Direct: |
2724 | if (ArgNo == 0 && FI.isChainCall()) |
2725 | Attrs.addAttribute(llvm::Attribute::Nest); |
2726 | else if (AI.getInReg()) |
2727 | Attrs.addAttribute(llvm::Attribute::InReg); |
2728 | Attrs.addStackAlignmentAttr(Align: llvm::MaybeAlign(AI.getDirectAlign())); |
2729 | |
2730 | if (canApplyNoFPClass(AI, ParamType, IsReturn: false)) |
2731 | Attrs.addNoFPClassAttr(NoFPClassMask: getNoFPClassTestMask(LangOpts: getLangOpts())); |
2732 | break; |
2733 | case ABIArgInfo::Indirect: { |
2734 | if (AI.getInReg()) |
2735 | Attrs.addAttribute(llvm::Attribute::InReg); |
2736 | |
2737 | if (AI.getIndirectByVal()) |
2738 | Attrs.addByValAttr(Ty: getTypes().ConvertTypeForMem(T: ParamType)); |
2739 | |
2740 | auto *Decl = ParamType->getAsRecordDecl(); |
2741 | if (CodeGenOpts.PassByValueIsNoAlias && Decl && |
2742 | Decl->getArgPassingRestrictions() == |
2743 | RecordArgPassingKind::CanPassInRegs) |
2744 | // When calling the function, the pointer passed in will be the only |
2745 | // reference to the underlying object. Mark it accordingly. |
2746 | Attrs.addAttribute(llvm::Attribute::NoAlias); |
2747 | |
2748 | // TODO: We could add the byref attribute if not byval, but it would |
2749 | // require updating many testcases. |
2750 | |
2751 | CharUnits Align = AI.getIndirectAlign(); |
2752 | |
2753 | // In a byval argument, it is important that the required |
2754 | // alignment of the type is honored, as LLVM might be creating a |
2755 | // *new* stack object, and needs to know what alignment to give |
2756 | // it. (Sometimes it can deduce a sensible alignment on its own, |
2757 | // but not if clang decides it must emit a packed struct, or the |
2758 | // user specifies increased alignment requirements.) |
2759 | // |
2760 | // This is different from indirect *not* byval, where the object |
2761 | // exists already, and the align attribute is purely |
2762 | // informative. |
2763 | assert(!Align.isZero()); |
2764 | |
2765 | // For now, only add this when we have a byval argument. |
2766 | // TODO: be less lazy about updating test cases. |
2767 | if (AI.getIndirectByVal()) |
2768 | Attrs.addAlignmentAttr(Align: Align.getQuantity()); |
2769 | |
2770 | // byval disables readnone and readonly. |
2771 | AddPotentialArgAccess(); |
2772 | break; |
2773 | } |
2774 | case ABIArgInfo::IndirectAliased: { |
2775 | CharUnits Align = AI.getIndirectAlign(); |
2776 | Attrs.addByRefAttr(Ty: getTypes().ConvertTypeForMem(T: ParamType)); |
2777 | Attrs.addAlignmentAttr(Align: Align.getQuantity()); |
2778 | break; |
2779 | } |
2780 | case ABIArgInfo::Ignore: |
2781 | case ABIArgInfo::Expand: |
2782 | case ABIArgInfo::CoerceAndExpand: |
2783 | break; |
2784 | |
2785 | case ABIArgInfo::InAlloca: |
2786 | // inalloca disables readnone and readonly. |
2787 | AddPotentialArgAccess(); |
2788 | continue; |
2789 | } |
2790 | |
2791 | if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { |
2792 | QualType PTy = RefTy->getPointeeType(); |
2793 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) |
2794 | Attrs.addDereferenceableAttr( |
2795 | Bytes: getMinimumObjectSize(Ty: PTy).getQuantity()); |
2796 | if (getTypes().getTargetAddressSpace(PTy) == 0 && |
2797 | !CodeGenOpts.NullPointerIsValid) |
2798 | Attrs.addAttribute(llvm::Attribute::NonNull); |
2799 | if (PTy->isObjectType()) { |
2800 | llvm::Align Alignment = |
2801 | getNaturalPointeeTypeAlignment(T: ParamType).getAsAlign(); |
2802 | Attrs.addAlignmentAttr(Align: Alignment); |
2803 | } |
2804 | } |
2805 | |
2806 | // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types: |
2807 | // > For arguments to a __kernel function declared to be a pointer to a |
2808 | // > data type, the OpenCL compiler can assume that the pointee is always |
2809 | // > appropriately aligned as required by the data type. |
2810 | if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() && |
2811 | ParamType->isPointerType()) { |
2812 | QualType PTy = ParamType->getPointeeType(); |
2813 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { |
2814 | llvm::Align Alignment = |
2815 | getNaturalPointeeTypeAlignment(T: ParamType).getAsAlign(); |
2816 | Attrs.addAlignmentAttr(Align: Alignment); |
2817 | } |
2818 | } |
2819 | |
2820 | switch (FI.getExtParameterInfo(argIndex: ArgNo).getABI()) { |
2821 | case ParameterABI::Ordinary: |
2822 | break; |
2823 | |
2824 | case ParameterABI::SwiftIndirectResult: { |
2825 | // Add 'sret' if we haven't already used it for something, but |
2826 | // only if the result is void. |
2827 | if (!hasUsedSRet && RetTy->isVoidType()) { |
2828 | Attrs.addStructRetAttr(Ty: getTypes().ConvertTypeForMem(T: ParamType)); |
2829 | hasUsedSRet = true; |
2830 | } |
2831 | |
2832 | // Add 'noalias' in either case. |
2833 | Attrs.addAttribute(llvm::Attribute::NoAlias); |
2834 | |
2835 | // Add 'dereferenceable' and 'alignment'. |
2836 | auto PTy = ParamType->getPointeeType(); |
2837 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { |
2838 | auto info = getContext().getTypeInfoInChars(PTy); |
2839 | Attrs.addDereferenceableAttr(Bytes: info.Width.getQuantity()); |
2840 | Attrs.addAlignmentAttr(info.Align.getAsAlign()); |
2841 | } |
2842 | break; |
2843 | } |
2844 | |
2845 | case ParameterABI::SwiftErrorResult: |
2846 | Attrs.addAttribute(llvm::Attribute::SwiftError); |
2847 | break; |
2848 | |
2849 | case ParameterABI::SwiftContext: |
2850 | Attrs.addAttribute(llvm::Attribute::SwiftSelf); |
2851 | break; |
2852 | |
2853 | case ParameterABI::SwiftAsyncContext: |
2854 | Attrs.addAttribute(llvm::Attribute::SwiftAsync); |
2855 | break; |
2856 | } |
2857 | |
2858 | if (FI.getExtParameterInfo(ArgNo).isNoEscape()) |
2859 | Attrs.addAttribute(llvm::Attribute::NoCapture); |
2860 | |
2861 | if (Attrs.hasAttributes()) { |
2862 | unsigned FirstIRArg, NumIRArgs; |
2863 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
2864 | for (unsigned i = 0; i < NumIRArgs; i++) |
2865 | ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes( |
2866 | C&: getLLVMContext(), AS: llvm::AttributeSet::get(C&: getLLVMContext(), B: Attrs)); |
2867 | } |
2868 | } |
2869 | assert(ArgNo == FI.arg_size()); |
2870 | |
2871 | AttrList = llvm::AttributeList::get( |
2872 | C&: getLLVMContext(), FnAttrs: llvm::AttributeSet::get(C&: getLLVMContext(), B: FuncAttrs), |
2873 | RetAttrs: llvm::AttributeSet::get(C&: getLLVMContext(), B: RetAttrs), ArgAttrs); |
2874 | } |
2875 | |
2876 | /// An argument came in as a promoted argument; demote it back to its |
2877 | /// declared type. |
2878 | static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, |
2879 | const VarDecl *var, |
2880 | llvm::Value *value) { |
2881 | llvm::Type *varType = CGF.ConvertType(var->getType()); |
2882 | |
2883 | // This can happen with promotions that actually don't change the |
2884 | // underlying type, like the enum promotions. |
2885 | if (value->getType() == varType) return value; |
2886 | |
2887 | assert((varType->isIntegerTy() || varType->isFloatingPointTy()) |
2888 | && "unexpected promotion type" ); |
2889 | |
2890 | if (isa<llvm::IntegerType>(Val: varType)) |
2891 | return CGF.Builder.CreateTrunc(V: value, DestTy: varType, Name: "arg.unpromote" ); |
2892 | |
2893 | return CGF.Builder.CreateFPCast(V: value, DestTy: varType, Name: "arg.unpromote" ); |
2894 | } |
2895 | |
2896 | /// Returns the attribute (either parameter attribute, or function |
2897 | /// attribute), which declares argument ArgNo to be non-null. |
2898 | static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, |
2899 | QualType ArgType, unsigned ArgNo) { |
2900 | // FIXME: __attribute__((nonnull)) can also be applied to: |
2901 | // - references to pointers, where the pointee is known to be |
2902 | // nonnull (apparently a Clang extension) |
2903 | // - transparent unions containing pointers |
2904 | // In the former case, LLVM IR cannot represent the constraint. In |
2905 | // the latter case, we have no guarantee that the transparent union |
2906 | // is in fact passed as a pointer. |
2907 | if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) |
2908 | return nullptr; |
2909 | // First, check attribute on parameter itself. |
2910 | if (PVD) { |
2911 | if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) |
2912 | return ParmNNAttr; |
2913 | } |
2914 | // Check function attributes. |
2915 | if (!FD) |
2916 | return nullptr; |
2917 | for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { |
2918 | if (NNAttr->isNonNull(ArgNo)) |
2919 | return NNAttr; |
2920 | } |
2921 | return nullptr; |
2922 | } |
2923 | |
2924 | namespace { |
2925 | struct CopyBackSwiftError final : EHScopeStack::Cleanup { |
2926 | Address Temp; |
2927 | Address Arg; |
2928 | CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} |
2929 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2930 | llvm::Value *errorValue = CGF.Builder.CreateLoad(Addr: Temp); |
2931 | CGF.Builder.CreateStore(Val: errorValue, Addr: Arg); |
2932 | } |
2933 | }; |
2934 | } |
2935 | |
2936 | void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, |
2937 | llvm::Function *Fn, |
2938 | const FunctionArgList &Args) { |
2939 | if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) |
2940 | // Naked functions don't have prologues. |
2941 | return; |
2942 | |
2943 | // If this is an implicit-return-zero function, go ahead and |
2944 | // initialize the return value. TODO: it might be nice to have |
2945 | // a more general mechanism for this that didn't require synthesized |
2946 | // return statements. |
2947 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurCodeDecl)) { |
2948 | if (FD->hasImplicitReturnZero()) { |
2949 | QualType RetTy = FD->getReturnType().getUnqualifiedType(); |
2950 | llvm::Type* LLVMTy = CGM.getTypes().ConvertType(T: RetTy); |
2951 | llvm::Constant* Zero = llvm::Constant::getNullValue(Ty: LLVMTy); |
2952 | Builder.CreateStore(Val: Zero, Addr: ReturnValue); |
2953 | } |
2954 | } |
2955 | |
2956 | // FIXME: We no longer need the types from FunctionArgList; lift up and |
2957 | // simplify. |
2958 | |
2959 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); |
2960 | assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); |
2961 | |
2962 | // If we're using inalloca, all the memory arguments are GEPs off of the last |
2963 | // parameter, which is a pointer to the complete memory area. |
2964 | Address ArgStruct = Address::invalid(); |
2965 | if (IRFunctionArgs.hasInallocaArg()) |
2966 | ArgStruct = Address(Fn->getArg(i: IRFunctionArgs.getInallocaArgNo()), |
2967 | FI.getArgStruct(), FI.getArgStructAlignment()); |
2968 | |
2969 | // Name the struct return parameter. |
2970 | if (IRFunctionArgs.hasSRetArg()) { |
2971 | auto AI = Fn->getArg(i: IRFunctionArgs.getSRetArgNo()); |
2972 | AI->setName("agg.result" ); |
2973 | AI->addAttr(llvm::Attribute::NoAlias); |
2974 | } |
2975 | |
2976 | // Track if we received the parameter as a pointer (indirect, byval, or |
2977 | // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it |
2978 | // into a local alloca for us. |
2979 | SmallVector<ParamValue, 16> ArgVals; |
2980 | ArgVals.reserve(N: Args.size()); |
2981 | |
2982 | // Create a pointer value for every parameter declaration. This usually |
2983 | // entails copying one or more LLVM IR arguments into an alloca. Don't push |
2984 | // any cleanups or do anything that might unwind. We do that separately, so |
2985 | // we can push the cleanups in the correct order for the ABI. |
2986 | assert(FI.arg_size() == Args.size() && |
2987 | "Mismatch between function signature & arguments." ); |
2988 | unsigned ArgNo = 0; |
2989 | CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); |
2990 | for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); |
2991 | i != e; ++i, ++info_it, ++ArgNo) { |
2992 | const VarDecl *Arg = *i; |
2993 | const ABIArgInfo &ArgI = info_it->info; |
2994 | |
2995 | bool isPromoted = |
2996 | isa<ParmVarDecl>(Val: Arg) && cast<ParmVarDecl>(Val: Arg)->isKNRPromoted(); |
2997 | // We are converting from ABIArgInfo type to VarDecl type directly, unless |
2998 | // the parameter is promoted. In this case we convert to |
2999 | // CGFunctionInfo::ArgInfo type with subsequent argument demotion. |
3000 | QualType Ty = isPromoted ? info_it->type : Arg->getType(); |
3001 | assert(hasScalarEvaluationKind(Ty) == |
3002 | hasScalarEvaluationKind(Arg->getType())); |
3003 | |
3004 | unsigned FirstIRArg, NumIRArgs; |
3005 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
3006 | |
3007 | switch (ArgI.getKind()) { |
3008 | case ABIArgInfo::InAlloca: { |
3009 | assert(NumIRArgs == 0); |
3010 | auto FieldIndex = ArgI.getInAllocaFieldIndex(); |
3011 | Address V = |
3012 | Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName()); |
3013 | if (ArgI.getInAllocaIndirect()) |
3014 | V = Address(Builder.CreateLoad(Addr: V), ConvertTypeForMem(T: Ty), |
3015 | getContext().getTypeAlignInChars(T: Ty)); |
3016 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: V)); |
3017 | break; |
3018 | } |
3019 | |
3020 | case ABIArgInfo::Indirect: |
3021 | case ABIArgInfo::IndirectAliased: { |
3022 | assert(NumIRArgs == 1); |
3023 | Address ParamAddr = makeNaturalAddressForPointer( |
3024 | Ptr: Fn->getArg(i: FirstIRArg), T: Ty, Alignment: ArgI.getIndirectAlign(), ForPointeeType: false, BaseInfo: nullptr, |
3025 | TBAAInfo: nullptr, IsKnownNonNull: KnownNonNull); |
3026 | |
3027 | if (!hasScalarEvaluationKind(T: Ty)) { |
3028 | // Aggregates and complex variables are accessed by reference. All we |
3029 | // need to do is realign the value, if requested. Also, if the address |
3030 | // may be aliased, copy it to ensure that the parameter variable is |
3031 | // mutable and has a unique adress, as C requires. |
3032 | if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) { |
3033 | RawAddress AlignedTemp = CreateMemTemp(T: Ty, Name: "coerce" ); |
3034 | |
3035 | // Copy from the incoming argument pointer to the temporary with the |
3036 | // appropriate alignment. |
3037 | // |
3038 | // FIXME: We should have a common utility for generating an aggregate |
3039 | // copy. |
3040 | CharUnits Size = getContext().getTypeSizeInChars(T: Ty); |
3041 | Builder.CreateMemCpy( |
3042 | Dst: AlignedTemp.getPointer(), DstAlign: AlignedTemp.getAlignment().getAsAlign(), |
3043 | Src: ParamAddr.emitRawPointer(CGF&: *this), |
3044 | SrcAlign: ParamAddr.getAlignment().getAsAlign(), |
3045 | Size: llvm::ConstantInt::get(Ty: IntPtrTy, V: Size.getQuantity())); |
3046 | ParamAddr = AlignedTemp; |
3047 | } |
3048 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: ParamAddr)); |
3049 | } else { |
3050 | // Load scalar value from indirect argument. |
3051 | llvm::Value *V = |
3052 | EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); |
3053 | |
3054 | if (isPromoted) |
3055 | V = emitArgumentDemotion(CGF&: *this, var: Arg, value: V); |
3056 | ArgVals.push_back(Elt: ParamValue::forDirect(value: V)); |
3057 | } |
3058 | break; |
3059 | } |
3060 | |
3061 | case ABIArgInfo::Extend: |
3062 | case ABIArgInfo::Direct: { |
3063 | auto AI = Fn->getArg(i: FirstIRArg); |
3064 | llvm::Type *LTy = ConvertType(Arg->getType()); |
3065 | |
3066 | // Prepare parameter attributes. So far, only attributes for pointer |
3067 | // parameters are prepared. See |
3068 | // http://llvm.org/docs/LangRef.html#paramattrs. |
3069 | if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && |
3070 | ArgI.getCoerceToType()->isPointerTy()) { |
3071 | assert(NumIRArgs == 1); |
3072 | |
3073 | if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Val: Arg)) { |
3074 | // Set `nonnull` attribute if any. |
3075 | if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), |
3076 | PVD->getFunctionScopeIndex()) && |
3077 | !CGM.getCodeGenOpts().NullPointerIsValid) |
3078 | AI->addAttr(llvm::Attribute::NonNull); |
3079 | |
3080 | QualType OTy = PVD->getOriginalType(); |
3081 | if (const auto *ArrTy = |
3082 | getContext().getAsConstantArrayType(T: OTy)) { |
3083 | // A C99 array parameter declaration with the static keyword also |
3084 | // indicates dereferenceability, and if the size is constant we can |
3085 | // use the dereferenceable attribute (which requires the size in |
3086 | // bytes). |
3087 | if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { |
3088 | QualType ETy = ArrTy->getElementType(); |
3089 | llvm::Align Alignment = |
3090 | CGM.getNaturalTypeAlignment(T: ETy).getAsAlign(); |
3091 | AI->addAttrs(B&: llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Align: Alignment)); |
3092 | uint64_t ArrSize = ArrTy->getZExtSize(); |
3093 | if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && |
3094 | ArrSize) { |
3095 | llvm::AttrBuilder Attrs(getLLVMContext()); |
3096 | Attrs.addDereferenceableAttr( |
3097 | Bytes: getContext().getTypeSizeInChars(T: ETy).getQuantity() * |
3098 | ArrSize); |
3099 | AI->addAttrs(B&: Attrs); |
3100 | } else if (getContext().getTargetInfo().getNullPointerValue( |
3101 | AddrSpace: ETy.getAddressSpace()) == 0 && |
3102 | !CGM.getCodeGenOpts().NullPointerIsValid) { |
3103 | AI->addAttr(llvm::Attribute::NonNull); |
3104 | } |
3105 | } |
3106 | } else if (const auto *ArrTy = |
3107 | getContext().getAsVariableArrayType(T: OTy)) { |
3108 | // For C99 VLAs with the static keyword, we don't know the size so |
3109 | // we can't use the dereferenceable attribute, but in addrspace(0) |
3110 | // we know that it must be nonnull. |
3111 | if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { |
3112 | QualType ETy = ArrTy->getElementType(); |
3113 | llvm::Align Alignment = |
3114 | CGM.getNaturalTypeAlignment(T: ETy).getAsAlign(); |
3115 | AI->addAttrs(B&: llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Align: Alignment)); |
3116 | if (!getTypes().getTargetAddressSpace(ETy) && |
3117 | !CGM.getCodeGenOpts().NullPointerIsValid) |
3118 | AI->addAttr(llvm::Attribute::NonNull); |
3119 | } |
3120 | } |
3121 | |
3122 | // Set `align` attribute if any. |
3123 | const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); |
3124 | if (!AVAttr) |
3125 | if (const auto *TOTy = OTy->getAs<TypedefType>()) |
3126 | AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); |
3127 | if (AVAttr && !SanOpts.has(K: SanitizerKind::Alignment)) { |
3128 | // If alignment-assumption sanitizer is enabled, we do *not* add |
3129 | // alignment attribute here, but emit normal alignment assumption, |
3130 | // so the UBSAN check could function. |
3131 | llvm::ConstantInt *AlignmentCI = |
3132 | cast<llvm::ConstantInt>(EmitScalarExpr(E: AVAttr->getAlignment())); |
3133 | uint64_t AlignmentInt = |
3134 | AlignmentCI->getLimitedValue(Limit: llvm::Value::MaximumAlignment); |
3135 | if (AI->getParamAlign().valueOrOne() < AlignmentInt) { |
3136 | AI->removeAttr(llvm::Attribute::AttrKind::Alignment); |
3137 | AI->addAttrs(B&: llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr( |
3138 | Align: llvm::Align(AlignmentInt))); |
3139 | } |
3140 | } |
3141 | } |
3142 | |
3143 | // Set 'noalias' if an argument type has the `restrict` qualifier. |
3144 | if (Arg->getType().isRestrictQualified()) |
3145 | AI->addAttr(llvm::Attribute::NoAlias); |
3146 | } |
3147 | |
3148 | // Prepare the argument value. If we have the trivial case, handle it |
3149 | // with no muss and fuss. |
3150 | if (!isa<llvm::StructType>(Val: ArgI.getCoerceToType()) && |
3151 | ArgI.getCoerceToType() == ConvertType(T: Ty) && |
3152 | ArgI.getDirectOffset() == 0) { |
3153 | assert(NumIRArgs == 1); |
3154 | |
3155 | // LLVM expects swifterror parameters to be used in very restricted |
3156 | // ways. Copy the value into a less-restricted temporary. |
3157 | llvm::Value *V = AI; |
3158 | if (FI.getExtParameterInfo(argIndex: ArgNo).getABI() |
3159 | == ParameterABI::SwiftErrorResult) { |
3160 | QualType pointeeTy = Ty->getPointeeType(); |
3161 | assert(pointeeTy->isPointerType()); |
3162 | RawAddress temp = |
3163 | CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp" ); |
3164 | Address arg = makeNaturalAddressForPointer( |
3165 | Ptr: V, T: pointeeTy, Alignment: getContext().getTypeAlignInChars(T: pointeeTy)); |
3166 | llvm::Value *incomingErrorValue = Builder.CreateLoad(Addr: arg); |
3167 | Builder.CreateStore(Val: incomingErrorValue, Addr: temp); |
3168 | V = temp.getPointer(); |
3169 | |
3170 | // Push a cleanup to copy the value back at the end of the function. |
3171 | // The convention does not guarantee that the value will be written |
3172 | // back if the function exits with an unwind exception. |
3173 | EHStack.pushCleanup<CopyBackSwiftError>(Kind: NormalCleanup, A: temp, A: arg); |
3174 | } |
3175 | |
3176 | // Ensure the argument is the correct type. |
3177 | if (V->getType() != ArgI.getCoerceToType()) |
3178 | V = Builder.CreateBitCast(V, DestTy: ArgI.getCoerceToType()); |
3179 | |
3180 | if (isPromoted) |
3181 | V = emitArgumentDemotion(CGF&: *this, var: Arg, value: V); |
3182 | |
3183 | // Because of merging of function types from multiple decls it is |
3184 | // possible for the type of an argument to not match the corresponding |
3185 | // type in the function type. Since we are codegening the callee |
3186 | // in here, add a cast to the argument type. |
3187 | llvm::Type *LTy = ConvertType(Arg->getType()); |
3188 | if (V->getType() != LTy) |
3189 | V = Builder.CreateBitCast(V, DestTy: LTy); |
3190 | |
3191 | ArgVals.push_back(Elt: ParamValue::forDirect(value: V)); |
3192 | break; |
3193 | } |
3194 | |
3195 | // VLST arguments are coerced to VLATs at the function boundary for |
3196 | // ABI consistency. If this is a VLST that was coerced to |
3197 | // a VLAT at the function boundary and the types match up, use |
3198 | // llvm.vector.extract to convert back to the original VLST. |
3199 | if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { |
3200 | llvm::Value *Coerced = Fn->getArg(i: FirstIRArg); |
3201 | if (auto *VecTyFrom = |
3202 | dyn_cast<llvm::ScalableVectorType>(Val: Coerced->getType())) { |
3203 | // If we are casting a scalable i1 predicate vector to a fixed i8 |
3204 | // vector, bitcast the source and use a vector extract. |
3205 | if (VecTyFrom->getElementType()->isIntegerTy(Bitwidth: 1) && |
3206 | VecTyFrom->getElementCount().isKnownMultipleOf(RHS: 8) && |
3207 | VecTyTo->getElementType() == Builder.getInt8Ty()) { |
3208 | VecTyFrom = llvm::ScalableVectorType::get( |
3209 | VecTyTo->getElementType(), |
3210 | VecTyFrom->getElementCount().getKnownMinValue() / 8); |
3211 | Coerced = Builder.CreateBitCast(V: Coerced, DestTy: VecTyFrom); |
3212 | } |
3213 | if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { |
3214 | llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGM.Int64Ty); |
3215 | |
3216 | assert(NumIRArgs == 1); |
3217 | Coerced->setName(Arg->getName() + ".coerce" ); |
3218 | ArgVals.push_back(Elt: ParamValue::forDirect(value: Builder.CreateExtractVector( |
3219 | DstType: VecTyTo, SrcVec: Coerced, Idx: Zero, Name: "cast.fixed" ))); |
3220 | break; |
3221 | } |
3222 | } |
3223 | } |
3224 | |
3225 | llvm::StructType *STy = |
3226 | dyn_cast<llvm::StructType>(Val: ArgI.getCoerceToType()); |
3227 | if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy && |
3228 | STy->getNumElements() > 1) { |
3229 | [[maybe_unused]] llvm::TypeSize StructSize = |
3230 | CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
3231 | [[maybe_unused]] llvm::TypeSize PtrElementSize = |
3232 | CGM.getDataLayout().getTypeAllocSize(Ty: ConvertTypeForMem(T: Ty)); |
3233 | if (STy->containsHomogeneousScalableVectorTypes()) { |
3234 | assert(StructSize == PtrElementSize && |
3235 | "Only allow non-fractional movement of structure with" |
3236 | "homogeneous scalable vector type" ); |
3237 | |
3238 | ArgVals.push_back(Elt: ParamValue::forDirect(value: AI)); |
3239 | break; |
3240 | } |
3241 | } |
3242 | |
3243 | Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), |
3244 | Arg->getName()); |
3245 | |
3246 | // Pointer to store into. |
3247 | Address Ptr = emitAddressAtOffset(CGF&: *this, addr: Alloca, info: ArgI); |
3248 | |
3249 | // Fast-isel and the optimizer generally like scalar values better than |
3250 | // FCAs, so we flatten them if this is safe to do for this argument. |
3251 | if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && |
3252 | STy->getNumElements() > 1) { |
3253 | llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
3254 | llvm::TypeSize PtrElementSize = |
3255 | CGM.getDataLayout().getTypeAllocSize(Ty: Ptr.getElementType()); |
3256 | if (StructSize.isScalable()) { |
3257 | assert(STy->containsHomogeneousScalableVectorTypes() && |
3258 | "ABI only supports structure with homogeneous scalable vector " |
3259 | "type" ); |
3260 | assert(StructSize == PtrElementSize && |
3261 | "Only allow non-fractional movement of structure with" |
3262 | "homogeneous scalable vector type" ); |
3263 | assert(STy->getNumElements() == NumIRArgs); |
3264 | |
3265 | llvm::Value *LoadedStructValue = llvm::PoisonValue::get(T: STy); |
3266 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
3267 | auto *AI = Fn->getArg(i: FirstIRArg + i); |
3268 | AI->setName(Arg->getName() + ".coerce" + Twine(i)); |
3269 | LoadedStructValue = |
3270 | Builder.CreateInsertValue(Agg: LoadedStructValue, Val: AI, Idxs: i); |
3271 | } |
3272 | |
3273 | Builder.CreateStore(Val: LoadedStructValue, Addr: Ptr); |
3274 | } else { |
3275 | uint64_t SrcSize = StructSize.getFixedValue(); |
3276 | uint64_t DstSize = PtrElementSize.getFixedValue(); |
3277 | |
3278 | Address AddrToStoreInto = Address::invalid(); |
3279 | if (SrcSize <= DstSize) { |
3280 | AddrToStoreInto = Ptr.withElementType(ElemTy: STy); |
3281 | } else { |
3282 | AddrToStoreInto = |
3283 | CreateTempAlloca(Ty: STy, align: Alloca.getAlignment(), Name: "coerce" ); |
3284 | } |
3285 | |
3286 | assert(STy->getNumElements() == NumIRArgs); |
3287 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
3288 | auto AI = Fn->getArg(i: FirstIRArg + i); |
3289 | AI->setName(Arg->getName() + ".coerce" + Twine(i)); |
3290 | Address EltPtr = Builder.CreateStructGEP(Addr: AddrToStoreInto, Index: i); |
3291 | Builder.CreateStore(Val: AI, Addr: EltPtr); |
3292 | } |
3293 | |
3294 | if (SrcSize > DstSize) { |
3295 | Builder.CreateMemCpy(Dest: Ptr, Src: AddrToStoreInto, Size: DstSize); |
3296 | } |
3297 | } |
3298 | } else { |
3299 | // Simple case, just do a coerced store of the argument into the alloca. |
3300 | assert(NumIRArgs == 1); |
3301 | auto AI = Fn->getArg(i: FirstIRArg); |
3302 | AI->setName(Arg->getName() + ".coerce" ); |
3303 | CreateCoercedStore(Src: AI, Dst: Ptr, /*DstIsVolatile=*/false, CGF&: *this); |
3304 | } |
3305 | |
3306 | // Match to what EmitParmDecl is expecting for this type. |
3307 | if (CodeGenFunction::hasScalarEvaluationKind(T: Ty)) { |
3308 | llvm::Value *V = |
3309 | EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); |
3310 | if (isPromoted) |
3311 | V = emitArgumentDemotion(CGF&: *this, var: Arg, value: V); |
3312 | ArgVals.push_back(Elt: ParamValue::forDirect(value: V)); |
3313 | } else { |
3314 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: Alloca)); |
3315 | } |
3316 | break; |
3317 | } |
3318 | |
3319 | case ABIArgInfo::CoerceAndExpand: { |
3320 | // Reconstruct into a temporary. |
3321 | Address alloca = CreateMemTemp(T: Ty, Align: getContext().getDeclAlign(Arg)); |
3322 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: alloca)); |
3323 | |
3324 | auto coercionType = ArgI.getCoerceAndExpandType(); |
3325 | alloca = alloca.withElementType(ElemTy: coercionType); |
3326 | |
3327 | unsigned argIndex = FirstIRArg; |
3328 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
3329 | llvm::Type *eltType = coercionType->getElementType(N: i); |
3330 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) |
3331 | continue; |
3332 | |
3333 | auto eltAddr = Builder.CreateStructGEP(Addr: alloca, Index: i); |
3334 | auto elt = Fn->getArg(i: argIndex++); |
3335 | Builder.CreateStore(Val: elt, Addr: eltAddr); |
3336 | } |
3337 | assert(argIndex == FirstIRArg + NumIRArgs); |
3338 | break; |
3339 | } |
3340 | |
3341 | case ABIArgInfo::Expand: { |
3342 | // If this structure was expanded into multiple arguments then |
3343 | // we need to create a temporary and reconstruct it from the |
3344 | // arguments. |
3345 | Address Alloca = CreateMemTemp(T: Ty, Align: getContext().getDeclAlign(Arg)); |
3346 | LValue LV = MakeAddrLValue(Addr: Alloca, T: Ty); |
3347 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: Alloca)); |
3348 | |
3349 | auto FnArgIter = Fn->arg_begin() + FirstIRArg; |
3350 | ExpandTypeFromArgs(Ty, LV, AI&: FnArgIter); |
3351 | assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); |
3352 | for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { |
3353 | auto AI = Fn->getArg(i: FirstIRArg + i); |
3354 | AI->setName(Arg->getName() + "." + Twine(i)); |
3355 | } |
3356 | break; |
3357 | } |
3358 | |
3359 | case ABIArgInfo::Ignore: |
3360 | assert(NumIRArgs == 0); |
3361 | // Initialize the local variable appropriately. |
3362 | if (!hasScalarEvaluationKind(T: Ty)) { |
3363 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: CreateMemTemp(T: Ty))); |
3364 | } else { |
3365 | llvm::Value *U = llvm::UndefValue::get(T: ConvertType(Arg->getType())); |
3366 | ArgVals.push_back(Elt: ParamValue::forDirect(value: U)); |
3367 | } |
3368 | break; |
3369 | } |
3370 | } |
3371 | |
3372 | if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { |
3373 | for (int I = Args.size() - 1; I >= 0; --I) |
3374 | EmitParmDecl(D: *Args[I], Arg: ArgVals[I], ArgNo: I + 1); |
3375 | } else { |
3376 | for (unsigned I = 0, E = Args.size(); I != E; ++I) |
3377 | EmitParmDecl(D: *Args[I], Arg: ArgVals[I], ArgNo: I + 1); |
3378 | } |
3379 | } |
3380 | |
3381 | static void eraseUnusedBitCasts(llvm::Instruction *insn) { |
3382 | while (insn->use_empty()) { |
3383 | llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(Val: insn); |
3384 | if (!bitcast) return; |
3385 | |
3386 | // This is "safe" because we would have used a ConstantExpr otherwise. |
3387 | insn = cast<llvm::Instruction>(Val: bitcast->getOperand(i_nocapture: 0)); |
3388 | bitcast->eraseFromParent(); |
3389 | } |
3390 | } |
3391 | |
3392 | /// Try to emit a fused autorelease of a return result. |
3393 | static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, |
3394 | llvm::Value *result) { |
3395 | // We must be immediately followed the cast. |
3396 | llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); |
3397 | if (BB->empty()) return nullptr; |
3398 | if (&BB->back() != result) return nullptr; |
3399 | |
3400 | llvm::Type *resultType = result->getType(); |
3401 | |
3402 | // result is in a BasicBlock and is therefore an Instruction. |
3403 | llvm::Instruction *generator = cast<llvm::Instruction>(Val: result); |
3404 | |
3405 | SmallVector<llvm::Instruction *, 4> InstsToKill; |
3406 | |
3407 | // Look for: |
3408 | // %generator = bitcast %type1* %generator2 to %type2* |
3409 | while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(Val: generator)) { |
3410 | // We would have emitted this as a constant if the operand weren't |
3411 | // an Instruction. |
3412 | generator = cast<llvm::Instruction>(Val: bitcast->getOperand(i_nocapture: 0)); |
3413 | |
3414 | // Require the generator to be immediately followed by the cast. |
3415 | if (generator->getNextNode() != bitcast) |
3416 | return nullptr; |
3417 | |
3418 | InstsToKill.push_back(Elt: bitcast); |
3419 | } |
3420 | |
3421 | // Look for: |
3422 | // %generator = call i8* @objc_retain(i8* %originalResult) |
3423 | // or |
3424 | // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) |
3425 | llvm::CallInst *call = dyn_cast<llvm::CallInst>(Val: generator); |
3426 | if (!call) return nullptr; |
3427 | |
3428 | bool doRetainAutorelease; |
3429 | |
3430 | if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { |
3431 | doRetainAutorelease = true; |
3432 | } else if (call->getCalledOperand() == |
3433 | CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) { |
3434 | doRetainAutorelease = false; |
3435 | |
3436 | // If we emitted an assembly marker for this call (and the |
3437 | // ARCEntrypoints field should have been set if so), go looking |
3438 | // for that call. If we can't find it, we can't do this |
3439 | // optimization. But it should always be the immediately previous |
3440 | // instruction, unless we needed bitcasts around the call. |
3441 | if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { |
3442 | llvm::Instruction *prev = call->getPrevNode(); |
3443 | assert(prev); |
3444 | if (isa<llvm::BitCastInst>(Val: prev)) { |
3445 | prev = prev->getPrevNode(); |
3446 | assert(prev); |
3447 | } |
3448 | assert(isa<llvm::CallInst>(prev)); |
3449 | assert(cast<llvm::CallInst>(prev)->getCalledOperand() == |
3450 | CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); |
3451 | InstsToKill.push_back(Elt: prev); |
3452 | } |
3453 | } else { |
3454 | return nullptr; |
3455 | } |
3456 | |
3457 | result = call->getArgOperand(i: 0); |
3458 | InstsToKill.push_back(Elt: call); |
3459 | |
3460 | // Keep killing bitcasts, for sanity. Note that we no longer care |
3461 | // about precise ordering as long as there's exactly one use. |
3462 | while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(Val: result)) { |
3463 | if (!bitcast->hasOneUse()) break; |
3464 | InstsToKill.push_back(Elt: bitcast); |
3465 | result = bitcast->getOperand(i_nocapture: 0); |
3466 | } |
3467 | |
3468 | // Delete all the unnecessary instructions, from latest to earliest. |
3469 | for (auto *I : InstsToKill) |
3470 | I->eraseFromParent(); |
3471 | |
3472 | // Do the fused retain/autorelease if we were asked to. |
3473 | if (doRetainAutorelease) |
3474 | result = CGF.EmitARCRetainAutoreleaseReturnValue(value: result); |
3475 | |
3476 | // Cast back to the result type. |
3477 | return CGF.Builder.CreateBitCast(V: result, DestTy: resultType); |
3478 | } |
3479 | |
3480 | /// If this is a +1 of the value of an immutable 'self', remove it. |
3481 | static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, |
3482 | llvm::Value *result) { |
3483 | // This is only applicable to a method with an immutable 'self'. |
3484 | const ObjCMethodDecl *method = |
3485 | dyn_cast_or_null<ObjCMethodDecl>(Val: CGF.CurCodeDecl); |
3486 | if (!method) return nullptr; |
3487 | const VarDecl *self = method->getSelfDecl(); |
3488 | if (!self->getType().isConstQualified()) return nullptr; |
3489 | |
3490 | // Look for a retain call. Note: stripPointerCasts looks through returned arg |
3491 | // functions, which would cause us to miss the retain. |
3492 | llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(Val: result); |
3493 | if (!retainCall || retainCall->getCalledOperand() != |
3494 | CGF.CGM.getObjCEntrypoints().objc_retain) |
3495 | return nullptr; |
3496 | |
3497 | // Look for an ordinary load of 'self'. |
3498 | llvm::Value *retainedValue = retainCall->getArgOperand(i: 0); |
3499 | llvm::LoadInst *load = |
3500 | dyn_cast<llvm::LoadInst>(Val: retainedValue->stripPointerCasts()); |
3501 | if (!load || load->isAtomic() || load->isVolatile() || |
3502 | load->getPointerOperand() != CGF.GetAddrOfLocalVar(VD: self).getBasePointer()) |
3503 | return nullptr; |
3504 | |
3505 | // Okay! Burn it all down. This relies for correctness on the |
3506 | // assumption that the retain is emitted as part of the return and |
3507 | // that thereafter everything is used "linearly". |
3508 | llvm::Type *resultType = result->getType(); |
3509 | eraseUnusedBitCasts(insn: cast<llvm::Instruction>(Val: result)); |
3510 | assert(retainCall->use_empty()); |
3511 | retainCall->eraseFromParent(); |
3512 | eraseUnusedBitCasts(insn: cast<llvm::Instruction>(Val: retainedValue)); |
3513 | |
3514 | return CGF.Builder.CreateBitCast(V: load, DestTy: resultType); |
3515 | } |
3516 | |
3517 | /// Emit an ARC autorelease of the result of a function. |
3518 | /// |
3519 | /// \return the value to actually return from the function |
3520 | static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, |
3521 | llvm::Value *result) { |
3522 | // If we're returning 'self', kill the initial retain. This is a |
3523 | // heuristic attempt to "encourage correctness" in the really unfortunate |
3524 | // case where we have a return of self during a dealloc and we desperately |
3525 | // need to avoid the possible autorelease. |
3526 | if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) |
3527 | return self; |
3528 | |
3529 | // At -O0, try to emit a fused retain/autorelease. |
3530 | if (CGF.shouldUseFusedARCCalls()) |
3531 | if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) |
3532 | return fused; |
3533 | |
3534 | return CGF.EmitARCAutoreleaseReturnValue(value: result); |
3535 | } |
3536 | |
3537 | /// Heuristically search for a dominating store to the return-value slot. |
3538 | static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { |
3539 | llvm::Value *ReturnValuePtr = CGF.ReturnValue.getBasePointer(); |
3540 | |
3541 | // Check if a User is a store which pointerOperand is the ReturnValue. |
3542 | // We are looking for stores to the ReturnValue, not for stores of the |
3543 | // ReturnValue to some other location. |
3544 | auto GetStoreIfValid = [&CGF, |
3545 | ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * { |
3546 | auto *SI = dyn_cast<llvm::StoreInst>(Val: U); |
3547 | if (!SI || SI->getPointerOperand() != ReturnValuePtr || |
3548 | SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType()) |
3549 | return nullptr; |
3550 | // These aren't actually possible for non-coerced returns, and we |
3551 | // only care about non-coerced returns on this code path. |
3552 | // All memory instructions inside __try block are volatile. |
3553 | assert(!SI->isAtomic() && |
3554 | (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry())); |
3555 | return SI; |
3556 | }; |
3557 | // If there are multiple uses of the return-value slot, just check |
3558 | // for something immediately preceding the IP. Sometimes this can |
3559 | // happen with how we generate implicit-returns; it can also happen |
3560 | // with noreturn cleanups. |
3561 | if (!ReturnValuePtr->hasOneUse()) { |
3562 | llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); |
3563 | if (IP->empty()) return nullptr; |
3564 | |
3565 | // Look at directly preceding instruction, skipping bitcasts and lifetime |
3566 | // markers. |
3567 | for (llvm::Instruction &I : make_range(x: IP->rbegin(), y: IP->rend())) { |
3568 | if (isa<llvm::BitCastInst>(Val: &I)) |
3569 | continue; |
3570 | if (auto *II = dyn_cast<llvm::IntrinsicInst>(Val: &I)) |
3571 | if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end) |
3572 | continue; |
3573 | |
3574 | return GetStoreIfValid(&I); |
3575 | } |
3576 | return nullptr; |
3577 | } |
3578 | |
3579 | llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back()); |
3580 | if (!store) return nullptr; |
3581 | |
3582 | // Now do a first-and-dirty dominance check: just walk up the |
3583 | // single-predecessors chain from the current insertion point. |
3584 | llvm::BasicBlock *StoreBB = store->getParent(); |
3585 | llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); |
3586 | llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs; |
3587 | while (IP != StoreBB) { |
3588 | if (!SeenBBs.insert(Ptr: IP).second || !(IP = IP->getSinglePredecessor())) |
3589 | return nullptr; |
3590 | } |
3591 | |
3592 | // Okay, the store's basic block dominates the insertion point; we |
3593 | // can do our thing. |
3594 | return store; |
3595 | } |
3596 | |
3597 | // Helper functions for EmitCMSEClearRecord |
3598 | |
3599 | // Set the bits corresponding to a field having width `BitWidth` and located at |
3600 | // offset `BitOffset` (from the least significant bit) within a storage unit of |
3601 | // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte. |
3602 | // Use little-endian layout, i.e.`Bits[0]` is the LSB. |
3603 | static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, |
3604 | int BitWidth, int CharWidth) { |
3605 | assert(CharWidth <= 64); |
3606 | assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); |
3607 | |
3608 | int Pos = 0; |
3609 | if (BitOffset >= CharWidth) { |
3610 | Pos += BitOffset / CharWidth; |
3611 | BitOffset = BitOffset % CharWidth; |
3612 | } |
3613 | |
3614 | const uint64_t Used = (uint64_t(1) << CharWidth) - 1; |
3615 | if (BitOffset + BitWidth >= CharWidth) { |
3616 | Bits[Pos++] |= (Used << BitOffset) & Used; |
3617 | BitWidth -= CharWidth - BitOffset; |
3618 | BitOffset = 0; |
3619 | } |
3620 | |
3621 | while (BitWidth >= CharWidth) { |
3622 | Bits[Pos++] = Used; |
3623 | BitWidth -= CharWidth; |
3624 | } |
3625 | |
3626 | if (BitWidth > 0) |
3627 | Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; |
3628 | } |
3629 | |
3630 | // Set the bits corresponding to a field having width `BitWidth` and located at |
3631 | // offset `BitOffset` (from the least significant bit) within a storage unit of |
3632 | // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of |
3633 | // `Bits` corresponds to one target byte. Use target endian layout. |
3634 | static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, |
3635 | int StorageSize, int BitOffset, int BitWidth, |
3636 | int CharWidth, bool BigEndian) { |
3637 | |
3638 | SmallVector<uint64_t, 8> TmpBits(StorageSize); |
3639 | setBitRange(Bits&: TmpBits, BitOffset, BitWidth, CharWidth); |
3640 | |
3641 | if (BigEndian) |
3642 | std::reverse(first: TmpBits.begin(), last: TmpBits.end()); |
3643 | |
3644 | for (uint64_t V : TmpBits) |
3645 | Bits[StorageOffset++] |= V; |
3646 | } |
3647 | |
3648 | static void setUsedBits(CodeGenModule &, QualType, int, |
3649 | SmallVectorImpl<uint64_t> &); |
3650 | |
3651 | // Set the bits in `Bits`, which correspond to the value representations of |
3652 | // the actual members of the record type `RTy`. Note that this function does |
3653 | // not handle base classes, virtual tables, etc, since they cannot happen in |
3654 | // CMSE function arguments or return. The bit mask corresponds to the target |
3655 | // memory layout, i.e. it's endian dependent. |
3656 | static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, |
3657 | SmallVectorImpl<uint64_t> &Bits) { |
3658 | ASTContext &Context = CGM.getContext(); |
3659 | int CharWidth = Context.getCharWidth(); |
3660 | const RecordDecl *RD = RTy->getDecl()->getDefinition(); |
3661 | const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(D: RD); |
3662 | const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); |
3663 | |
3664 | int Idx = 0; |
3665 | for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { |
3666 | const FieldDecl *F = *I; |
3667 | |
3668 | if (F->isUnnamedBitField() || F->isZeroLengthBitField(Ctx: Context) || |
3669 | F->getType()->isIncompleteArrayType()) |
3670 | continue; |
3671 | |
3672 | if (F->isBitField()) { |
3673 | const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(FD: F); |
3674 | setBitRange(Bits, StorageOffset: Offset + BFI.StorageOffset.getQuantity(), |
3675 | StorageSize: BFI.StorageSize / CharWidth, BitOffset: BFI.Offset, |
3676 | BitWidth: BFI.Size, CharWidth, |
3677 | BigEndian: CGM.getDataLayout().isBigEndian()); |
3678 | continue; |
3679 | } |
3680 | |
3681 | setUsedBits(CGM, F->getType(), |
3682 | Offset + ASTLayout.getFieldOffset(FieldNo: Idx) / CharWidth, Bits); |
3683 | } |
3684 | } |
3685 | |
3686 | // Set the bits in `Bits`, which correspond to the value representations of |
3687 | // the elements of an array type `ATy`. |
3688 | static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, |
3689 | int Offset, SmallVectorImpl<uint64_t> &Bits) { |
3690 | const ASTContext &Context = CGM.getContext(); |
3691 | |
3692 | QualType ETy = Context.getBaseElementType(ATy); |
3693 | int Size = Context.getTypeSizeInChars(T: ETy).getQuantity(); |
3694 | SmallVector<uint64_t, 4> TmpBits(Size); |
3695 | setUsedBits(CGM, ETy, 0, TmpBits); |
3696 | |
3697 | for (int I = 0, N = Context.getConstantArrayElementCount(CA: ATy); I < N; ++I) { |
3698 | auto Src = TmpBits.begin(); |
3699 | auto Dst = Bits.begin() + Offset + I * Size; |
3700 | for (int J = 0; J < Size; ++J) |
3701 | *Dst++ |= *Src++; |
3702 | } |
3703 | } |
3704 | |
3705 | // Set the bits in `Bits`, which correspond to the value representations of |
3706 | // the type `QTy`. |
3707 | static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, |
3708 | SmallVectorImpl<uint64_t> &Bits) { |
3709 | if (const auto *RTy = QTy->getAs<RecordType>()) |
3710 | return setUsedBits(CGM, RTy, Offset, Bits); |
3711 | |
3712 | ASTContext &Context = CGM.getContext(); |
3713 | if (const auto *ATy = Context.getAsConstantArrayType(T: QTy)) |
3714 | return setUsedBits(CGM, ATy, Offset, Bits); |
3715 | |
3716 | int Size = Context.getTypeSizeInChars(T: QTy).getQuantity(); |
3717 | if (Size <= 0) |
3718 | return; |
3719 | |
3720 | std::fill_n(first: Bits.begin() + Offset, n: Size, |
3721 | value: (uint64_t(1) << Context.getCharWidth()) - 1); |
3722 | } |
3723 | |
3724 | static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits, |
3725 | int Pos, int Size, int CharWidth, |
3726 | bool BigEndian) { |
3727 | assert(Size > 0); |
3728 | uint64_t Mask = 0; |
3729 | if (BigEndian) { |
3730 | for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; |
3731 | ++P) |
3732 | Mask = (Mask << CharWidth) | *P; |
3733 | } else { |
3734 | auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; |
3735 | do |
3736 | Mask = (Mask << CharWidth) | *--P; |
3737 | while (P != End); |
3738 | } |
3739 | return Mask; |
3740 | } |
3741 | |
3742 | // Emit code to clear the bits in a record, which aren't a part of any user |
3743 | // declared member, when the record is a function return. |
3744 | llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, |
3745 | llvm::IntegerType *ITy, |
3746 | QualType QTy) { |
3747 | assert(Src->getType() == ITy); |
3748 | assert(ITy->getScalarSizeInBits() <= 64); |
3749 | |
3750 | const llvm::DataLayout &DataLayout = CGM.getDataLayout(); |
3751 | int Size = DataLayout.getTypeStoreSize(Ty: ITy); |
3752 | SmallVector<uint64_t, 4> Bits(Size); |
3753 | setUsedBits(CGM, RTy: QTy->castAs<RecordType>(), Offset: 0, Bits); |
3754 | |
3755 | int CharWidth = CGM.getContext().getCharWidth(); |
3756 | uint64_t Mask = |
3757 | buildMultiCharMask(Bits, Pos: 0, Size, CharWidth, BigEndian: DataLayout.isBigEndian()); |
3758 | |
3759 | return Builder.CreateAnd(LHS: Src, RHS: Mask, Name: "cmse.clear" ); |
3760 | } |
3761 | |
3762 | // Emit code to clear the bits in a record, which aren't a part of any user |
3763 | // declared member, when the record is a function argument. |
3764 | llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, |
3765 | llvm::ArrayType *ATy, |
3766 | QualType QTy) { |
3767 | const llvm::DataLayout &DataLayout = CGM.getDataLayout(); |
3768 | int Size = DataLayout.getTypeStoreSize(Ty: ATy); |
3769 | SmallVector<uint64_t, 16> Bits(Size); |
3770 | setUsedBits(CGM, RTy: QTy->castAs<RecordType>(), Offset: 0, Bits); |
3771 | |
3772 | // Clear each element of the LLVM array. |
3773 | int CharWidth = CGM.getContext().getCharWidth(); |
3774 | int CharsPerElt = |
3775 | ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth; |
3776 | int MaskIndex = 0; |
3777 | llvm::Value *R = llvm::PoisonValue::get(T: ATy); |
3778 | for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) { |
3779 | uint64_t Mask = buildMultiCharMask(Bits, Pos: MaskIndex, Size: CharsPerElt, CharWidth, |
3780 | BigEndian: DataLayout.isBigEndian()); |
3781 | MaskIndex += CharsPerElt; |
3782 | llvm::Value *T0 = Builder.CreateExtractValue(Agg: Src, Idxs: I); |
3783 | llvm::Value *T1 = Builder.CreateAnd(LHS: T0, RHS: Mask, Name: "cmse.clear" ); |
3784 | R = Builder.CreateInsertValue(Agg: R, Val: T1, Idxs: I); |
3785 | } |
3786 | |
3787 | return R; |
3788 | } |
3789 | |
3790 | void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, |
3791 | bool EmitRetDbgLoc, |
3792 | SourceLocation EndLoc) { |
3793 | if (FI.isNoReturn()) { |
3794 | // Noreturn functions don't return. |
3795 | EmitUnreachable(Loc: EndLoc); |
3796 | return; |
3797 | } |
3798 | |
3799 | if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) { |
3800 | // Naked functions don't have epilogues. |
3801 | Builder.CreateUnreachable(); |
3802 | return; |
3803 | } |
3804 | |
3805 | // Functions with no result always return void. |
3806 | if (!ReturnValue.isValid()) { |
3807 | Builder.CreateRetVoid(); |
3808 | return; |
3809 | } |
3810 | |
3811 | llvm::DebugLoc RetDbgLoc; |
3812 | llvm::Value *RV = nullptr; |
3813 | QualType RetTy = FI.getReturnType(); |
3814 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
3815 | |
3816 | switch (RetAI.getKind()) { |
3817 | case ABIArgInfo::InAlloca: |
3818 | // Aggregates get evaluated directly into the destination. Sometimes we |
3819 | // need to return the sret value in a register, though. |
3820 | assert(hasAggregateEvaluationKind(RetTy)); |
3821 | if (RetAI.getInAllocaSRet()) { |
3822 | llvm::Function::arg_iterator EI = CurFn->arg_end(); |
3823 | --EI; |
3824 | llvm::Value *ArgStruct = &*EI; |
3825 | llvm::Value *SRet = Builder.CreateStructGEP( |
3826 | Ty: FI.getArgStruct(), Ptr: ArgStruct, Idx: RetAI.getInAllocaFieldIndex()); |
3827 | llvm::Type *Ty = |
3828 | cast<llvm::GetElementPtrInst>(Val: SRet)->getResultElementType(); |
3829 | RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret" ); |
3830 | } |
3831 | break; |
3832 | |
3833 | case ABIArgInfo::Indirect: { |
3834 | auto AI = CurFn->arg_begin(); |
3835 | if (RetAI.isSRetAfterThis()) |
3836 | ++AI; |
3837 | switch (getEvaluationKind(T: RetTy)) { |
3838 | case TEK_Complex: { |
3839 | ComplexPairTy RT = |
3840 | EmitLoadOfComplex(src: MakeAddrLValue(Addr: ReturnValue, T: RetTy), loc: EndLoc); |
3841 | EmitStoreOfComplex(V: RT, dest: MakeNaturalAlignAddrLValue(V: &*AI, T: RetTy), |
3842 | /*isInit*/ true); |
3843 | break; |
3844 | } |
3845 | case TEK_Aggregate: |
3846 | // Do nothing; aggregates get evaluated directly into the destination. |
3847 | break; |
3848 | case TEK_Scalar: { |
3849 | LValueBaseInfo BaseInfo; |
3850 | TBAAAccessInfo TBAAInfo; |
3851 | CharUnits Alignment = |
3852 | CGM.getNaturalTypeAlignment(T: RetTy, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
3853 | Address ArgAddr(&*AI, ConvertType(T: RetTy), Alignment); |
3854 | LValue ArgVal = |
3855 | LValue::MakeAddr(Addr: ArgAddr, type: RetTy, Context&: getContext(), BaseInfo, TBAAInfo); |
3856 | EmitStoreOfScalar( |
3857 | value: Builder.CreateLoad(Addr: ReturnValue), lvalue: ArgVal, /*isInit*/ true); |
3858 | break; |
3859 | } |
3860 | } |
3861 | break; |
3862 | } |
3863 | |
3864 | case ABIArgInfo::Extend: |
3865 | case ABIArgInfo::Direct: |
3866 | if (RetAI.getCoerceToType() == ConvertType(T: RetTy) && |
3867 | RetAI.getDirectOffset() == 0) { |
3868 | // The internal return value temp always will have pointer-to-return-type |
3869 | // type, just do a load. |
3870 | |
3871 | // If there is a dominating store to ReturnValue, we can elide |
3872 | // the load, zap the store, and usually zap the alloca. |
3873 | if (llvm::StoreInst *SI = |
3874 | findDominatingStoreToReturnValue(CGF&: *this)) { |
3875 | // Reuse the debug location from the store unless there is |
3876 | // cleanup code to be emitted between the store and return |
3877 | // instruction. |
3878 | if (EmitRetDbgLoc && !AutoreleaseResult) |
3879 | RetDbgLoc = SI->getDebugLoc(); |
3880 | // Get the stored value and nuke the now-dead store. |
3881 | RV = SI->getValueOperand(); |
3882 | SI->eraseFromParent(); |
3883 | |
3884 | // Otherwise, we have to do a simple load. |
3885 | } else { |
3886 | RV = Builder.CreateLoad(Addr: ReturnValue); |
3887 | } |
3888 | } else { |
3889 | // If the value is offset in memory, apply the offset now. |
3890 | Address V = emitAddressAtOffset(CGF&: *this, addr: ReturnValue, info: RetAI); |
3891 | |
3892 | RV = CreateCoercedLoad(Src: V, Ty: RetAI.getCoerceToType(), CGF&: *this); |
3893 | } |
3894 | |
3895 | // In ARC, end functions that return a retainable type with a call |
3896 | // to objc_autoreleaseReturnValue. |
3897 | if (AutoreleaseResult) { |
3898 | #ifndef NDEBUG |
3899 | // Type::isObjCRetainabletype has to be called on a QualType that hasn't |
3900 | // been stripped of the typedefs, so we cannot use RetTy here. Get the |
3901 | // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from |
3902 | // CurCodeDecl or BlockInfo. |
3903 | QualType RT; |
3904 | |
3905 | if (auto *FD = dyn_cast<FunctionDecl>(Val: CurCodeDecl)) |
3906 | RT = FD->getReturnType(); |
3907 | else if (auto *MD = dyn_cast<ObjCMethodDecl>(Val: CurCodeDecl)) |
3908 | RT = MD->getReturnType(); |
3909 | else if (isa<BlockDecl>(Val: CurCodeDecl)) |
3910 | RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); |
3911 | else |
3912 | llvm_unreachable("Unexpected function/method type" ); |
3913 | |
3914 | assert(getLangOpts().ObjCAutoRefCount && |
3915 | !FI.isReturnsRetained() && |
3916 | RT->isObjCRetainableType()); |
3917 | #endif |
3918 | RV = emitAutoreleaseOfResult(CGF&: *this, result: RV); |
3919 | } |
3920 | |
3921 | break; |
3922 | |
3923 | case ABIArgInfo::Ignore: |
3924 | break; |
3925 | |
3926 | case ABIArgInfo::CoerceAndExpand: { |
3927 | auto coercionType = RetAI.getCoerceAndExpandType(); |
3928 | |
3929 | // Load all of the coerced elements out into results. |
3930 | llvm::SmallVector<llvm::Value*, 4> results; |
3931 | Address addr = ReturnValue.withElementType(ElemTy: coercionType); |
3932 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
3933 | auto coercedEltType = coercionType->getElementType(N: i); |
3934 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType: coercedEltType)) |
3935 | continue; |
3936 | |
3937 | auto eltAddr = Builder.CreateStructGEP(Addr: addr, Index: i); |
3938 | auto elt = Builder.CreateLoad(Addr: eltAddr); |
3939 | results.push_back(Elt: elt); |
3940 | } |
3941 | |
3942 | // If we have one result, it's the single direct result type. |
3943 | if (results.size() == 1) { |
3944 | RV = results[0]; |
3945 | |
3946 | // Otherwise, we need to make a first-class aggregate. |
3947 | } else { |
3948 | // Construct a return type that lacks padding elements. |
3949 | llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); |
3950 | |
3951 | RV = llvm::PoisonValue::get(T: returnType); |
3952 | for (unsigned i = 0, e = results.size(); i != e; ++i) { |
3953 | RV = Builder.CreateInsertValue(Agg: RV, Val: results[i], Idxs: i); |
3954 | } |
3955 | } |
3956 | break; |
3957 | } |
3958 | case ABIArgInfo::Expand: |
3959 | case ABIArgInfo::IndirectAliased: |
3960 | llvm_unreachable("Invalid ABI kind for return argument" ); |
3961 | } |
3962 | |
3963 | llvm::Instruction *Ret; |
3964 | if (RV) { |
3965 | if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) { |
3966 | // For certain return types, clear padding bits, as they may reveal |
3967 | // sensitive information. |
3968 | // Small struct/union types are passed as integers. |
3969 | auto *ITy = dyn_cast<llvm::IntegerType>(Val: RV->getType()); |
3970 | if (ITy != nullptr && isa<RecordType>(Val: RetTy.getCanonicalType())) |
3971 | RV = EmitCMSEClearRecord(Src: RV, ITy, QTy: RetTy); |
3972 | } |
3973 | EmitReturnValueCheck(RV); |
3974 | Ret = Builder.CreateRet(V: RV); |
3975 | } else { |
3976 | Ret = Builder.CreateRetVoid(); |
3977 | } |
3978 | |
3979 | if (RetDbgLoc) |
3980 | Ret->setDebugLoc(std::move(RetDbgLoc)); |
3981 | } |
3982 | |
3983 | void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { |
3984 | // A current decl may not be available when emitting vtable thunks. |
3985 | if (!CurCodeDecl) |
3986 | return; |
3987 | |
3988 | // If the return block isn't reachable, neither is this check, so don't emit |
3989 | // it. |
3990 | if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) |
3991 | return; |
3992 | |
3993 | ReturnsNonNullAttr *RetNNAttr = nullptr; |
3994 | if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) |
3995 | RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); |
3996 | |
3997 | if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) |
3998 | return; |
3999 | |
4000 | // Prefer the returns_nonnull attribute if it's present. |
4001 | SourceLocation AttrLoc; |
4002 | SanitizerMask CheckKind; |
4003 | SanitizerHandler Handler; |
4004 | if (RetNNAttr) { |
4005 | assert(!requiresReturnValueNullabilityCheck() && |
4006 | "Cannot check nullability and the nonnull attribute" ); |
4007 | AttrLoc = RetNNAttr->getLocation(); |
4008 | CheckKind = SanitizerKind::ReturnsNonnullAttribute; |
4009 | Handler = SanitizerHandler::NonnullReturn; |
4010 | } else { |
4011 | if (auto *DD = dyn_cast<DeclaratorDecl>(Val: CurCodeDecl)) |
4012 | if (auto *TSI = DD->getTypeSourceInfo()) |
4013 | if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) |
4014 | AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); |
4015 | CheckKind = SanitizerKind::NullabilityReturn; |
4016 | Handler = SanitizerHandler::NullabilityReturn; |
4017 | } |
4018 | |
4019 | SanitizerScope SanScope(this); |
4020 | |
4021 | // Make sure the "return" source location is valid. If we're checking a |
4022 | // nullability annotation, make sure the preconditions for the check are met. |
4023 | llvm::BasicBlock *Check = createBasicBlock(name: "nullcheck" ); |
4024 | llvm::BasicBlock *NoCheck = createBasicBlock(name: "no.nullcheck" ); |
4025 | llvm::Value *SLocPtr = Builder.CreateLoad(Addr: ReturnLocation, Name: "return.sloc.load" ); |
4026 | llvm::Value *CanNullCheck = Builder.CreateIsNotNull(Arg: SLocPtr); |
4027 | if (requiresReturnValueNullabilityCheck()) |
4028 | CanNullCheck = |
4029 | Builder.CreateAnd(LHS: CanNullCheck, RHS: RetValNullabilityPrecondition); |
4030 | Builder.CreateCondBr(Cond: CanNullCheck, True: Check, False: NoCheck); |
4031 | EmitBlock(BB: Check); |
4032 | |
4033 | // Now do the null check. |
4034 | llvm::Value *Cond = Builder.CreateIsNotNull(Arg: RV); |
4035 | llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc: AttrLoc)}; |
4036 | llvm::Value *DynamicData[] = {SLocPtr}; |
4037 | EmitCheck(Checked: std::make_pair(x&: Cond, y&: CheckKind), Check: Handler, StaticArgs: StaticData, DynamicArgs: DynamicData); |
4038 | |
4039 | EmitBlock(BB: NoCheck); |
4040 | |
4041 | #ifndef NDEBUG |
4042 | // The return location should not be used after the check has been emitted. |
4043 | ReturnLocation = Address::invalid(); |
4044 | #endif |
4045 | } |
4046 | |
4047 | static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { |
4048 | const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); |
4049 | return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; |
4050 | } |
4051 | |
4052 | static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, |
4053 | QualType Ty) { |
4054 | // FIXME: Generate IR in one pass, rather than going back and fixing up these |
4055 | // placeholders. |
4056 | llvm::Type *IRTy = CGF.ConvertTypeForMem(T: Ty); |
4057 | llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(C&: CGF.getLLVMContext()); |
4058 | llvm::Value *Placeholder = llvm::PoisonValue::get(T: IRPtrTy); |
4059 | |
4060 | // FIXME: When we generate this IR in one pass, we shouldn't need |
4061 | // this win32-specific alignment hack. |
4062 | CharUnits Align = CharUnits::fromQuantity(Quantity: 4); |
4063 | Placeholder = CGF.Builder.CreateAlignedLoad(Ty: IRPtrTy, Addr: Placeholder, Align); |
4064 | |
4065 | return AggValueSlot::forAddr(addr: Address(Placeholder, IRTy, Align), |
4066 | quals: Ty.getQualifiers(), |
4067 | isDestructed: AggValueSlot::IsNotDestructed, |
4068 | needsGC: AggValueSlot::DoesNotNeedGCBarriers, |
4069 | isAliased: AggValueSlot::IsNotAliased, |
4070 | mayOverlap: AggValueSlot::DoesNotOverlap); |
4071 | } |
4072 | |
4073 | void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, |
4074 | const VarDecl *param, |
4075 | SourceLocation loc) { |
4076 | // StartFunction converted the ABI-lowered parameter(s) into a |
4077 | // local alloca. We need to turn that into an r-value suitable |
4078 | // for EmitCall. |
4079 | Address local = GetAddrOfLocalVar(VD: param); |
4080 | |
4081 | QualType type = param->getType(); |
4082 | |
4083 | // GetAddrOfLocalVar returns a pointer-to-pointer for references, |
4084 | // but the argument needs to be the original pointer. |
4085 | if (type->isReferenceType()) { |
4086 | args.add(rvalue: RValue::get(V: Builder.CreateLoad(Addr: local)), type); |
4087 | |
4088 | // In ARC, move out of consumed arguments so that the release cleanup |
4089 | // entered by StartFunction doesn't cause an over-release. This isn't |
4090 | // optimal -O0 code generation, but it should get cleaned up when |
4091 | // optimization is enabled. This also assumes that delegate calls are |
4092 | // performed exactly once for a set of arguments, but that should be safe. |
4093 | } else if (getLangOpts().ObjCAutoRefCount && |
4094 | param->hasAttr<NSConsumedAttr>() && |
4095 | type->isObjCRetainableType()) { |
4096 | llvm::Value *ptr = Builder.CreateLoad(Addr: local); |
4097 | auto null = |
4098 | llvm::ConstantPointerNull::get(T: cast<llvm::PointerType>(Val: ptr->getType())); |
4099 | Builder.CreateStore(Val: null, Addr: local); |
4100 | args.add(rvalue: RValue::get(V: ptr), type); |
4101 | |
4102 | // For the most part, we just need to load the alloca, except that |
4103 | // aggregate r-values are actually pointers to temporaries. |
4104 | } else { |
4105 | args.add(rvalue: convertTempToRValue(addr: local, type, Loc: loc), type); |
4106 | } |
4107 | |
4108 | // Deactivate the cleanup for the callee-destructed param that was pushed. |
4109 | if (type->isRecordType() && !CurFuncIsThunk && |
4110 | type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && |
4111 | param->needsDestruction(Ctx: getContext())) { |
4112 | EHScopeStack::stable_iterator cleanup = |
4113 | CalleeDestructedParamCleanups.lookup(Val: cast<ParmVarDecl>(Val: param)); |
4114 | assert(cleanup.isValid() && |
4115 | "cleanup for callee-destructed param not recorded" ); |
4116 | // This unreachable is a temporary marker which will be removed later. |
4117 | llvm::Instruction *isActive = Builder.CreateUnreachable(); |
4118 | args.addArgCleanupDeactivation(Cleanup: cleanup, IsActiveIP: isActive); |
4119 | } |
4120 | } |
4121 | |
4122 | static bool isProvablyNull(llvm::Value *addr) { |
4123 | return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(Val: addr); |
4124 | } |
4125 | |
4126 | static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) { |
4127 | return llvm::isKnownNonZero(V: Addr.getBasePointer(), Q: CGF.CGM.getDataLayout()); |
4128 | } |
4129 | |
4130 | /// Emit the actual writing-back of a writeback. |
4131 | static void emitWriteback(CodeGenFunction &CGF, |
4132 | const CallArgList::Writeback &writeback) { |
4133 | const LValue &srcLV = writeback.Source; |
4134 | Address srcAddr = srcLV.getAddress(CGF); |
4135 | assert(!isProvablyNull(srcAddr.getBasePointer()) && |
4136 | "shouldn't have writeback for provably null argument" ); |
4137 | |
4138 | llvm::BasicBlock *contBB = nullptr; |
4139 | |
4140 | // If the argument wasn't provably non-null, we need to null check |
4141 | // before doing the store. |
4142 | bool provablyNonNull = isProvablyNonNull(Addr: srcAddr, CGF); |
4143 | |
4144 | if (!provablyNonNull) { |
4145 | llvm::BasicBlock *writebackBB = CGF.createBasicBlock(name: "icr.writeback" ); |
4146 | contBB = CGF.createBasicBlock(name: "icr.done" ); |
4147 | |
4148 | llvm::Value *isNull = CGF.Builder.CreateIsNull(Addr: srcAddr, Name: "icr.isnull" ); |
4149 | CGF.Builder.CreateCondBr(Cond: isNull, True: contBB, False: writebackBB); |
4150 | CGF.EmitBlock(BB: writebackBB); |
4151 | } |
4152 | |
4153 | // Load the value to writeback. |
4154 | llvm::Value *value = CGF.Builder.CreateLoad(Addr: writeback.Temporary); |
4155 | |
4156 | // Cast it back, in case we're writing an id to a Foo* or something. |
4157 | value = CGF.Builder.CreateBitCast(V: value, DestTy: srcAddr.getElementType(), |
4158 | Name: "icr.writeback-cast" ); |
4159 | |
4160 | // Perform the writeback. |
4161 | |
4162 | // If we have a "to use" value, it's something we need to emit a use |
4163 | // of. This has to be carefully threaded in: if it's done after the |
4164 | // release it's potentially undefined behavior (and the optimizer |
4165 | // will ignore it), and if it happens before the retain then the |
4166 | // optimizer could move the release there. |
4167 | if (writeback.ToUse) { |
4168 | assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); |
4169 | |
4170 | // Retain the new value. No need to block-copy here: the block's |
4171 | // being passed up the stack. |
4172 | value = CGF.EmitARCRetainNonBlock(value); |
4173 | |
4174 | // Emit the intrinsic use here. |
4175 | CGF.EmitARCIntrinsicUse(values: writeback.ToUse); |
4176 | |
4177 | // Load the old value (primitively). |
4178 | llvm::Value *oldValue = CGF.EmitLoadOfScalar(lvalue: srcLV, Loc: SourceLocation()); |
4179 | |
4180 | // Put the new value in place (primitively). |
4181 | CGF.EmitStoreOfScalar(value, lvalue: srcLV, /*init*/ isInit: false); |
4182 | |
4183 | // Release the old value. |
4184 | CGF.EmitARCRelease(value: oldValue, precise: srcLV.isARCPreciseLifetime()); |
4185 | |
4186 | // Otherwise, we can just do a normal lvalue store. |
4187 | } else { |
4188 | CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: srcLV); |
4189 | } |
4190 | |
4191 | // Jump to the continuation block. |
4192 | if (!provablyNonNull) |
4193 | CGF.EmitBlock(BB: contBB); |
4194 | } |
4195 | |
4196 | static void emitWritebacks(CodeGenFunction &CGF, |
4197 | const CallArgList &args) { |
4198 | for (const auto &I : args.writebacks()) |
4199 | emitWriteback(CGF, writeback: I); |
4200 | } |
4201 | |
4202 | static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, |
4203 | const CallArgList &CallArgs) { |
4204 | ArrayRef<CallArgList::CallArgCleanup> Cleanups = |
4205 | CallArgs.getCleanupsToDeactivate(); |
4206 | // Iterate in reverse to increase the likelihood of popping the cleanup. |
4207 | for (const auto &I : llvm::reverse(C&: Cleanups)) { |
4208 | CGF.DeactivateCleanupBlock(Cleanup: I.Cleanup, DominatingIP: I.IsActiveIP); |
4209 | I.IsActiveIP->eraseFromParent(); |
4210 | } |
4211 | } |
4212 | |
4213 | static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { |
4214 | if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(Val: E->IgnoreParens())) |
4215 | if (uop->getOpcode() == UO_AddrOf) |
4216 | return uop->getSubExpr(); |
4217 | return nullptr; |
4218 | } |
4219 | |
4220 | /// Emit an argument that's being passed call-by-writeback. That is, |
4221 | /// we are passing the address of an __autoreleased temporary; it |
4222 | /// might be copy-initialized with the current value of the given |
4223 | /// address, but it will definitely be copied out of after the call. |
4224 | static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, |
4225 | const ObjCIndirectCopyRestoreExpr *CRE) { |
4226 | LValue srcLV; |
4227 | |
4228 | // Make an optimistic effort to emit the address as an l-value. |
4229 | // This can fail if the argument expression is more complicated. |
4230 | if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(E: CRE->getSubExpr())) { |
4231 | srcLV = CGF.EmitLValue(E: lvExpr); |
4232 | |
4233 | // Otherwise, just emit it as a scalar. |
4234 | } else { |
4235 | Address srcAddr = CGF.EmitPointerWithAlignment(Addr: CRE->getSubExpr()); |
4236 | |
4237 | QualType srcAddrType = |
4238 | CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); |
4239 | srcLV = CGF.MakeAddrLValue(Addr: srcAddr, T: srcAddrType); |
4240 | } |
4241 | Address srcAddr = srcLV.getAddress(CGF); |
4242 | |
4243 | // The dest and src types don't necessarily match in LLVM terms |
4244 | // because of the crazy ObjC compatibility rules. |
4245 | |
4246 | llvm::PointerType *destType = |
4247 | cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); |
4248 | llvm::Type *destElemType = |
4249 | CGF.ConvertTypeForMem(T: CRE->getType()->getPointeeType()); |
4250 | |
4251 | // If the address is a constant null, just pass the appropriate null. |
4252 | if (isProvablyNull(addr: srcAddr.getBasePointer())) { |
4253 | args.add(rvalue: RValue::get(V: llvm::ConstantPointerNull::get(T: destType)), |
4254 | type: CRE->getType()); |
4255 | return; |
4256 | } |
4257 | |
4258 | // Create the temporary. |
4259 | Address temp = |
4260 | CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp" ); |
4261 | // Loading an l-value can introduce a cleanup if the l-value is __weak, |
4262 | // and that cleanup will be conditional if we can't prove that the l-value |
4263 | // isn't null, so we need to register a dominating point so that the cleanups |
4264 | // system will make valid IR. |
4265 | CodeGenFunction::ConditionalEvaluation condEval(CGF); |
4266 | |
4267 | // Zero-initialize it if we're not doing a copy-initialization. |
4268 | bool shouldCopy = CRE->shouldCopy(); |
4269 | if (!shouldCopy) { |
4270 | llvm::Value *null = |
4271 | llvm::ConstantPointerNull::get(T: cast<llvm::PointerType>(Val: destElemType)); |
4272 | CGF.Builder.CreateStore(Val: null, Addr: temp); |
4273 | } |
4274 | |
4275 | llvm::BasicBlock *contBB = nullptr; |
4276 | llvm::BasicBlock *originBB = nullptr; |
4277 | |
4278 | // If the address is *not* known to be non-null, we need to switch. |
4279 | llvm::Value *finalArgument; |
4280 | |
4281 | bool provablyNonNull = isProvablyNonNull(Addr: srcAddr, CGF); |
4282 | |
4283 | if (provablyNonNull) { |
4284 | finalArgument = temp.emitRawPointer(CGF); |
4285 | } else { |
4286 | llvm::Value *isNull = CGF.Builder.CreateIsNull(Addr: srcAddr, Name: "icr.isnull" ); |
4287 | |
4288 | finalArgument = CGF.Builder.CreateSelect( |
4289 | C: isNull, True: llvm::ConstantPointerNull::get(T: destType), |
4290 | False: temp.emitRawPointer(CGF), Name: "icr.argument" ); |
4291 | |
4292 | // If we need to copy, then the load has to be conditional, which |
4293 | // means we need control flow. |
4294 | if (shouldCopy) { |
4295 | originBB = CGF.Builder.GetInsertBlock(); |
4296 | contBB = CGF.createBasicBlock(name: "icr.cont" ); |
4297 | llvm::BasicBlock *copyBB = CGF.createBasicBlock(name: "icr.copy" ); |
4298 | CGF.Builder.CreateCondBr(Cond: isNull, True: contBB, False: copyBB); |
4299 | CGF.EmitBlock(BB: copyBB); |
4300 | condEval.begin(CGF); |
4301 | } |
4302 | } |
4303 | |
4304 | llvm::Value *valueToUse = nullptr; |
4305 | |
4306 | // Perform a copy if necessary. |
4307 | if (shouldCopy) { |
4308 | RValue srcRV = CGF.EmitLoadOfLValue(V: srcLV, Loc: SourceLocation()); |
4309 | assert(srcRV.isScalar()); |
4310 | |
4311 | llvm::Value *src = srcRV.getScalarVal(); |
4312 | src = CGF.Builder.CreateBitCast(V: src, DestTy: destElemType, Name: "icr.cast" ); |
4313 | |
4314 | // Use an ordinary store, not a store-to-lvalue. |
4315 | CGF.Builder.CreateStore(Val: src, Addr: temp); |
4316 | |
4317 | // If optimization is enabled, and the value was held in a |
4318 | // __strong variable, we need to tell the optimizer that this |
4319 | // value has to stay alive until we're doing the store back. |
4320 | // This is because the temporary is effectively unretained, |
4321 | // and so otherwise we can violate the high-level semantics. |
4322 | if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && |
4323 | srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { |
4324 | valueToUse = src; |
4325 | } |
4326 | } |
4327 | |
4328 | // Finish the control flow if we needed it. |
4329 | if (shouldCopy && !provablyNonNull) { |
4330 | llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); |
4331 | CGF.EmitBlock(BB: contBB); |
4332 | |
4333 | // Make a phi for the value to intrinsically use. |
4334 | if (valueToUse) { |
4335 | llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(Ty: valueToUse->getType(), NumReservedValues: 2, |
4336 | Name: "icr.to-use" ); |
4337 | phiToUse->addIncoming(V: valueToUse, BB: copyBB); |
4338 | phiToUse->addIncoming(V: llvm::UndefValue::get(T: valueToUse->getType()), |
4339 | BB: originBB); |
4340 | valueToUse = phiToUse; |
4341 | } |
4342 | |
4343 | condEval.end(CGF); |
4344 | } |
4345 | |
4346 | args.addWriteback(srcLV, temporary: temp, toUse: valueToUse); |
4347 | args.add(rvalue: RValue::get(V: finalArgument), type: CRE->getType()); |
4348 | } |
4349 | |
4350 | void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { |
4351 | assert(!StackBase); |
4352 | |
4353 | // Save the stack. |
4354 | StackBase = CGF.Builder.CreateStackSave(Name: "inalloca.save" ); |
4355 | } |
4356 | |
4357 | void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { |
4358 | if (StackBase) { |
4359 | // Restore the stack after the call. |
4360 | CGF.Builder.CreateStackRestore(Ptr: StackBase); |
4361 | } |
4362 | } |
4363 | |
4364 | void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, |
4365 | SourceLocation ArgLoc, |
4366 | AbstractCallee AC, |
4367 | unsigned ParmNum) { |
4368 | if (!AC.getDecl() || !(SanOpts.has(K: SanitizerKind::NonnullAttribute) || |
4369 | SanOpts.has(K: SanitizerKind::NullabilityArg))) |
4370 | return; |
4371 | |
4372 | // The param decl may be missing in a variadic function. |
4373 | auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(I: ParmNum) : nullptr; |
4374 | unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; |
4375 | |
4376 | // Prefer the nonnull attribute if it's present. |
4377 | const NonNullAttr *NNAttr = nullptr; |
4378 | if (SanOpts.has(SanitizerKind::NonnullAttribute)) |
4379 | NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); |
4380 | |
4381 | bool CanCheckNullability = false; |
4382 | if (SanOpts.has(K: SanitizerKind::NullabilityArg) && !NNAttr && PVD && |
4383 | !PVD->getType()->isRecordType()) { |
4384 | auto Nullability = PVD->getType()->getNullability(); |
4385 | CanCheckNullability = Nullability && |
4386 | *Nullability == NullabilityKind::NonNull && |
4387 | PVD->getTypeSourceInfo(); |
4388 | } |
4389 | |
4390 | if (!NNAttr && !CanCheckNullability) |
4391 | return; |
4392 | |
4393 | SourceLocation AttrLoc; |
4394 | SanitizerMask CheckKind; |
4395 | SanitizerHandler Handler; |
4396 | if (NNAttr) { |
4397 | AttrLoc = NNAttr->getLocation(); |
4398 | CheckKind = SanitizerKind::NonnullAttribute; |
4399 | Handler = SanitizerHandler::NonnullArg; |
4400 | } else { |
4401 | AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); |
4402 | CheckKind = SanitizerKind::NullabilityArg; |
4403 | Handler = SanitizerHandler::NullabilityArg; |
4404 | } |
4405 | |
4406 | SanitizerScope SanScope(this); |
4407 | llvm::Value *Cond = EmitNonNullRValueCheck(RV, T: ArgType); |
4408 | llvm::Constant *StaticData[] = { |
4409 | EmitCheckSourceLocation(Loc: ArgLoc), EmitCheckSourceLocation(Loc: AttrLoc), |
4410 | llvm::ConstantInt::get(Ty: Int32Ty, V: ArgNo + 1), |
4411 | }; |
4412 | EmitCheck(Checked: std::make_pair(x&: Cond, y&: CheckKind), Check: Handler, StaticArgs: StaticData, DynamicArgs: std::nullopt); |
4413 | } |
4414 | |
4415 | void CodeGenFunction::EmitNonNullArgCheck(Address Addr, QualType ArgType, |
4416 | SourceLocation ArgLoc, |
4417 | AbstractCallee AC, unsigned ParmNum) { |
4418 | if (!AC.getDecl() || !(SanOpts.has(K: SanitizerKind::NonnullAttribute) || |
4419 | SanOpts.has(K: SanitizerKind::NullabilityArg))) |
4420 | return; |
4421 | |
4422 | EmitNonNullArgCheck(RV: RValue::get(Addr, CGF&: *this), ArgType, ArgLoc, AC, ParmNum); |
4423 | } |
4424 | |
4425 | // Check if the call is going to use the inalloca convention. This needs to |
4426 | // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged |
4427 | // later, so we can't check it directly. |
4428 | static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, |
4429 | ArrayRef<QualType> ArgTypes) { |
4430 | // The Swift calling conventions don't go through the target-specific |
4431 | // argument classification, they never use inalloca. |
4432 | // TODO: Consider limiting inalloca use to only calling conventions supported |
4433 | // by MSVC. |
4434 | if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync) |
4435 | return false; |
4436 | if (!CGM.getTarget().getCXXABI().isMicrosoft()) |
4437 | return false; |
4438 | return llvm::any_of(Range&: ArgTypes, P: [&](QualType Ty) { |
4439 | return isInAllocaArgument(ABI&: CGM.getCXXABI(), type: Ty); |
4440 | }); |
4441 | } |
4442 | |
4443 | #ifndef NDEBUG |
4444 | // Determine whether the given argument is an Objective-C method |
4445 | // that may have type parameters in its signature. |
4446 | static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { |
4447 | const DeclContext *dc = method->getDeclContext(); |
4448 | if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(Val: dc)) { |
4449 | return classDecl->getTypeParamListAsWritten(); |
4450 | } |
4451 | |
4452 | if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(Val: dc)) { |
4453 | return catDecl->getTypeParamList(); |
4454 | } |
4455 | |
4456 | return false; |
4457 | } |
4458 | #endif |
4459 | |
4460 | /// EmitCallArgs - Emit call arguments for a function. |
4461 | void CodeGenFunction::EmitCallArgs( |
4462 | CallArgList &Args, PrototypeWrapper Prototype, |
4463 | llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, |
4464 | AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { |
4465 | SmallVector<QualType, 16> ArgTypes; |
4466 | |
4467 | assert((ParamsToSkip == 0 || Prototype.P) && |
4468 | "Can't skip parameters if type info is not provided" ); |
4469 | |
4470 | // This variable only captures *explicitly* written conventions, not those |
4471 | // applied by default via command line flags or target defaults, such as |
4472 | // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would |
4473 | // require knowing if this is a C++ instance method or being able to see |
4474 | // unprototyped FunctionTypes. |
4475 | CallingConv ExplicitCC = CC_C; |
4476 | |
4477 | // First, if a prototype was provided, use those argument types. |
4478 | bool IsVariadic = false; |
4479 | if (Prototype.P) { |
4480 | const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>(); |
4481 | if (MD) { |
4482 | IsVariadic = MD->isVariadic(); |
4483 | ExplicitCC = getCallingConventionForDecl( |
4484 | D: MD, IsWindows: CGM.getTarget().getTriple().isOSWindows()); |
4485 | ArgTypes.assign(in_start: MD->param_type_begin() + ParamsToSkip, |
4486 | in_end: MD->param_type_end()); |
4487 | } else { |
4488 | const auto *FPT = Prototype.P.get<const FunctionProtoType *>(); |
4489 | IsVariadic = FPT->isVariadic(); |
4490 | ExplicitCC = FPT->getExtInfo().getCC(); |
4491 | ArgTypes.assign(in_start: FPT->param_type_begin() + ParamsToSkip, |
4492 | in_end: FPT->param_type_end()); |
4493 | } |
4494 | |
4495 | #ifndef NDEBUG |
4496 | // Check that the prototyped types match the argument expression types. |
4497 | bool isGenericMethod = MD && isObjCMethodWithTypeParams(method: MD); |
4498 | CallExpr::const_arg_iterator Arg = ArgRange.begin(); |
4499 | for (QualType Ty : ArgTypes) { |
4500 | assert(Arg != ArgRange.end() && "Running over edge of argument list!" ); |
4501 | assert( |
4502 | (isGenericMethod || Ty->isVariablyModifiedType() || |
4503 | Ty.getNonReferenceType()->isObjCRetainableType() || |
4504 | getContext() |
4505 | .getCanonicalType(Ty.getNonReferenceType()) |
4506 | .getTypePtr() == |
4507 | getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) && |
4508 | "type mismatch in call argument!" ); |
4509 | ++Arg; |
4510 | } |
4511 | |
4512 | // Either we've emitted all the call args, or we have a call to variadic |
4513 | // function. |
4514 | assert((Arg == ArgRange.end() || IsVariadic) && |
4515 | "Extra arguments in non-variadic function!" ); |
4516 | #endif |
4517 | } |
4518 | |
4519 | // If we still have any arguments, emit them using the type of the argument. |
4520 | for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) |
4521 | ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType()); |
4522 | assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); |
4523 | |
4524 | // We must evaluate arguments from right to left in the MS C++ ABI, |
4525 | // because arguments are destroyed left to right in the callee. As a special |
4526 | // case, there are certain language constructs that require left-to-right |
4527 | // evaluation, and in those cases we consider the evaluation order requirement |
4528 | // to trump the "destruction order is reverse construction order" guarantee. |
4529 | bool LeftToRight = |
4530 | CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() |
4531 | ? Order == EvaluationOrder::ForceLeftToRight |
4532 | : Order != EvaluationOrder::ForceRightToLeft; |
4533 | |
4534 | auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, |
4535 | RValue EmittedArg) { |
4536 | if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) |
4537 | return; |
4538 | auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); |
4539 | if (PS == nullptr) |
4540 | return; |
4541 | |
4542 | const auto &Context = getContext(); |
4543 | auto SizeTy = Context.getSizeType(); |
4544 | auto T = Builder.getIntNTy(N: Context.getTypeSize(T: SizeTy)); |
4545 | assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?" ); |
4546 | llvm::Value *V = evaluateOrEmitBuiltinObjectSize(E: Arg, Type: PS->getType(), ResType: T, |
4547 | EmittedE: EmittedArg.getScalarVal(), |
4548 | IsDynamic: PS->isDynamic()); |
4549 | Args.add(rvalue: RValue::get(V), type: SizeTy); |
4550 | // If we're emitting args in reverse, be sure to do so with |
4551 | // pass_object_size, as well. |
4552 | if (!LeftToRight) |
4553 | std::swap(a&: Args.back(), b&: *(&Args.back() - 1)); |
4554 | }; |
4555 | |
4556 | // Insert a stack save if we're going to need any inalloca args. |
4557 | if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) { |
4558 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86 && |
4559 | "inalloca only supported on x86" ); |
4560 | Args.allocateArgumentMemory(CGF&: *this); |
4561 | } |
4562 | |
4563 | // Evaluate each argument in the appropriate order. |
4564 | size_t CallArgsStart = Args.size(); |
4565 | for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { |
4566 | unsigned Idx = LeftToRight ? I : E - I - 1; |
4567 | CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; |
4568 | unsigned InitialArgSize = Args.size(); |
4569 | // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of |
4570 | // the argument and parameter match or the objc method is parameterized. |
4571 | assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || |
4572 | getContext().hasSameUnqualifiedType((*Arg)->getType(), |
4573 | ArgTypes[Idx]) || |
4574 | (isa<ObjCMethodDecl>(AC.getDecl()) && |
4575 | isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && |
4576 | "Argument and parameter types don't match" ); |
4577 | EmitCallArg(args&: Args, E: *Arg, ArgType: ArgTypes[Idx]); |
4578 | // In particular, we depend on it being the last arg in Args, and the |
4579 | // objectsize bits depend on there only being one arg if !LeftToRight. |
4580 | assert(InitialArgSize + 1 == Args.size() && |
4581 | "The code below depends on only adding one arg per EmitCallArg" ); |
4582 | (void)InitialArgSize; |
4583 | // Since pointer argument are never emitted as LValue, it is safe to emit |
4584 | // non-null argument check for r-value only. |
4585 | if (!Args.back().hasLValue()) { |
4586 | RValue RVArg = Args.back().getKnownRValue(); |
4587 | EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, |
4588 | ParamsToSkip + Idx); |
4589 | // @llvm.objectsize should never have side-effects and shouldn't need |
4590 | // destruction/cleanups, so we can safely "emit" it after its arg, |
4591 | // regardless of right-to-leftness |
4592 | MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); |
4593 | } |
4594 | } |
4595 | |
4596 | if (!LeftToRight) { |
4597 | // Un-reverse the arguments we just evaluated so they match up with the LLVM |
4598 | // IR function. |
4599 | std::reverse(first: Args.begin() + CallArgsStart, last: Args.end()); |
4600 | } |
4601 | } |
4602 | |
4603 | namespace { |
4604 | |
4605 | struct DestroyUnpassedArg final : EHScopeStack::Cleanup { |
4606 | DestroyUnpassedArg(Address Addr, QualType Ty) |
4607 | : Addr(Addr), Ty(Ty) {} |
4608 | |
4609 | Address Addr; |
4610 | QualType Ty; |
4611 | |
4612 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
4613 | QualType::DestructionKind DtorKind = Ty.isDestructedType(); |
4614 | if (DtorKind == QualType::DK_cxx_destructor) { |
4615 | const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); |
4616 | assert(!Dtor->isTrivial()); |
4617 | CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, |
4618 | /*Delegating=*/false, Addr, Ty); |
4619 | } else { |
4620 | CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); |
4621 | } |
4622 | } |
4623 | }; |
4624 | |
4625 | struct DisableDebugLocationUpdates { |
4626 | CodeGenFunction &CGF; |
4627 | bool disabledDebugInfo; |
4628 | DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { |
4629 | if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(Val: E) && CGF.getDebugInfo())) |
4630 | CGF.disableDebugInfo(); |
4631 | } |
4632 | ~DisableDebugLocationUpdates() { |
4633 | if (disabledDebugInfo) |
4634 | CGF.enableDebugInfo(); |
4635 | } |
4636 | }; |
4637 | |
4638 | } // end anonymous namespace |
4639 | |
4640 | RValue CallArg::getRValue(CodeGenFunction &CGF) const { |
4641 | if (!HasLV) |
4642 | return RV; |
4643 | LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); |
4644 | CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, |
4645 | LV.isVolatile()); |
4646 | IsUsed = true; |
4647 | return RValue::getAggregate(addr: Copy.getAddress(CGF)); |
4648 | } |
4649 | |
4650 | void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { |
4651 | LValue Dst = CGF.MakeAddrLValue(Addr, Ty); |
4652 | if (!HasLV && RV.isScalar()) |
4653 | CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); |
4654 | else if (!HasLV && RV.isComplex()) |
4655 | CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); |
4656 | else { |
4657 | auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); |
4658 | LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); |
4659 | // We assume that call args are never copied into subobjects. |
4660 | CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, |
4661 | HasLV ? LV.isVolatileQualified() |
4662 | : RV.isVolatileQualified()); |
4663 | } |
4664 | IsUsed = true; |
4665 | } |
4666 | |
4667 | void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, |
4668 | QualType type) { |
4669 | DisableDebugLocationUpdates Dis(*this, E); |
4670 | if (const ObjCIndirectCopyRestoreExpr *CRE |
4671 | = dyn_cast<ObjCIndirectCopyRestoreExpr>(Val: E)) { |
4672 | assert(getLangOpts().ObjCAutoRefCount); |
4673 | return emitWritebackArg(CGF&: *this, args, CRE); |
4674 | } |
4675 | |
4676 | assert(type->isReferenceType() == E->isGLValue() && |
4677 | "reference binding to unmaterialized r-value!" ); |
4678 | |
4679 | if (E->isGLValue()) { |
4680 | assert(E->getObjectKind() == OK_Ordinary); |
4681 | return args.add(rvalue: EmitReferenceBindingToExpr(E), type); |
4682 | } |
4683 | |
4684 | bool HasAggregateEvalKind = hasAggregateEvaluationKind(T: type); |
4685 | |
4686 | // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. |
4687 | // However, we still have to push an EH-only cleanup in case we unwind before |
4688 | // we make it to the call. |
4689 | if (type->isRecordType() && |
4690 | type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { |
4691 | // If we're using inalloca, use the argument memory. Otherwise, use a |
4692 | // temporary. |
4693 | AggValueSlot Slot = args.isUsingInAlloca() |
4694 | ? createPlaceholderSlot(CGF&: *this, Ty: type) : CreateAggTemp(T: type, Name: "agg.tmp" ); |
4695 | |
4696 | bool DestroyedInCallee = true, NeedsEHCleanup = true; |
4697 | if (const auto *RD = type->getAsCXXRecordDecl()) |
4698 | DestroyedInCallee = RD->hasNonTrivialDestructor(); |
4699 | else |
4700 | NeedsEHCleanup = needsEHCleanup(kind: type.isDestructedType()); |
4701 | |
4702 | if (DestroyedInCallee) |
4703 | Slot.setExternallyDestructed(); |
4704 | |
4705 | EmitAggExpr(E, AS: Slot); |
4706 | RValue RV = Slot.asRValue(); |
4707 | args.add(rvalue: RV, type); |
4708 | |
4709 | if (DestroyedInCallee && NeedsEHCleanup) { |
4710 | // Create a no-op GEP between the placeholder and the cleanup so we can |
4711 | // RAUW it successfully. It also serves as a marker of the first |
4712 | // instruction where the cleanup is active. |
4713 | pushFullExprCleanup<DestroyUnpassedArg>(kind: EHCleanup, A: Slot.getAddress(), |
4714 | A: type); |
4715 | // This unreachable is a temporary marker which will be removed later. |
4716 | llvm::Instruction *IsActive = Builder.CreateUnreachable(); |
4717 | args.addArgCleanupDeactivation(Cleanup: EHStack.stable_begin(), IsActiveIP: IsActive); |
4718 | } |
4719 | return; |
4720 | } |
4721 | |
4722 | if (HasAggregateEvalKind && isa<ImplicitCastExpr>(Val: E) && |
4723 | cast<CastExpr>(Val: E)->getCastKind() == CK_LValueToRValue && |
4724 | !type->isArrayParameterType()) { |
4725 | LValue L = EmitLValue(E: cast<CastExpr>(Val: E)->getSubExpr()); |
4726 | assert(L.isSimple()); |
4727 | args.addUncopiedAggregate(LV: L, type); |
4728 | return; |
4729 | } |
4730 | |
4731 | args.add(rvalue: EmitAnyExprToTemp(E), type); |
4732 | } |
4733 | |
4734 | QualType CodeGenFunction::getVarArgType(const Expr *Arg) { |
4735 | // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC |
4736 | // implicitly widens null pointer constants that are arguments to varargs |
4737 | // functions to pointer-sized ints. |
4738 | if (!getTarget().getTriple().isOSWindows()) |
4739 | return Arg->getType(); |
4740 | |
4741 | if (Arg->getType()->isIntegerType() && |
4742 | getContext().getTypeSize(T: Arg->getType()) < |
4743 | getContext().getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) && |
4744 | Arg->isNullPointerConstant(Ctx&: getContext(), |
4745 | NPC: Expr::NPC_ValueDependentIsNotNull)) { |
4746 | return getContext().getIntPtrType(); |
4747 | } |
4748 | |
4749 | return Arg->getType(); |
4750 | } |
4751 | |
4752 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
4753 | // optimizer it can aggressively ignore unwind edges. |
4754 | void |
4755 | CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { |
4756 | if (CGM.getCodeGenOpts().OptimizationLevel != 0 && |
4757 | !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) |
4758 | Inst->setMetadata(Kind: "clang.arc.no_objc_arc_exceptions" , |
4759 | Node: CGM.getNoObjCARCExceptionsMetadata()); |
4760 | } |
4761 | |
4762 | /// Emits a call to the given no-arguments nounwind runtime function. |
4763 | llvm::CallInst * |
4764 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4765 | const llvm::Twine &name) { |
4766 | return EmitNounwindRuntimeCall(callee, args: ArrayRef<llvm::Value *>(), name); |
4767 | } |
4768 | |
4769 | /// Emits a call to the given nounwind runtime function. |
4770 | llvm::CallInst * |
4771 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4772 | ArrayRef<Address> args, |
4773 | const llvm::Twine &name) { |
4774 | SmallVector<llvm::Value *, 3> values; |
4775 | for (auto arg : args) |
4776 | values.push_back(Elt: arg.emitRawPointer(CGF&: *this)); |
4777 | return EmitNounwindRuntimeCall(callee, args: values, name); |
4778 | } |
4779 | |
4780 | llvm::CallInst * |
4781 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4782 | ArrayRef<llvm::Value *> args, |
4783 | const llvm::Twine &name) { |
4784 | llvm::CallInst *call = EmitRuntimeCall(callee, args, name); |
4785 | call->setDoesNotThrow(); |
4786 | return call; |
4787 | } |
4788 | |
4789 | /// Emits a simple call (never an invoke) to the given no-arguments |
4790 | /// runtime function. |
4791 | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, |
4792 | const llvm::Twine &name) { |
4793 | return EmitRuntimeCall(callee, args: std::nullopt, name); |
4794 | } |
4795 | |
4796 | // Calls which may throw must have operand bundles indicating which funclet |
4797 | // they are nested within. |
4798 | SmallVector<llvm::OperandBundleDef, 1> |
4799 | CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { |
4800 | // There is no need for a funclet operand bundle if we aren't inside a |
4801 | // funclet. |
4802 | if (!CurrentFuncletPad) |
4803 | return (SmallVector<llvm::OperandBundleDef, 1>()); |
4804 | |
4805 | // Skip intrinsics which cannot throw (as long as they don't lower into |
4806 | // regular function calls in the course of IR transformations). |
4807 | if (auto *CalleeFn = dyn_cast<llvm::Function>(Val: Callee->stripPointerCasts())) { |
4808 | if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) { |
4809 | auto IID = CalleeFn->getIntrinsicID(); |
4810 | if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID)) |
4811 | return (SmallVector<llvm::OperandBundleDef, 1>()); |
4812 | } |
4813 | } |
4814 | |
4815 | SmallVector<llvm::OperandBundleDef, 1> BundleList; |
4816 | BundleList.emplace_back(Args: "funclet" , Args&: CurrentFuncletPad); |
4817 | return BundleList; |
4818 | } |
4819 | |
4820 | /// Emits a simple call (never an invoke) to the given runtime function. |
4821 | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, |
4822 | ArrayRef<llvm::Value *> args, |
4823 | const llvm::Twine &name) { |
4824 | llvm::CallInst *call = Builder.CreateCall( |
4825 | Callee: callee, Args: args, OpBundles: getBundlesForFunclet(Callee: callee.getCallee()), Name: name); |
4826 | call->setCallingConv(getRuntimeCC()); |
4827 | return call; |
4828 | } |
4829 | |
4830 | /// Emits a call or invoke to the given noreturn runtime function. |
4831 | void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( |
4832 | llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { |
4833 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
4834 | getBundlesForFunclet(Callee: callee.getCallee()); |
4835 | |
4836 | if (getInvokeDest()) { |
4837 | llvm::InvokeInst *invoke = |
4838 | Builder.CreateInvoke(Callee: callee, |
4839 | NormalDest: getUnreachableBlock(), |
4840 | UnwindDest: getInvokeDest(), |
4841 | Args: args, |
4842 | OpBundles: BundleList); |
4843 | invoke->setDoesNotReturn(); |
4844 | invoke->setCallingConv(getRuntimeCC()); |
4845 | } else { |
4846 | llvm::CallInst *call = Builder.CreateCall(Callee: callee, Args: args, OpBundles: BundleList); |
4847 | call->setDoesNotReturn(); |
4848 | call->setCallingConv(getRuntimeCC()); |
4849 | Builder.CreateUnreachable(); |
4850 | } |
4851 | } |
4852 | |
4853 | /// Emits a call or invoke instruction to the given nullary runtime function. |
4854 | llvm::CallBase * |
4855 | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, |
4856 | const Twine &name) { |
4857 | return EmitRuntimeCallOrInvoke(callee, args: std::nullopt, name); |
4858 | } |
4859 | |
4860 | /// Emits a call or invoke instruction to the given runtime function. |
4861 | llvm::CallBase * |
4862 | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, |
4863 | ArrayRef<llvm::Value *> args, |
4864 | const Twine &name) { |
4865 | llvm::CallBase *call = EmitCallOrInvoke(Callee: callee, Args: args, Name: name); |
4866 | call->setCallingConv(getRuntimeCC()); |
4867 | return call; |
4868 | } |
4869 | |
4870 | /// Emits a call or invoke instruction to the given function, depending |
4871 | /// on the current state of the EH stack. |
4872 | llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, |
4873 | ArrayRef<llvm::Value *> Args, |
4874 | const Twine &Name) { |
4875 | llvm::BasicBlock *InvokeDest = getInvokeDest(); |
4876 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
4877 | getBundlesForFunclet(Callee: Callee.getCallee()); |
4878 | |
4879 | llvm::CallBase *Inst; |
4880 | if (!InvokeDest) |
4881 | Inst = Builder.CreateCall(Callee, Args, OpBundles: BundleList, Name); |
4882 | else { |
4883 | llvm::BasicBlock *ContBB = createBasicBlock(name: "invoke.cont" ); |
4884 | Inst = Builder.CreateInvoke(Callee, NormalDest: ContBB, UnwindDest: InvokeDest, Args, OpBundles: BundleList, |
4885 | Name); |
4886 | EmitBlock(BB: ContBB); |
4887 | } |
4888 | |
4889 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
4890 | // optimizer it can aggressively ignore unwind edges. |
4891 | if (CGM.getLangOpts().ObjCAutoRefCount) |
4892 | AddObjCARCExceptionMetadata(Inst); |
4893 | |
4894 | return Inst; |
4895 | } |
4896 | |
4897 | void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, |
4898 | llvm::Value *New) { |
4899 | DeferredReplacements.push_back( |
4900 | Elt: std::make_pair(x: llvm::WeakTrackingVH(Old), y&: New)); |
4901 | } |
4902 | |
4903 | namespace { |
4904 | |
4905 | /// Specify given \p NewAlign as the alignment of return value attribute. If |
4906 | /// such attribute already exists, re-set it to the maximal one of two options. |
4907 | [[nodiscard]] llvm::AttributeList |
4908 | maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, |
4909 | const llvm::AttributeList &Attrs, |
4910 | llvm::Align NewAlign) { |
4911 | llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); |
4912 | if (CurAlign >= NewAlign) |
4913 | return Attrs; |
4914 | llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Context&: Ctx, Alignment: NewAlign); |
4915 | return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment) |
4916 | .addRetAttribute(Ctx, AlignAttr); |
4917 | } |
4918 | |
4919 | template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { |
4920 | protected: |
4921 | CodeGenFunction &CGF; |
4922 | |
4923 | /// We do nothing if this is, or becomes, nullptr. |
4924 | const AlignedAttrTy *AA = nullptr; |
4925 | |
4926 | llvm::Value *Alignment = nullptr; // May or may not be a constant. |
4927 | llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. |
4928 | |
4929 | AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) |
4930 | : CGF(CGF_) { |
4931 | if (!FuncDecl) |
4932 | return; |
4933 | AA = FuncDecl->getAttr<AlignedAttrTy>(); |
4934 | } |
4935 | |
4936 | public: |
4937 | /// If we can, materialize the alignment as an attribute on return value. |
4938 | [[nodiscard]] llvm::AttributeList |
4939 | TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { |
4940 | if (!AA || OffsetCI || CGF.SanOpts.has(K: SanitizerKind::Alignment)) |
4941 | return Attrs; |
4942 | const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Val: Alignment); |
4943 | if (!AlignmentCI) |
4944 | return Attrs; |
4945 | // We may legitimately have non-power-of-2 alignment here. |
4946 | // If so, this is UB land, emit it via `@llvm.assume` instead. |
4947 | if (!AlignmentCI->getValue().isPowerOf2()) |
4948 | return Attrs; |
4949 | llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( |
4950 | Ctx&: CGF.getLLVMContext(), Attrs, |
4951 | NewAlign: llvm::Align( |
4952 | AlignmentCI->getLimitedValue(Limit: llvm::Value::MaximumAlignment))); |
4953 | AA = nullptr; // We're done. Disallow doing anything else. |
4954 | return NewAttrs; |
4955 | } |
4956 | |
4957 | /// Emit alignment assumption. |
4958 | /// This is a general fallback that we take if either there is an offset, |
4959 | /// or the alignment is variable or we are sanitizing for alignment. |
4960 | void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { |
4961 | if (!AA) |
4962 | return; |
4963 | CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, |
4964 | AA->getLocation(), Alignment, OffsetCI); |
4965 | AA = nullptr; // We're done. Disallow doing anything else. |
4966 | } |
4967 | }; |
4968 | |
4969 | /// Helper data structure to emit `AssumeAlignedAttr`. |
4970 | class AssumeAlignedAttrEmitter final |
4971 | : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { |
4972 | public: |
4973 | AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) |
4974 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { |
4975 | if (!AA) |
4976 | return; |
4977 | // It is guaranteed that the alignment/offset are constants. |
4978 | Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); |
4979 | if (Expr *Offset = AA->getOffset()) { |
4980 | OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); |
4981 | if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. |
4982 | OffsetCI = nullptr; |
4983 | } |
4984 | } |
4985 | }; |
4986 | |
4987 | /// Helper data structure to emit `AllocAlignAttr`. |
4988 | class AllocAlignAttrEmitter final |
4989 | : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { |
4990 | public: |
4991 | AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, |
4992 | const CallArgList &CallArgs) |
4993 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { |
4994 | if (!AA) |
4995 | return; |
4996 | // Alignment may or may not be a constant, and that is okay. |
4997 | Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] |
4998 | .getRValue(CGF) |
4999 | .getScalarVal(); |
5000 | } |
5001 | }; |
5002 | |
5003 | } // namespace |
5004 | |
5005 | static unsigned getMaxVectorWidth(const llvm::Type *Ty) { |
5006 | if (auto *VT = dyn_cast<llvm::VectorType>(Val: Ty)) |
5007 | return VT->getPrimitiveSizeInBits().getKnownMinValue(); |
5008 | if (auto *AT = dyn_cast<llvm::ArrayType>(Val: Ty)) |
5009 | return getMaxVectorWidth(Ty: AT->getElementType()); |
5010 | |
5011 | unsigned MaxVectorWidth = 0; |
5012 | if (auto *ST = dyn_cast<llvm::StructType>(Val: Ty)) |
5013 | for (auto *I : ST->elements()) |
5014 | MaxVectorWidth = std::max(a: MaxVectorWidth, b: getMaxVectorWidth(Ty: I)); |
5015 | return MaxVectorWidth; |
5016 | } |
5017 | |
5018 | RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, |
5019 | const CGCallee &Callee, |
5020 | ReturnValueSlot ReturnValue, |
5021 | const CallArgList &CallArgs, |
5022 | llvm::CallBase **callOrInvoke, bool IsMustTail, |
5023 | SourceLocation Loc) { |
5024 | // FIXME: We no longer need the types from CallArgs; lift up and simplify. |
5025 | |
5026 | assert(Callee.isOrdinary() || Callee.isVirtual()); |
5027 | |
5028 | // Handle struct-return functions by passing a pointer to the |
5029 | // location that we would like to return into. |
5030 | QualType RetTy = CallInfo.getReturnType(); |
5031 | const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); |
5032 | |
5033 | llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(FI: CallInfo); |
5034 | |
5035 | const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); |
5036 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl)) { |
5037 | // We can only guarantee that a function is called from the correct |
5038 | // context/function based on the appropriate target attributes, |
5039 | // so only check in the case where we have both always_inline and target |
5040 | // since otherwise we could be making a conditional call after a check for |
5041 | // the proper cpu features (and it won't cause code generation issues due to |
5042 | // function based code generation). |
5043 | if (TargetDecl->hasAttr<AlwaysInlineAttr>() && |
5044 | (TargetDecl->hasAttr<TargetAttr>() || |
5045 | (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>()))) |
5046 | checkTargetFeatures(Loc, TargetDecl: FD); |
5047 | |
5048 | // Some architectures (such as x86-64) have the ABI changed based on |
5049 | // attribute-target/features. Give them a chance to diagnose. |
5050 | CGM.getTargetCodeGenInfo().checkFunctionCallABI( |
5051 | CGM, CallLoc: Loc, Caller: dyn_cast_or_null<FunctionDecl>(Val: CurCodeDecl), Callee: FD, Args: CallArgs); |
5052 | } |
5053 | |
5054 | // 1. Set up the arguments. |
5055 | |
5056 | // If we're using inalloca, insert the allocation after the stack save. |
5057 | // FIXME: Do this earlier rather than hacking it in here! |
5058 | RawAddress ArgMemory = RawAddress::invalid(); |
5059 | if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { |
5060 | const llvm::DataLayout &DL = CGM.getDataLayout(); |
5061 | llvm::Instruction *IP = CallArgs.getStackBase(); |
5062 | llvm::AllocaInst *AI; |
5063 | if (IP) { |
5064 | IP = IP->getNextNode(); |
5065 | AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), |
5066 | "argmem" , IP); |
5067 | } else { |
5068 | AI = CreateTempAlloca(Ty: ArgStruct, Name: "argmem" ); |
5069 | } |
5070 | auto Align = CallInfo.getArgStructAlignment(); |
5071 | AI->setAlignment(Align.getAsAlign()); |
5072 | AI->setUsedWithInAlloca(true); |
5073 | assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); |
5074 | ArgMemory = RawAddress(AI, ArgStruct, Align); |
5075 | } |
5076 | |
5077 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); |
5078 | SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); |
5079 | |
5080 | // If the call returns a temporary with struct return, create a temporary |
5081 | // alloca to hold the result, unless one is given to us. |
5082 | Address SRetPtr = Address::invalid(); |
5083 | RawAddress SRetAlloca = RawAddress::invalid(); |
5084 | llvm::Value *UnusedReturnSizePtr = nullptr; |
5085 | if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { |
5086 | if (!ReturnValue.isNull()) { |
5087 | SRetPtr = ReturnValue.getAddress(); |
5088 | } else { |
5089 | SRetPtr = CreateMemTemp(T: RetTy, Name: "tmp" , Alloca: &SRetAlloca); |
5090 | if (HaveInsertPoint() && ReturnValue.isUnused()) { |
5091 | llvm::TypeSize size = |
5092 | CGM.getDataLayout().getTypeAllocSize(Ty: ConvertTypeForMem(T: RetTy)); |
5093 | UnusedReturnSizePtr = EmitLifetimeStart(Size: size, Addr: SRetAlloca.getPointer()); |
5094 | } |
5095 | } |
5096 | if (IRFunctionArgs.hasSRetArg()) { |
5097 | IRCallArgs[IRFunctionArgs.getSRetArgNo()] = |
5098 | getAsNaturalPointerTo(Addr: SRetPtr, PointeeType: RetTy); |
5099 | } else if (RetAI.isInAlloca()) { |
5100 | Address Addr = |
5101 | Builder.CreateStructGEP(Addr: ArgMemory, Index: RetAI.getInAllocaFieldIndex()); |
5102 | Builder.CreateStore(Val: getAsNaturalPointerTo(Addr: SRetPtr, PointeeType: RetTy), Addr); |
5103 | } |
5104 | } |
5105 | |
5106 | RawAddress swiftErrorTemp = RawAddress::invalid(); |
5107 | Address swiftErrorArg = Address::invalid(); |
5108 | |
5109 | // When passing arguments using temporary allocas, we need to add the |
5110 | // appropriate lifetime markers. This vector keeps track of all the lifetime |
5111 | // markers that need to be ended right after the call. |
5112 | SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall; |
5113 | |
5114 | // Translate all of the arguments as necessary to match the IR lowering. |
5115 | assert(CallInfo.arg_size() == CallArgs.size() && |
5116 | "Mismatch between function signature & arguments." ); |
5117 | unsigned ArgNo = 0; |
5118 | CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); |
5119 | for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); |
5120 | I != E; ++I, ++info_it, ++ArgNo) { |
5121 | const ABIArgInfo &ArgInfo = info_it->info; |
5122 | |
5123 | // Insert a padding argument to ensure proper alignment. |
5124 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) |
5125 | IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
5126 | llvm::UndefValue::get(T: ArgInfo.getPaddingType()); |
5127 | |
5128 | unsigned FirstIRArg, NumIRArgs; |
5129 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
5130 | |
5131 | bool ArgHasMaybeUndefAttr = |
5132 | IsArgumentMaybeUndef(TargetDecl, NumRequiredArgs: CallInfo.getNumRequiredArgs(), ArgNo); |
5133 | |
5134 | switch (ArgInfo.getKind()) { |
5135 | case ABIArgInfo::InAlloca: { |
5136 | assert(NumIRArgs == 0); |
5137 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86); |
5138 | if (I->isAggregate()) { |
5139 | RawAddress Addr = I->hasLValue() |
5140 | ? I->getKnownLValue().getAddress(CGF&: *this) |
5141 | : I->getKnownRValue().getAggregateAddress(); |
5142 | llvm::Instruction *Placeholder = |
5143 | cast<llvm::Instruction>(Val: Addr.getPointer()); |
5144 | |
5145 | if (!ArgInfo.getInAllocaIndirect()) { |
5146 | // Replace the placeholder with the appropriate argument slot GEP. |
5147 | CGBuilderTy::InsertPoint IP = Builder.saveIP(); |
5148 | Builder.SetInsertPoint(Placeholder); |
5149 | Addr = Builder.CreateStructGEP(Addr: ArgMemory, |
5150 | Index: ArgInfo.getInAllocaFieldIndex()); |
5151 | Builder.restoreIP(IP); |
5152 | } else { |
5153 | // For indirect things such as overaligned structs, replace the |
5154 | // placeholder with a regular aggregate temporary alloca. Store the |
5155 | // address of this alloca into the struct. |
5156 | Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp" ); |
5157 | Address ArgSlot = Builder.CreateStructGEP( |
5158 | Addr: ArgMemory, Index: ArgInfo.getInAllocaFieldIndex()); |
5159 | Builder.CreateStore(Val: Addr.getPointer(), Addr: ArgSlot); |
5160 | } |
5161 | deferPlaceholderReplacement(Old: Placeholder, New: Addr.getPointer()); |
5162 | } else if (ArgInfo.getInAllocaIndirect()) { |
5163 | // Make a temporary alloca and store the address of it into the argument |
5164 | // struct. |
5165 | RawAddress Addr = CreateMemTempWithoutCast( |
5166 | I->Ty, getContext().getTypeAlignInChars(I->Ty), |
5167 | "indirect-arg-temp" ); |
5168 | I->copyInto(CGF&: *this, Addr); |
5169 | Address ArgSlot = |
5170 | Builder.CreateStructGEP(Addr: ArgMemory, Index: ArgInfo.getInAllocaFieldIndex()); |
5171 | Builder.CreateStore(Val: Addr.getPointer(), Addr: ArgSlot); |
5172 | } else { |
5173 | // Store the RValue into the argument struct. |
5174 | Address Addr = |
5175 | Builder.CreateStructGEP(Addr: ArgMemory, Index: ArgInfo.getInAllocaFieldIndex()); |
5176 | Addr = Addr.withElementType(ElemTy: ConvertTypeForMem(T: I->Ty)); |
5177 | I->copyInto(CGF&: *this, Addr); |
5178 | } |
5179 | break; |
5180 | } |
5181 | |
5182 | case ABIArgInfo::Indirect: |
5183 | case ABIArgInfo::IndirectAliased: { |
5184 | assert(NumIRArgs == 1); |
5185 | if (!I->isAggregate()) { |
5186 | // Make a temporary alloca to pass the argument. |
5187 | RawAddress Addr = CreateMemTempWithoutCast( |
5188 | I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp" ); |
5189 | |
5190 | llvm::Value *Val = getAsNaturalPointerTo(Addr, PointeeType: I->Ty); |
5191 | if (ArgHasMaybeUndefAttr) |
5192 | Val = Builder.CreateFreeze(V: Val); |
5193 | IRCallArgs[FirstIRArg] = Val; |
5194 | |
5195 | I->copyInto(CGF&: *this, Addr); |
5196 | } else { |
5197 | // We want to avoid creating an unnecessary temporary+copy here; |
5198 | // however, we need one in three cases: |
5199 | // 1. If the argument is not byval, and we are required to copy the |
5200 | // source. (This case doesn't occur on any common architecture.) |
5201 | // 2. If the argument is byval, RV is not sufficiently aligned, and |
5202 | // we cannot force it to be sufficiently aligned. |
5203 | // 3. If the argument is byval, but RV is not located in default |
5204 | // or alloca address space. |
5205 | Address Addr = I->hasLValue() |
5206 | ? I->getKnownLValue().getAddress(CGF&: *this) |
5207 | : I->getKnownRValue().getAggregateAddress(); |
5208 | CharUnits Align = ArgInfo.getIndirectAlign(); |
5209 | const llvm::DataLayout *TD = &CGM.getDataLayout(); |
5210 | |
5211 | assert((FirstIRArg >= IRFuncTy->getNumParams() || |
5212 | IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == |
5213 | TD->getAllocaAddrSpace()) && |
5214 | "indirect argument must be in alloca address space" ); |
5215 | |
5216 | bool NeedCopy = false; |
5217 | if (Addr.getAlignment() < Align && |
5218 | llvm::getOrEnforceKnownAlignment(V: Addr.emitRawPointer(CGF&: *this), |
5219 | PrefAlign: Align.getAsAlign(), |
5220 | DL: *TD) < Align.getAsAlign()) { |
5221 | NeedCopy = true; |
5222 | } else if (I->hasLValue()) { |
5223 | auto LV = I->getKnownLValue(); |
5224 | auto AS = LV.getAddressSpace(); |
5225 | |
5226 | bool isByValOrRef = |
5227 | ArgInfo.isIndirectAliased() || ArgInfo.getIndirectByVal(); |
5228 | |
5229 | if (!isByValOrRef || |
5230 | (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) { |
5231 | NeedCopy = true; |
5232 | } |
5233 | if (!getLangOpts().OpenCL) { |
5234 | if ((isByValOrRef && |
5235 | (AS != LangAS::Default && |
5236 | AS != CGM.getASTAllocaAddressSpace()))) { |
5237 | NeedCopy = true; |
5238 | } |
5239 | } |
5240 | // For OpenCL even if RV is located in default or alloca address space |
5241 | // we don't want to perform address space cast for it. |
5242 | else if ((isByValOrRef && |
5243 | Addr.getType()->getAddressSpace() != IRFuncTy-> |
5244 | getParamType(i: FirstIRArg)->getPointerAddressSpace())) { |
5245 | NeedCopy = true; |
5246 | } |
5247 | } |
5248 | |
5249 | if (NeedCopy) { |
5250 | // Create an aligned temporary, and copy to it. |
5251 | RawAddress AI = CreateMemTempWithoutCast( |
5252 | I->Ty, ArgInfo.getIndirectAlign(), "byval-temp" ); |
5253 | llvm::Value *Val = getAsNaturalPointerTo(Addr: AI, PointeeType: I->Ty); |
5254 | if (ArgHasMaybeUndefAttr) |
5255 | Val = Builder.CreateFreeze(V: Val); |
5256 | IRCallArgs[FirstIRArg] = Val; |
5257 | |
5258 | // Emit lifetime markers for the temporary alloca. |
5259 | llvm::TypeSize ByvalTempElementSize = |
5260 | CGM.getDataLayout().getTypeAllocSize(Ty: AI.getElementType()); |
5261 | llvm::Value *LifetimeSize = |
5262 | EmitLifetimeStart(Size: ByvalTempElementSize, Addr: AI.getPointer()); |
5263 | |
5264 | // Add cleanup code to emit the end lifetime marker after the call. |
5265 | if (LifetimeSize) // In case we disabled lifetime markers. |
5266 | CallLifetimeEndAfterCall.emplace_back(Args&: AI, Args&: LifetimeSize); |
5267 | |
5268 | // Generate the copy. |
5269 | I->copyInto(CGF&: *this, Addr: AI); |
5270 | } else { |
5271 | // Skip the extra memcpy call. |
5272 | llvm::Value *V = getAsNaturalPointerTo(Addr, PointeeType: I->Ty); |
5273 | auto *T = llvm::PointerType::get( |
5274 | C&: CGM.getLLVMContext(), AddressSpace: CGM.getDataLayout().getAllocaAddrSpace()); |
5275 | |
5276 | llvm::Value *Val = getTargetHooks().performAddrSpaceCast( |
5277 | *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T, |
5278 | true); |
5279 | if (ArgHasMaybeUndefAttr) |
5280 | Val = Builder.CreateFreeze(V: Val); |
5281 | IRCallArgs[FirstIRArg] = Val; |
5282 | } |
5283 | } |
5284 | break; |
5285 | } |
5286 | |
5287 | case ABIArgInfo::Ignore: |
5288 | assert(NumIRArgs == 0); |
5289 | break; |
5290 | |
5291 | case ABIArgInfo::Extend: |
5292 | case ABIArgInfo::Direct: { |
5293 | if (!isa<llvm::StructType>(Val: ArgInfo.getCoerceToType()) && |
5294 | ArgInfo.getCoerceToType() == ConvertType(info_it->type) && |
5295 | ArgInfo.getDirectOffset() == 0) { |
5296 | assert(NumIRArgs == 1); |
5297 | llvm::Value *V; |
5298 | if (!I->isAggregate()) |
5299 | V = I->getKnownRValue().getScalarVal(); |
5300 | else |
5301 | V = Builder.CreateLoad( |
5302 | Addr: I->hasLValue() ? I->getKnownLValue().getAddress(CGF&: *this) |
5303 | : I->getKnownRValue().getAggregateAddress()); |
5304 | |
5305 | // Implement swifterror by copying into a new swifterror argument. |
5306 | // We'll write back in the normal path out of the call. |
5307 | if (CallInfo.getExtParameterInfo(argIndex: ArgNo).getABI() |
5308 | == ParameterABI::SwiftErrorResult) { |
5309 | assert(!swiftErrorTemp.isValid() && "multiple swifterror args" ); |
5310 | |
5311 | QualType pointeeTy = I->Ty->getPointeeType(); |
5312 | swiftErrorArg = makeNaturalAddressForPointer( |
5313 | Ptr: V, T: pointeeTy, Alignment: getContext().getTypeAlignInChars(T: pointeeTy)); |
5314 | |
5315 | swiftErrorTemp = |
5316 | CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp" ); |
5317 | V = swiftErrorTemp.getPointer(); |
5318 | cast<llvm::AllocaInst>(Val: V)->setSwiftError(true); |
5319 | |
5320 | llvm::Value *errorValue = Builder.CreateLoad(Addr: swiftErrorArg); |
5321 | Builder.CreateStore(Val: errorValue, Addr: swiftErrorTemp); |
5322 | } |
5323 | |
5324 | // We might have to widen integers, but we should never truncate. |
5325 | if (ArgInfo.getCoerceToType() != V->getType() && |
5326 | V->getType()->isIntegerTy()) |
5327 | V = Builder.CreateZExt(V, DestTy: ArgInfo.getCoerceToType()); |
5328 | |
5329 | // If the argument doesn't match, perform a bitcast to coerce it. This |
5330 | // can happen due to trivial type mismatches. |
5331 | if (FirstIRArg < IRFuncTy->getNumParams() && |
5332 | V->getType() != IRFuncTy->getParamType(i: FirstIRArg)) |
5333 | V = Builder.CreateBitCast(V, DestTy: IRFuncTy->getParamType(i: FirstIRArg)); |
5334 | |
5335 | if (ArgHasMaybeUndefAttr) |
5336 | V = Builder.CreateFreeze(V); |
5337 | IRCallArgs[FirstIRArg] = V; |
5338 | break; |
5339 | } |
5340 | |
5341 | llvm::StructType *STy = |
5342 | dyn_cast<llvm::StructType>(Val: ArgInfo.getCoerceToType()); |
5343 | if (STy && ArgInfo.isDirect() && !ArgInfo.getCanBeFlattened()) { |
5344 | llvm::Type *SrcTy = ConvertTypeForMem(T: I->Ty); |
5345 | [[maybe_unused]] llvm::TypeSize SrcTypeSize = |
5346 | CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
5347 | [[maybe_unused]] llvm::TypeSize DstTypeSize = |
5348 | CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
5349 | if (STy->containsHomogeneousScalableVectorTypes()) { |
5350 | assert(SrcTypeSize == DstTypeSize && |
5351 | "Only allow non-fractional movement of structure with " |
5352 | "homogeneous scalable vector type" ); |
5353 | |
5354 | IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal(); |
5355 | break; |
5356 | } |
5357 | } |
5358 | |
5359 | // FIXME: Avoid the conversion through memory if possible. |
5360 | Address Src = Address::invalid(); |
5361 | if (!I->isAggregate()) { |
5362 | Src = CreateMemTemp(I->Ty, "coerce" ); |
5363 | I->copyInto(CGF&: *this, Addr: Src); |
5364 | } else { |
5365 | Src = I->hasLValue() ? I->getKnownLValue().getAddress(CGF&: *this) |
5366 | : I->getKnownRValue().getAggregateAddress(); |
5367 | } |
5368 | |
5369 | // If the value is offset in memory, apply the offset now. |
5370 | Src = emitAddressAtOffset(CGF&: *this, addr: Src, info: ArgInfo); |
5371 | |
5372 | // Fast-isel and the optimizer generally like scalar values better than |
5373 | // FCAs, so we flatten them if this is safe to do for this argument. |
5374 | if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { |
5375 | llvm::Type *SrcTy = Src.getElementType(); |
5376 | llvm::TypeSize SrcTypeSize = |
5377 | CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
5378 | llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
5379 | if (SrcTypeSize.isScalable()) { |
5380 | assert(STy->containsHomogeneousScalableVectorTypes() && |
5381 | "ABI only supports structure with homogeneous scalable vector " |
5382 | "type" ); |
5383 | assert(SrcTypeSize == DstTypeSize && |
5384 | "Only allow non-fractional movement of structure with " |
5385 | "homogeneous scalable vector type" ); |
5386 | assert(NumIRArgs == STy->getNumElements()); |
5387 | |
5388 | llvm::Value *StoredStructValue = |
5389 | Builder.CreateLoad(Addr: Src, Name: Src.getName() + ".tuple" ); |
5390 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
5391 | llvm::Value * = Builder.CreateExtractValue( |
5392 | Agg: StoredStructValue, Idxs: i, Name: Src.getName() + ".extract" + Twine(i)); |
5393 | IRCallArgs[FirstIRArg + i] = Extract; |
5394 | } |
5395 | } else { |
5396 | uint64_t SrcSize = SrcTypeSize.getFixedValue(); |
5397 | uint64_t DstSize = DstTypeSize.getFixedValue(); |
5398 | |
5399 | // If the source type is smaller than the destination type of the |
5400 | // coerce-to logic, copy the source value into a temp alloca the size |
5401 | // of the destination type to allow loading all of it. The bits past |
5402 | // the source value are left undef. |
5403 | if (SrcSize < DstSize) { |
5404 | Address TempAlloca = CreateTempAlloca(Ty: STy, align: Src.getAlignment(), |
5405 | Name: Src.getName() + ".coerce" ); |
5406 | Builder.CreateMemCpy(Dest: TempAlloca, Src, Size: SrcSize); |
5407 | Src = TempAlloca; |
5408 | } else { |
5409 | Src = Src.withElementType(ElemTy: STy); |
5410 | } |
5411 | |
5412 | assert(NumIRArgs == STy->getNumElements()); |
5413 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
5414 | Address EltPtr = Builder.CreateStructGEP(Addr: Src, Index: i); |
5415 | llvm::Value *LI = Builder.CreateLoad(Addr: EltPtr); |
5416 | if (ArgHasMaybeUndefAttr) |
5417 | LI = Builder.CreateFreeze(V: LI); |
5418 | IRCallArgs[FirstIRArg + i] = LI; |
5419 | } |
5420 | } |
5421 | } else { |
5422 | // In the simple case, just pass the coerced loaded value. |
5423 | assert(NumIRArgs == 1); |
5424 | llvm::Value *Load = |
5425 | CreateCoercedLoad(Src, Ty: ArgInfo.getCoerceToType(), CGF&: *this); |
5426 | |
5427 | if (CallInfo.isCmseNSCall()) { |
5428 | // For certain parameter types, clear padding bits, as they may reveal |
5429 | // sensitive information. |
5430 | // Small struct/union types are passed as integer arrays. |
5431 | auto *ATy = dyn_cast<llvm::ArrayType>(Val: Load->getType()); |
5432 | if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType())) |
5433 | Load = EmitCMSEClearRecord(Load, ATy, I->Ty); |
5434 | } |
5435 | |
5436 | if (ArgHasMaybeUndefAttr) |
5437 | Load = Builder.CreateFreeze(V: Load); |
5438 | IRCallArgs[FirstIRArg] = Load; |
5439 | } |
5440 | |
5441 | break; |
5442 | } |
5443 | |
5444 | case ABIArgInfo::CoerceAndExpand: { |
5445 | auto coercionType = ArgInfo.getCoerceAndExpandType(); |
5446 | auto layout = CGM.getDataLayout().getStructLayout(Ty: coercionType); |
5447 | |
5448 | llvm::Value *tempSize = nullptr; |
5449 | Address addr = Address::invalid(); |
5450 | RawAddress AllocaAddr = RawAddress::invalid(); |
5451 | if (I->isAggregate()) { |
5452 | addr = I->hasLValue() ? I->getKnownLValue().getAddress(CGF&: *this) |
5453 | : I->getKnownRValue().getAggregateAddress(); |
5454 | |
5455 | } else { |
5456 | RValue RV = I->getKnownRValue(); |
5457 | assert(RV.isScalar()); // complex should always just be direct |
5458 | |
5459 | llvm::Type *scalarType = RV.getScalarVal()->getType(); |
5460 | auto scalarSize = CGM.getDataLayout().getTypeAllocSize(Ty: scalarType); |
5461 | auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(Ty: scalarType); |
5462 | |
5463 | // Materialize to a temporary. |
5464 | addr = CreateTempAlloca( |
5465 | Ty: RV.getScalarVal()->getType(), |
5466 | align: CharUnits::fromQuantity(Quantity: std::max(a: layout->getAlignment(), b: scalarAlign)), |
5467 | Name: "tmp" , |
5468 | /*ArraySize=*/nullptr, Alloca: &AllocaAddr); |
5469 | tempSize = EmitLifetimeStart(Size: scalarSize, Addr: AllocaAddr.getPointer()); |
5470 | |
5471 | Builder.CreateStore(Val: RV.getScalarVal(), Addr: addr); |
5472 | } |
5473 | |
5474 | addr = addr.withElementType(ElemTy: coercionType); |
5475 | |
5476 | unsigned IRArgPos = FirstIRArg; |
5477 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
5478 | llvm::Type *eltType = coercionType->getElementType(N: i); |
5479 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; |
5480 | Address eltAddr = Builder.CreateStructGEP(Addr: addr, Index: i); |
5481 | llvm::Value *elt = Builder.CreateLoad(Addr: eltAddr); |
5482 | if (ArgHasMaybeUndefAttr) |
5483 | elt = Builder.CreateFreeze(V: elt); |
5484 | IRCallArgs[IRArgPos++] = elt; |
5485 | } |
5486 | assert(IRArgPos == FirstIRArg + NumIRArgs); |
5487 | |
5488 | if (tempSize) { |
5489 | EmitLifetimeEnd(Size: tempSize, Addr: AllocaAddr.getPointer()); |
5490 | } |
5491 | |
5492 | break; |
5493 | } |
5494 | |
5495 | case ABIArgInfo::Expand: { |
5496 | unsigned IRArgPos = FirstIRArg; |
5497 | ExpandTypeToArgs(Ty: I->Ty, Arg: *I, IRFuncTy, IRCallArgs, IRCallArgPos&: IRArgPos); |
5498 | assert(IRArgPos == FirstIRArg + NumIRArgs); |
5499 | break; |
5500 | } |
5501 | } |
5502 | } |
5503 | |
5504 | const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(CGF&: *this); |
5505 | llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); |
5506 | |
5507 | // If we're using inalloca, set up that argument. |
5508 | if (ArgMemory.isValid()) { |
5509 | llvm::Value *Arg = ArgMemory.getPointer(); |
5510 | assert(IRFunctionArgs.hasInallocaArg()); |
5511 | IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; |
5512 | } |
5513 | |
5514 | // 2. Prepare the function pointer. |
5515 | |
5516 | // If the callee is a bitcast of a non-variadic function to have a |
5517 | // variadic function pointer type, check to see if we can remove the |
5518 | // bitcast. This comes up with unprototyped functions. |
5519 | // |
5520 | // This makes the IR nicer, but more importantly it ensures that we |
5521 | // can inline the function at -O0 if it is marked always_inline. |
5522 | auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT, |
5523 | llvm::Value *Ptr) -> llvm::Function * { |
5524 | if (!CalleeFT->isVarArg()) |
5525 | return nullptr; |
5526 | |
5527 | // Get underlying value if it's a bitcast |
5528 | if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Val: Ptr)) { |
5529 | if (CE->getOpcode() == llvm::Instruction::BitCast) |
5530 | Ptr = CE->getOperand(i_nocapture: 0); |
5531 | } |
5532 | |
5533 | llvm::Function *OrigFn = dyn_cast<llvm::Function>(Val: Ptr); |
5534 | if (!OrigFn) |
5535 | return nullptr; |
5536 | |
5537 | llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); |
5538 | |
5539 | // If the original type is variadic, or if any of the component types |
5540 | // disagree, we cannot remove the cast. |
5541 | if (OrigFT->isVarArg() || |
5542 | OrigFT->getNumParams() != CalleeFT->getNumParams() || |
5543 | OrigFT->getReturnType() != CalleeFT->getReturnType()) |
5544 | return nullptr; |
5545 | |
5546 | for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) |
5547 | if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) |
5548 | return nullptr; |
5549 | |
5550 | return OrigFn; |
5551 | }; |
5552 | |
5553 | if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) { |
5554 | CalleePtr = OrigFn; |
5555 | IRFuncTy = OrigFn->getFunctionType(); |
5556 | } |
5557 | |
5558 | // 3. Perform the actual call. |
5559 | |
5560 | // Deactivate any cleanups that we're supposed to do immediately before |
5561 | // the call. |
5562 | if (!CallArgs.getCleanupsToDeactivate().empty()) |
5563 | deactivateArgCleanupsBeforeCall(CGF&: *this, CallArgs); |
5564 | |
5565 | // Assert that the arguments we computed match up. The IR verifier |
5566 | // will catch this, but this is a common enough source of problems |
5567 | // during IRGen changes that it's way better for debugging to catch |
5568 | // it ourselves here. |
5569 | #ifndef NDEBUG |
5570 | assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); |
5571 | for (unsigned i = 0; i < IRCallArgs.size(); ++i) { |
5572 | // Inalloca argument can have different type. |
5573 | if (IRFunctionArgs.hasInallocaArg() && |
5574 | i == IRFunctionArgs.getInallocaArgNo()) |
5575 | continue; |
5576 | if (i < IRFuncTy->getNumParams()) |
5577 | assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); |
5578 | } |
5579 | #endif |
5580 | |
5581 | // Update the largest vector width if any arguments have vector types. |
5582 | for (unsigned i = 0; i < IRCallArgs.size(); ++i) |
5583 | LargestVectorWidth = std::max(a: LargestVectorWidth, |
5584 | b: getMaxVectorWidth(Ty: IRCallArgs[i]->getType())); |
5585 | |
5586 | // Compute the calling convention and attributes. |
5587 | unsigned CallingConv; |
5588 | llvm::AttributeList Attrs; |
5589 | CGM.ConstructAttributeList(Name: CalleePtr->getName(), FI: CallInfo, |
5590 | CalleeInfo: Callee.getAbstractInfo(), AttrList&: Attrs, CallingConv, |
5591 | /*AttrOnCallSite=*/true, |
5592 | /*IsThunk=*/false); |
5593 | |
5594 | if (CallingConv == llvm::CallingConv::X86_VectorCall && |
5595 | getTarget().getTriple().isWindowsArm64EC()) { |
5596 | CGM.Error(loc: Loc, error: "__vectorcall calling convention is not currently " |
5597 | "supported" ); |
5598 | } |
5599 | |
5600 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurFuncDecl)) { |
5601 | if (FD->hasAttr<StrictFPAttr>()) |
5602 | // All calls within a strictfp function are marked strictfp |
5603 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP); |
5604 | |
5605 | // If -ffast-math is enabled and the function is guarded by an |
5606 | // '__attribute__((optnone)) adjust the memory attribute so the BE emits the |
5607 | // library call instead of the intrinsic. |
5608 | if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath) |
5609 | CGM.AdjustMemoryAttribute(Name: CalleePtr->getName(), CalleeInfo: Callee.getAbstractInfo(), |
5610 | Attrs); |
5611 | } |
5612 | // Add call-site nomerge attribute if exists. |
5613 | if (InNoMergeAttributedStmt) |
5614 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge); |
5615 | |
5616 | // Add call-site noinline attribute if exists. |
5617 | if (InNoInlineAttributedStmt) |
5618 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline); |
5619 | |
5620 | // Add call-site always_inline attribute if exists. |
5621 | if (InAlwaysInlineAttributedStmt) |
5622 | Attrs = |
5623 | Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline); |
5624 | |
5625 | // Apply some call-site-specific attributes. |
5626 | // TODO: work this into building the attribute set. |
5627 | |
5628 | // Apply always_inline to all calls within flatten functions. |
5629 | // FIXME: should this really take priority over __try, below? |
5630 | if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && |
5631 | !InNoInlineAttributedStmt && |
5632 | !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) { |
5633 | Attrs = |
5634 | Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline); |
5635 | } |
5636 | |
5637 | // Disable inlining inside SEH __try blocks. |
5638 | if (isSEHTryScope()) { |
5639 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline); |
5640 | } |
5641 | |
5642 | // Decide whether to use a call or an invoke. |
5643 | bool CannotThrow; |
5644 | if (currentFunctionUsesSEHTry()) { |
5645 | // SEH cares about asynchronous exceptions, so everything can "throw." |
5646 | CannotThrow = false; |
5647 | } else if (isCleanupPadScope() && |
5648 | EHPersonality::get(CGF&: *this).isMSVCXXPersonality()) { |
5649 | // The MSVC++ personality will implicitly terminate the program if an |
5650 | // exception is thrown during a cleanup outside of a try/catch. |
5651 | // We don't need to model anything in IR to get this behavior. |
5652 | CannotThrow = true; |
5653 | } else { |
5654 | // Otherwise, nounwind call sites will never throw. |
5655 | CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind); |
5656 | |
5657 | if (auto *FPtr = dyn_cast<llvm::Function>(Val: CalleePtr)) |
5658 | if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind)) |
5659 | CannotThrow = true; |
5660 | } |
5661 | |
5662 | // If we made a temporary, be sure to clean up after ourselves. Note that we |
5663 | // can't depend on being inside of an ExprWithCleanups, so we need to manually |
5664 | // pop this cleanup later on. Being eager about this is OK, since this |
5665 | // temporary is 'invisible' outside of the callee. |
5666 | if (UnusedReturnSizePtr) |
5667 | pushFullExprCleanup<CallLifetimeEnd>(kind: NormalEHLifetimeMarker, A: SRetAlloca, |
5668 | A: UnusedReturnSizePtr); |
5669 | |
5670 | llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); |
5671 | |
5672 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
5673 | getBundlesForFunclet(Callee: CalleePtr); |
5674 | |
5675 | if (SanOpts.has(K: SanitizerKind::KCFI) && |
5676 | !isa_and_nonnull<FunctionDecl>(Val: TargetDecl)) |
5677 | EmitKCFIOperandBundle(Callee: ConcreteCallee, Bundles&: BundleList); |
5678 | |
5679 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) |
5680 | if (FD->hasAttr<StrictFPAttr>()) |
5681 | // All calls within a strictfp function are marked strictfp |
5682 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP); |
5683 | |
5684 | AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl); |
5685 | Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); |
5686 | |
5687 | AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs); |
5688 | Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); |
5689 | |
5690 | // Emit the actual call/invoke instruction. |
5691 | llvm::CallBase *CI; |
5692 | if (!InvokeDest) { |
5693 | CI = Builder.CreateCall(FTy: IRFuncTy, Callee: CalleePtr, Args: IRCallArgs, OpBundles: BundleList); |
5694 | } else { |
5695 | llvm::BasicBlock *Cont = createBasicBlock(name: "invoke.cont" ); |
5696 | CI = Builder.CreateInvoke(Ty: IRFuncTy, Callee: CalleePtr, NormalDest: Cont, UnwindDest: InvokeDest, Args: IRCallArgs, |
5697 | OpBundles: BundleList); |
5698 | EmitBlock(BB: Cont); |
5699 | } |
5700 | if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() && |
5701 | CI->getCalledFunction()->getName().starts_with(Prefix: "_Z4sqrt" )) { |
5702 | SetSqrtFPAccuracy(CI); |
5703 | } |
5704 | if (callOrInvoke) |
5705 | *callOrInvoke = CI; |
5706 | |
5707 | // If this is within a function that has the guard(nocf) attribute and is an |
5708 | // indirect call, add the "guard_nocf" attribute to this call to indicate that |
5709 | // Control Flow Guard checks should not be added, even if the call is inlined. |
5710 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: CurFuncDecl)) { |
5711 | if (const auto *A = FD->getAttr<CFGuardAttr>()) { |
5712 | if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction()) |
5713 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Kind: "guard_nocf" ); |
5714 | } |
5715 | } |
5716 | |
5717 | // Apply the attributes and calling convention. |
5718 | CI->setAttributes(Attrs); |
5719 | CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); |
5720 | |
5721 | // Apply various metadata. |
5722 | |
5723 | if (!CI->getType()->isVoidTy()) |
5724 | CI->setName("call" ); |
5725 | |
5726 | if (getTarget().getTriple().isSPIRVLogical() && CI->isConvergent()) |
5727 | CI = addControlledConvergenceToken(Input: CI); |
5728 | |
5729 | // Update largest vector width from the return type. |
5730 | LargestVectorWidth = |
5731 | std::max(a: LargestVectorWidth, b: getMaxVectorWidth(Ty: CI->getType())); |
5732 | |
5733 | // Insert instrumentation or attach profile metadata at indirect call sites. |
5734 | // For more details, see the comment before the definition of |
5735 | // IPVK_IndirectCallTarget in InstrProfData.inc. |
5736 | if (!CI->getCalledFunction()) |
5737 | PGO.valueProfile(Builder, ValueKind: llvm::IPVK_IndirectCallTarget, |
5738 | ValueSite: CI, ValuePtr: CalleePtr); |
5739 | |
5740 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
5741 | // optimizer it can aggressively ignore unwind edges. |
5742 | if (CGM.getLangOpts().ObjCAutoRefCount) |
5743 | AddObjCARCExceptionMetadata(Inst: CI); |
5744 | |
5745 | // Set tail call kind if necessary. |
5746 | if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(Val: CI)) { |
5747 | if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) |
5748 | Call->setTailCallKind(llvm::CallInst::TCK_NoTail); |
5749 | else if (IsMustTail) |
5750 | Call->setTailCallKind(llvm::CallInst::TCK_MustTail); |
5751 | } |
5752 | |
5753 | // Add metadata for calls to MSAllocator functions |
5754 | if (getDebugInfo() && TargetDecl && |
5755 | TargetDecl->hasAttr<MSAllocatorAttr>()) |
5756 | getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: RetTy->getPointeeType(), Loc); |
5757 | |
5758 | // Add metadata if calling an __attribute__((error(""))) or warning fn. |
5759 | if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) { |
5760 | llvm::ConstantInt *Line = |
5761 | llvm::ConstantInt::get(Ty: Int32Ty, V: Loc.getRawEncoding()); |
5762 | llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(C: Line); |
5763 | llvm::MDTuple *MDT = llvm::MDNode::get(Context&: getLLVMContext(), MDs: {MD}); |
5764 | CI->setMetadata(Kind: "srcloc" , Node: MDT); |
5765 | } |
5766 | |
5767 | // 4. Finish the call. |
5768 | |
5769 | // If the call doesn't return, finish the basic block and clear the |
5770 | // insertion point; this allows the rest of IRGen to discard |
5771 | // unreachable code. |
5772 | if (CI->doesNotReturn()) { |
5773 | if (UnusedReturnSizePtr) |
5774 | PopCleanupBlock(); |
5775 | |
5776 | // Strip away the noreturn attribute to better diagnose unreachable UB. |
5777 | if (SanOpts.has(K: SanitizerKind::Unreachable)) { |
5778 | // Also remove from function since CallBase::hasFnAttr additionally checks |
5779 | // attributes of the called function. |
5780 | if (auto *F = CI->getCalledFunction()) |
5781 | F->removeFnAttr(llvm::Attribute::NoReturn); |
5782 | CI->removeFnAttr(llvm::Attribute::NoReturn); |
5783 | |
5784 | // Avoid incompatibility with ASan which relies on the `noreturn` |
5785 | // attribute to insert handler calls. |
5786 | if (SanOpts.hasOneOf(K: SanitizerKind::Address | |
5787 | SanitizerKind::KernelAddress)) { |
5788 | SanitizerScope SanScope(this); |
5789 | llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder); |
5790 | Builder.SetInsertPoint(CI); |
5791 | auto *FnType = llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false); |
5792 | llvm::FunctionCallee Fn = |
5793 | CGM.CreateRuntimeFunction(Ty: FnType, Name: "__asan_handle_no_return" ); |
5794 | EmitNounwindRuntimeCall(callee: Fn); |
5795 | } |
5796 | } |
5797 | |
5798 | EmitUnreachable(Loc); |
5799 | Builder.ClearInsertionPoint(); |
5800 | |
5801 | // FIXME: For now, emit a dummy basic block because expr emitters in |
5802 | // generally are not ready to handle emitting expressions at unreachable |
5803 | // points. |
5804 | EnsureInsertPoint(); |
5805 | |
5806 | // Return a reasonable RValue. |
5807 | return GetUndefRValue(Ty: RetTy); |
5808 | } |
5809 | |
5810 | // If this is a musttail call, return immediately. We do not branch to the |
5811 | // epilogue in this case. |
5812 | if (IsMustTail) { |
5813 | for (auto it = EHStack.find(sp: CurrentCleanupScopeDepth); it != EHStack.end(); |
5814 | ++it) { |
5815 | EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(Val: &*it); |
5816 | if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn())) |
5817 | CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups" ); |
5818 | } |
5819 | if (CI->getType()->isVoidTy()) |
5820 | Builder.CreateRetVoid(); |
5821 | else |
5822 | Builder.CreateRet(V: CI); |
5823 | Builder.ClearInsertionPoint(); |
5824 | EnsureInsertPoint(); |
5825 | return GetUndefRValue(Ty: RetTy); |
5826 | } |
5827 | |
5828 | // Perform the swifterror writeback. |
5829 | if (swiftErrorTemp.isValid()) { |
5830 | llvm::Value *errorResult = Builder.CreateLoad(Addr: swiftErrorTemp); |
5831 | Builder.CreateStore(Val: errorResult, Addr: swiftErrorArg); |
5832 | } |
5833 | |
5834 | // Emit any call-associated writebacks immediately. Arguably this |
5835 | // should happen after any return-value munging. |
5836 | if (CallArgs.hasWritebacks()) |
5837 | emitWritebacks(CGF&: *this, args: CallArgs); |
5838 | |
5839 | // The stack cleanup for inalloca arguments has to run out of the normal |
5840 | // lexical order, so deactivate it and run it manually here. |
5841 | CallArgs.freeArgumentMemory(CGF&: *this); |
5842 | |
5843 | // Extract the return value. |
5844 | RValue Ret = [&] { |
5845 | switch (RetAI.getKind()) { |
5846 | case ABIArgInfo::CoerceAndExpand: { |
5847 | auto coercionType = RetAI.getCoerceAndExpandType(); |
5848 | |
5849 | Address addr = SRetPtr.withElementType(ElemTy: coercionType); |
5850 | |
5851 | assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); |
5852 | bool = isa<llvm::StructType>(Val: CI->getType()); |
5853 | |
5854 | unsigned unpaddedIndex = 0; |
5855 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
5856 | llvm::Type *eltType = coercionType->getElementType(N: i); |
5857 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; |
5858 | Address eltAddr = Builder.CreateStructGEP(Addr: addr, Index: i); |
5859 | llvm::Value *elt = CI; |
5860 | if (requiresExtract) |
5861 | elt = Builder.CreateExtractValue(Agg: elt, Idxs: unpaddedIndex++); |
5862 | else |
5863 | assert(unpaddedIndex == 0); |
5864 | Builder.CreateStore(Val: elt, Addr: eltAddr); |
5865 | } |
5866 | [[fallthrough]]; |
5867 | } |
5868 | |
5869 | case ABIArgInfo::InAlloca: |
5870 | case ABIArgInfo::Indirect: { |
5871 | RValue ret = convertTempToRValue(addr: SRetPtr, type: RetTy, Loc: SourceLocation()); |
5872 | if (UnusedReturnSizePtr) |
5873 | PopCleanupBlock(); |
5874 | return ret; |
5875 | } |
5876 | |
5877 | case ABIArgInfo::Ignore: |
5878 | // If we are ignoring an argument that had a result, make sure to |
5879 | // construct the appropriate return value for our caller. |
5880 | return GetUndefRValue(Ty: RetTy); |
5881 | |
5882 | case ABIArgInfo::Extend: |
5883 | case ABIArgInfo::Direct: { |
5884 | llvm::Type *RetIRTy = ConvertType(T: RetTy); |
5885 | if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { |
5886 | switch (getEvaluationKind(T: RetTy)) { |
5887 | case TEK_Complex: { |
5888 | llvm::Value *Real = Builder.CreateExtractValue(Agg: CI, Idxs: 0); |
5889 | llvm::Value *Imag = Builder.CreateExtractValue(Agg: CI, Idxs: 1); |
5890 | return RValue::getComplex(C: std::make_pair(x&: Real, y&: Imag)); |
5891 | } |
5892 | case TEK_Aggregate: { |
5893 | Address DestPtr = ReturnValue.getAddress(); |
5894 | bool DestIsVolatile = ReturnValue.isVolatile(); |
5895 | |
5896 | if (!DestPtr.isValid()) { |
5897 | DestPtr = CreateMemTemp(T: RetTy, Name: "agg.tmp" ); |
5898 | DestIsVolatile = false; |
5899 | } |
5900 | EmitAggregateStore(Val: CI, Dest: DestPtr, DestIsVolatile); |
5901 | return RValue::getAggregate(addr: DestPtr); |
5902 | } |
5903 | case TEK_Scalar: { |
5904 | // If the argument doesn't match, perform a bitcast to coerce it. This |
5905 | // can happen due to trivial type mismatches. |
5906 | llvm::Value *V = CI; |
5907 | if (V->getType() != RetIRTy) |
5908 | V = Builder.CreateBitCast(V, DestTy: RetIRTy); |
5909 | return RValue::get(V); |
5910 | } |
5911 | } |
5912 | llvm_unreachable("bad evaluation kind" ); |
5913 | } |
5914 | |
5915 | // If coercing a fixed vector from a scalable vector for ABI |
5916 | // compatibility, and the types match, use the llvm.vector.extract |
5917 | // intrinsic to perform the conversion. |
5918 | if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(Val: RetIRTy)) { |
5919 | llvm::Value *V = CI; |
5920 | if (auto *ScalableSrcTy = |
5921 | dyn_cast<llvm::ScalableVectorType>(Val: V->getType())) { |
5922 | if (FixedDstTy->getElementType() == ScalableSrcTy->getElementType()) { |
5923 | llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGM.Int64Ty); |
5924 | V = Builder.CreateExtractVector(DstType: FixedDstTy, SrcVec: V, Idx: Zero, Name: "cast.fixed" ); |
5925 | return RValue::get(V); |
5926 | } |
5927 | } |
5928 | } |
5929 | |
5930 | Address DestPtr = ReturnValue.getValue(); |
5931 | bool DestIsVolatile = ReturnValue.isVolatile(); |
5932 | |
5933 | if (!DestPtr.isValid()) { |
5934 | DestPtr = CreateMemTemp(T: RetTy, Name: "coerce" ); |
5935 | DestIsVolatile = false; |
5936 | } |
5937 | |
5938 | // An empty record can overlap other data (if declared with |
5939 | // no_unique_address); omit the store for such types - as there is no |
5940 | // actual data to store. |
5941 | if (!isEmptyRecord(Context&: getContext(), T: RetTy, AllowArrays: true)) { |
5942 | // If the value is offset in memory, apply the offset now. |
5943 | Address StorePtr = emitAddressAtOffset(CGF&: *this, addr: DestPtr, info: RetAI); |
5944 | CreateCoercedStore(Src: CI, Dst: StorePtr, DstIsVolatile: DestIsVolatile, CGF&: *this); |
5945 | } |
5946 | |
5947 | return convertTempToRValue(addr: DestPtr, type: RetTy, Loc: SourceLocation()); |
5948 | } |
5949 | |
5950 | case ABIArgInfo::Expand: |
5951 | case ABIArgInfo::IndirectAliased: |
5952 | llvm_unreachable("Invalid ABI kind for return argument" ); |
5953 | } |
5954 | |
5955 | llvm_unreachable("Unhandled ABIArgInfo::Kind" ); |
5956 | } (); |
5957 | |
5958 | // Emit the assume_aligned check on the return value. |
5959 | if (Ret.isScalar() && TargetDecl) { |
5960 | AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); |
5961 | AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); |
5962 | } |
5963 | |
5964 | // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though |
5965 | // we can't use the full cleanup mechanism. |
5966 | for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall) |
5967 | LifetimeEnd.Emit(CGF&: *this, /*Flags=*/flags: {}); |
5968 | |
5969 | if (!ReturnValue.isExternallyDestructed() && |
5970 | RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct) |
5971 | pushDestroy(dtorKind: QualType::DK_nontrivial_c_struct, addr: Ret.getAggregateAddress(), |
5972 | type: RetTy); |
5973 | |
5974 | return Ret; |
5975 | } |
5976 | |
5977 | CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { |
5978 | if (isVirtual()) { |
5979 | const CallExpr *CE = getVirtualCallExpr(); |
5980 | return CGF.CGM.getCXXABI().getVirtualFunctionPointer( |
5981 | CGF, GD: getVirtualMethodDecl(), This: getThisAddress(), Ty: getVirtualFunctionType(), |
5982 | Loc: CE ? CE->getBeginLoc() : SourceLocation()); |
5983 | } |
5984 | |
5985 | return *this; |
5986 | } |
5987 | |
5988 | /* VarArg handling */ |
5989 | |
5990 | Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { |
5991 | VAListAddr = VE->isMicrosoftABI() |
5992 | ? EmitMSVAListRef(E: VE->getSubExpr()) |
5993 | : EmitVAListRef(E: VE->getSubExpr()); |
5994 | QualType Ty = VE->getType(); |
5995 | if (VE->isMicrosoftABI()) |
5996 | return CGM.getTypes().getABIInfo().EmitMSVAArg(CGF&: *this, VAListAddr, Ty); |
5997 | return CGM.getTypes().getABIInfo().EmitVAArg(CGF&: *this, VAListAddr, Ty); |
5998 | } |
5999 | |