//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H

#include "CGValue.h"
#include "EHScopeStack.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/IR/Value.h"

namespace llvm {
class Type;
class Value;
} // namespace llvm

namespace clang {
class Decl;
class FunctionDecl;
class TargetOptions;
class VarDecl;

namespace CodeGen {

/// Abstract information about a function or function prototype.
class CGCalleeInfo {
  /// The function prototype of the callee.
  const FunctionProtoType *CalleeProtoTy;
  /// The function declaration of the callee.
  GlobalDecl CalleeDecl;

public:
  explicit CGCalleeInfo() : CalleeProtoTy(nullptr) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
      : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
      : CalleeProtoTy(calleeProtoTy) {}
  CGCalleeInfo(GlobalDecl calleeDecl)
      : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}

  const FunctionProtoType *getCalleeFunctionProtoType() const {
    return CalleeProtoTy;
  }
  const GlobalDecl getCalleeDecl() const { return CalleeDecl; }
};

/// All available information about a concrete callee.
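///
/// A CGCallee is normally built through one of the static factory functions
/// below. A minimal, illustrative sketch (FD, Fn, and CGF stand for a
/// FunctionDecl, an llvm::Function, and a CodeGenFunction available at the
/// call site; they are placeholder names, not part of this header):
/// \code
///   CGCallee Callee = CGCallee::forDirect(Fn, CGCalleeInfo(GlobalDecl(FD)));
///   // Late-bound callees (e.g. virtual calls) are resolved immediately
///   // before emission.
///   CGCallee Concrete = Callee.prepareConcreteCallee(CGF);
///   llvm::Value *FnPtr = Concrete.getFunctionPointer();
/// \endcode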
class CGCallee {
  enum class SpecialKind : uintptr_t {
    Invalid,
    Builtin,
    PseudoDestructor,
    Virtual,

    Last = Virtual
  };

  struct BuiltinInfoStorage {
    const FunctionDecl *Decl;
    unsigned ID;
  };
  struct PseudoDestructorInfoStorage {
    const CXXPseudoDestructorExpr *Expr;
  };
  struct VirtualInfoStorage {
    const CallExpr *CE;
    GlobalDecl MD;
    Address Addr;
    llvm::FunctionType *FTy;
  };

  SpecialKind KindOrFunctionPointer;
  union {
    CGCalleeInfo AbstractInfo;
    BuiltinInfoStorage BuiltinInfo;
    PseudoDestructorInfoStorage PseudoDestructorInfo;
    VirtualInfoStorage VirtualInfo;
  };

  explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}

  CGCallee(const FunctionDecl *builtinDecl, unsigned builtinID)
      : KindOrFunctionPointer(SpecialKind::Builtin) {
    BuiltinInfo.Decl = builtinDecl;
    BuiltinInfo.ID = builtinID;
  }

public:
  CGCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}

  /// Construct a callee. Call this constructor directly when this
  /// isn't a direct call.
  CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
      : KindOrFunctionPointer(
            SpecialKind(reinterpret_cast<uintptr_t>(functionPtr))) {
    AbstractInfo = abstractInfo;
    assert(functionPtr && "configuring callee without function pointer");
    assert(functionPtr->getType()->isPointerTy());
  }

  static CGCallee forBuiltin(unsigned builtinID,
                             const FunctionDecl *builtinDecl) {
    CGCallee result(SpecialKind::Builtin);
    result.BuiltinInfo.Decl = builtinDecl;
    result.BuiltinInfo.ID = builtinID;
    return result;
  }

  static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E) {
    CGCallee result(SpecialKind::PseudoDestructor);
    result.PseudoDestructorInfo.Expr = E;
    return result;
  }

  static CGCallee forDirect(llvm::Constant *functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr);
  }

  static CGCallee forDirect(llvm::FunctionCallee functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr.getCallee());
  }

  static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr,
                             llvm::FunctionType *FTy) {
    CGCallee result(SpecialKind::Virtual);
    result.VirtualInfo.CE = CE;
    result.VirtualInfo.MD = MD;
    result.VirtualInfo.Addr = Addr;
    result.VirtualInfo.FTy = FTy;
    return result;
  }

  bool isBuiltin() const {
    return KindOrFunctionPointer == SpecialKind::Builtin;
  }
  const FunctionDecl *getBuiltinDecl() const {
    assert(isBuiltin());
    return BuiltinInfo.Decl;
  }
  unsigned getBuiltinID() const {
    assert(isBuiltin());
    return BuiltinInfo.ID;
  }

  bool isPseudoDestructor() const {
    return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
  }
  const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
    assert(isPseudoDestructor());
    return PseudoDestructorInfo.Expr;
  }

  bool isOrdinary() const {
    return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
  }
  CGCalleeInfo getAbstractInfo() const {
    if (isVirtual())
      return VirtualInfo.MD;
    assert(isOrdinary());
    return AbstractInfo;
  }
  llvm::Value *getFunctionPointer() const {
    assert(isOrdinary());
    return reinterpret_cast<llvm::Value *>(uintptr_t(KindOrFunctionPointer));
  }
  void setFunctionPointer(llvm::Value *functionPtr) {
    assert(isOrdinary());
    KindOrFunctionPointer =
        SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
  }

  bool isVirtual() const {
    return KindOrFunctionPointer == SpecialKind::Virtual;
  }
  const CallExpr *getVirtualCallExpr() const {
    assert(isVirtual());
    return VirtualInfo.CE;
  }
  GlobalDecl getVirtualMethodDecl() const {
    assert(isVirtual());
    return VirtualInfo.MD;
  }
  Address getThisAddress() const {
    assert(isVirtual());
    return VirtualInfo.Addr;
  }
  llvm::FunctionType *getVirtualFunctionType() const {
    assert(isVirtual());
    return VirtualInfo.FTy;
  }

  /// If this is a delayed callee computation of some sort, prepare
  /// a concrete callee.
  CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const;
};

struct CallArg {
private:
  union {
    RValue RV;
    LValue LV; /// The argument is semantically a load from this l-value.
  };
  bool HasLV;

  /// A data-flow flag to make sure getRValue and/or copyInto are not
  /// called twice for duplicated IR emission.
  mutable bool IsUsed;

public:
  QualType Ty;
  CallArg(RValue rv, QualType ty)
      : RV(rv), HasLV(false), IsUsed(false), Ty(ty) {}
  CallArg(LValue lv, QualType ty)
      : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {}
  bool hasLValue() const { return HasLV; }
  QualType getType() const { return Ty; }

  /// \returns an independent RValue. If the CallArg contains an LValue,
  /// a temporary copy is returned.
  RValue getRValue(CodeGenFunction &CGF) const;

  LValue getKnownLValue() const {
    assert(HasLV && !IsUsed);
    return LV;
  }
  RValue getKnownRValue() const {
    assert(!HasLV && !IsUsed);
    return RV;
  }
  void setRValue(RValue _RV) {
    assert(!HasLV);
    RV = _RV;
  }

  bool isAggregate() const { return HasLV || RV.isAggregate(); }

  void copyInto(CodeGenFunction &CGF, Address A) const;
};

/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
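///
/// An illustrative construction sketch (ScalarV, AggLV, AggTy, and Ctx stand
/// for an already-emitted llvm::Value, an aggregate LValue, its QualType, and
/// the ASTContext; they are placeholder names, not defined in this header):
/// \code
///   CallArgList Args;
///   Args.add(RValue::get(ScalarV), Ctx.IntTy);  // scalar passed by value
///   Args.addUncopiedAggregate(AggLV, AggTy);    // aggregate passed as l-value
/// \endcode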
class CallArgList : public SmallVector<CallArg, 8> {
public:
  CallArgList() = default;

  struct Writeback {
    /// The original argument. Note that the argument l-value
    /// is potentially null.
    LValue Source;

    /// The temporary alloca.
    Address Temporary;

    /// A value to "use" after the writeback, or null.
    llvm::Value *ToUse;
  };

  struct CallArgCleanup {
    EHScopeStack::stable_iterator Cleanup;

    /// The "is active" insertion point. This instruction is temporary and
    /// will be removed after insertion.
    llvm::Instruction *IsActiveIP;
  };

  void add(RValue rvalue, QualType type) { push_back(CallArg(rvalue, type)); }

  void addUncopiedAggregate(LValue LV, QualType type) {
    push_back(CallArg(LV, type));
  }

  /// Add all the arguments from another CallArgList to this one. After doing
  /// this, the old CallArgList retains its list of arguments, but must not
  /// be used to emit a call.
  void addFrom(const CallArgList &other) {
    insert(end(), other.begin(), other.end());
    Writebacks.insert(Writebacks.end(), other.Writebacks.begin(),
                      other.Writebacks.end());
    CleanupsToDeactivate.insert(CleanupsToDeactivate.end(),
                                other.CleanupsToDeactivate.begin(),
                                other.CleanupsToDeactivate.end());
    assert(!(StackBase && other.StackBase) && "can't merge stackbases");
    if (!StackBase)
      StackBase = other.StackBase;
  }

  void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse) {
    Writeback writeback = {srcLV, temporary, toUse};
    Writebacks.push_back(writeback);
  }

  bool hasWritebacks() const { return !Writebacks.empty(); }

  typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator>
      writeback_const_range;

  writeback_const_range writebacks() const {
    return writeback_const_range(Writebacks.begin(), Writebacks.end());
  }

  void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
                                 llvm::Instruction *IsActiveIP) {
    CallArgCleanup ArgCleanup;
    ArgCleanup.Cleanup = Cleanup;
    ArgCleanup.IsActiveIP = IsActiveIP;
    CleanupsToDeactivate.push_back(ArgCleanup);
  }

  ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const {
    return CleanupsToDeactivate;
  }

  void allocateArgumentMemory(CodeGenFunction &CGF);
  llvm::Instruction *getStackBase() const { return StackBase; }
  void freeArgumentMemory(CodeGenFunction &CGF) const;

  /// Returns whether we're using an inalloca struct to pass arguments in
  /// memory.
  bool isUsingInAlloca() const { return StackBase; }

private:
  SmallVector<Writeback, 1> Writebacks;

  /// Deactivate these cleanups immediately before making the call. This
  /// is used to clean up objects that are owned by the callee once the call
  /// occurs.
  SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;

  /// The stacksave call. It dominates all of the argument evaluation.
  llvm::CallInst *StackBase = nullptr;
};

/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
class FunctionArgList : public SmallVector<const VarDecl *, 16> {};

/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
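///
/// A minimal, illustrative sketch (DestAddr stands for an Address for the
/// result storage obtained elsewhere; it is not defined in this header):
/// \code
///   ReturnValueSlot Slot(DestAddr, /*IsVolatile=*/false);
/// \endcode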
class ReturnValueSlot {
  Address Addr = Address::invalid();

  // Return value slot flags
  LLVM_PREFERRED_TYPE(bool)
  unsigned IsVolatile : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned IsUnused : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned IsExternallyDestructed : 1;

public:
  ReturnValueSlot()
      : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
  ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false,
                  bool IsExternallyDestructed = false)
      : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused),
        IsExternallyDestructed(IsExternallyDestructed) {}

  bool isNull() const { return !Addr.isValid(); }
  bool isVolatile() const { return IsVolatile; }
  Address getValue() const { return Addr; }
  bool isUnused() const { return IsUnused; }
  bool isExternallyDestructed() const { return IsExternallyDestructed; }
  Address getAddress() const { return Addr; }
};

/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
/// though we had emitted it ourselves. We remove any attributes on F that
/// conflict with the attributes we add here.
///
/// This is useful for adding attrs to bitcode modules that you want to link
/// with but don't control, such as CUDA's libdevice. When linking with such
/// a bitcode library, you might want to set e.g. its functions'
/// "unsafe-fp-math" attribute to match the attr of the functions you're
/// codegen'ing. Otherwise, LLVM will interpret the bitcode module's lack of
/// unsafe-fp-math attrs as tantamount to unsafe-fp-math=false, and then LLVM
/// will propagate unsafe-fp-math=false up to every transitive caller of a
/// function in the bitcode library!
///
/// With the exception of fast-math attrs, this will only make the attributes
/// on the function more conservative. But it's unsafe to call this on a
/// function which relies on particular fast-math attributes for correctness.
/// It's up to you to ensure that this is safe.
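///
/// An illustrative usage sketch; CGM and LinkedModule stand for the current
/// CodeGenModule and an external llvm::Module being linked in, and are not
/// part of this header:
/// \code
///   for (llvm::Function &Fn : LinkedModule->functions())
///     if (!Fn.isDeclaration())
///       mergeDefaultFunctionDefinitionAttributes(
///           Fn, CGM.getCodeGenOpts(), CGM.getLangOpts(),
///           CGM.getTarget().getTargetOpts(), /*WillInternalize=*/true);
/// \endcode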
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
                                              const CodeGenOptions &CodeGenOpts,
                                              const LangOptions &LangOpts,
                                              const TargetOptions &TargetOpts,
                                              bool WillInternalize);

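/// Bit-flag options used when arranging a CGFunctionInfo for a call or
/// function. Flags combine with the bitwise operators defined below; a small,
/// illustrative sketch:
/// \code
///   FnInfoOpts Opts = FnInfoOpts::IsInstanceMethod | FnInfoOpts::IsChainCall;
///   bool IsChain = (Opts & FnInfoOpts::IsChainCall) != FnInfoOpts::None;
/// \endcode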
enum class FnInfoOpts {
  None = 0,
  IsInstanceMethod = 1 << 0,
  IsChainCall = 1 << 1,
  IsDelegateCall = 1 << 2,
};

inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(llvm::to_underlying(A) |
                                 llvm::to_underlying(B));
}

inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(llvm::to_underlying(A) &
                                 llvm::to_underlying(B));
}

inline FnInfoOpts &operator|=(FnInfoOpts &A, FnInfoOpts B) {
  A = A | B;
  return A;
}

inline FnInfoOpts &operator&=(FnInfoOpts &A, FnInfoOpts B) {
  A = A & B;
  return A;
}

} // end namespace CodeGen
} // end namespace clang

#endif