//===- CoroInternal.h - Internal Coroutine interfaces ---------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Common definitions/declarations used internally by coroutine lowering passes.
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H
#define LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H

#include "CoroInstr.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"

namespace llvm {

class CallGraph;

namespace coro {

bool declaresAnyIntrinsic(const Module &M);
bool declaresIntrinsics(const Module &M,
                        const std::initializer_list<StringRef>);
void replaceCoroFree(CoroIdInst *CoroId, bool Elide);
/// Attempts to rewrite the location operand of debug intrinsics in terms of
/// the coroutine frame pointer, folding pointer offsets into the DIExpression
/// of the intrinsic.
/// If the frame pointer is an Argument, store it into an alloca if
/// OptimizeFrame is false.
void salvageDebugInfo(
    SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
    DbgVariableIntrinsic &DVI, bool OptimizeFrame, bool UseEntryValue);
void salvageDebugInfo(
    SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
    DbgVariableRecord &DVR, bool OptimizeFrame, bool UseEntryValue);
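
// Illustrative sketch of how these helpers are typically driven; `F` and
// `Shape` are assumed to come from the caller, and instructions() is the
// iterator from llvm/IR/InstIterator.h:
//   SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
//   for (Instruction &I : instructions(F))
//     if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
//       coro::salvageDebugInfo(ArgToAllocaMap, *DVI, Shape.OptimizeFrame,
//                              /*UseEntryValue=*/false);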

// Keeps data and helper functions for lowering coroutine intrinsics.
struct LowererBase {
  Module &TheModule;
  LLVMContext &Context;
  PointerType *const Int8Ptr;
  FunctionType *const ResumeFnType;
  ConstantPointerNull *const NullPtr;

  LowererBase(Module &M);
  Value *makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt);
};
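
// Illustrative usage sketch, assuming `Intrin` is an `llvm.coro.resume` or
// `llvm.coro.destroy` call being lowered to an indirect call through the
// frame's resume/destroy slot:
//   LowererBase Lowerer(M);
//   Value *SubFn = Lowerer.makeSubFnCall(Intrin->getArgOperand(0),
//                                        CoroSubFnInst::ResumeIndex, Intrin);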

enum class ABI {
  /// The "resume-switch" lowering, where there are separate resume and
  /// destroy functions that are shared between all suspend points. The
  /// coroutine frame implicitly stores the resume and destroy functions,
  /// the current index, and any promise value.
  Switch,

  /// The "returned-continuation" lowering, where each suspend point creates a
  /// single continuation function that is used for both resuming and
  /// destroying. Does not support promises.
  Retcon,

  /// The "unique returned-continuation" lowering, where each suspend point
  /// creates a single continuation function that is used for both resuming
  /// and destroying. Does not support promises. The function is known to
  /// suspend at most once during its execution, and the return value of
  /// the continuation is void.
  RetconOnce,

  /// The "async continuation" lowering, where each suspend point creates a
  /// single continuation function. The continuation function is available as
  /// an intrinsic.
  Async,
};
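
// The ABI in use follows from the form of the coroutine's id intrinsic:
// `llvm.coro.id` selects Switch, `llvm.coro.id.retcon` and
// `llvm.coro.id.retcon.once` select the returned-continuation lowerings, and
// `llvm.coro.id.async` selects Async (see the getSwitchCoroId, getRetconCoroId
// and getAsyncCoroId accessors on Shape below).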

// Holds structural Coroutine Intrinsics for a particular function and other
// values used during the CoroSplit pass.
struct LLVM_LIBRARY_VISIBILITY Shape {
  CoroBeginInst *CoroBegin;
  SmallVector<AnyCoroEndInst *, 4> CoroEnds;
  SmallVector<CoroSizeInst *, 2> CoroSizes;
  SmallVector<CoroAlignInst *, 2> CoroAligns;
  SmallVector<AnyCoroSuspendInst *, 4> CoroSuspends;
  SmallVector<CallInst *, 2> SwiftErrorOps;
  SmallVector<CoroAwaitSuspendInst *, 4> CoroAwaitSuspends;

  // Field indexes for special fields in the switch lowering.
  struct SwitchFieldIndex {
    enum {
      Resume,
      Destroy

      // The promise field is always at a fixed offset from the start of the
      // frame given its type, but the index isn't a constant for all
      // possible frames.

      // The switch-index field isn't at a fixed offset or index, either;
      // we just work it in where it fits best.
    };
  };
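
  // Illustrative sketch of a switch-lowered frame (the layout is computed per
  // coroutine, so this is not normative):
  //   %f.Frame = type { ptr (resume fn), ptr (destroy fn),
  //                     ... promise, switch index, spilled values ... }
  // Only the resume and destroy slots have fixed field indices.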

  coro::ABI ABI;

  StructType *FrameTy;
  Align FrameAlign;
  uint64_t FrameSize;
  Value *FramePtr;
  BasicBlock *AllocaSpillBlock;

  /// This is only true if optimizations are enabled.
  bool OptimizeFrame;

  struct SwitchLoweringStorage {
    SwitchInst *ResumeSwitch;
    AllocaInst *PromiseAlloca;
    BasicBlock *ResumeEntryBlock;
    unsigned IndexField;
    unsigned IndexAlign;
    unsigned IndexOffset;
    bool HasFinalSuspend;
    bool HasUnwindCoroEnd;
  };

  struct RetconLoweringStorage {
    Function *ResumePrototype;
    Function *Alloc;
    Function *Dealloc;
    BasicBlock *ReturnBlock;
    bool IsFrameInlineInStorage;
  };

  struct AsyncLoweringStorage {
    Value *Context;
    CallingConv::ID AsyncCC;
    unsigned ContextArgNo;
    uint64_t ContextHeaderSize;
    uint64_t ContextAlignment;
    uint64_t FrameOffset; // Start of the frame.
    uint64_t ContextSize; // Includes frame size.
    GlobalVariable *AsyncFuncPointer;

    Align getContextAlignment() const { return Align(ContextAlignment); }
  };

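  // Only the storage member that matches the active `ABI` above is in use;
  // the accessors below check the ABI before reading a particular member.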
  union {
    SwitchLoweringStorage SwitchLowering;
    RetconLoweringStorage RetconLowering;
    AsyncLoweringStorage AsyncLowering;
  };

  CoroIdInst *getSwitchCoroId() const {
    assert(ABI == coro::ABI::Switch);
    return cast<CoroIdInst>(CoroBegin->getId());
  }

  AnyCoroIdRetconInst *getRetconCoroId() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);
    return cast<AnyCoroIdRetconInst>(CoroBegin->getId());
  }

  CoroIdAsyncInst *getAsyncCoroId() const {
    assert(ABI == coro::ABI::Async);
    return cast<CoroIdAsyncInst>(CoroBegin->getId());
  }

  unsigned getSwitchIndexField() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return SwitchLowering.IndexField;
  }
  IntegerType *getIndexType() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return cast<IntegerType>(FrameTy->getElementType(getSwitchIndexField()));
  }
  ConstantInt *getIndex(uint64_t Value) const {
    return ConstantInt::get(getIndexType(), Value);
  }
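
  // Illustrative sketch (names are hypothetical): recording the active suspend
  // point in the frame, given an IRBuilder `Builder` and a pointer `IndexAddr`
  // to the switch-index field:
  //   Builder.CreateStore(getIndex(SuspendIndex), IndexAddr);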

  PointerType *getSwitchResumePointerType() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return cast<PointerType>(FrameTy->getElementType(SwitchFieldIndex::Resume));
  }

  FunctionType *getResumeFunctionType() const {
    switch (ABI) {
    case coro::ABI::Switch:
      return FunctionType::get(Type::getVoidTy(FrameTy->getContext()),
                               PointerType::getUnqual(FrameTy->getContext()),
                               /*IsVarArg=*/false);
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      return RetconLowering.ResumePrototype->getFunctionType();
    case coro::ABI::Async:
      // Not used. The function type depends on the active suspend.
      return nullptr;
    }

    llvm_unreachable("Unknown coro::ABI enum");
  }

  ArrayRef<Type*> getRetconResultTypes() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);
    auto FTy = CoroBegin->getFunction()->getFunctionType();

    // The safety of all this is checked by checkWFRetconPrototype.
    if (auto STy = dyn_cast<StructType>(FTy->getReturnType())) {
      return STy->elements().slice(1);
    } else {
      return ArrayRef<Type*>();
    }
  }

  ArrayRef<Type*> getRetconResumeTypes() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);

    // The safety of all this is checked by checkWFRetconPrototype.
    auto FTy = RetconLowering.ResumePrototype->getFunctionType();
    return FTy->params().slice(1);
  }

  CallingConv::ID getResumeFunctionCC() const {
    switch (ABI) {
    case coro::ABI::Switch:
      return CallingConv::Fast;

    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      return RetconLowering.ResumePrototype->getCallingConv();
    case coro::ABI::Async:
      return AsyncLowering.AsyncCC;
    }
    llvm_unreachable("Unknown coro::ABI enum");
  }

  AllocaInst *getPromiseAlloca() const {
    if (ABI == coro::ABI::Switch)
      return SwitchLowering.PromiseAlloca;
    return nullptr;
  }

  BasicBlock::iterator getInsertPtAfterFramePtr() const {
    if (auto *I = dyn_cast<Instruction>(FramePtr)) {
      BasicBlock::iterator It = std::next(I->getIterator());
      It.setHeadBit(true); // Copy pre-RemoveDIs behaviour.
      return It;
    }
    return cast<Argument>(FramePtr)->getParent()->getEntryBlock().begin();
  }

  /// Allocate memory according to the rules of the active lowering.
  ///
  /// \param CG - if non-null, will be updated for the new call
  Value *emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const;

  /// Deallocate memory according to the rules of the active lowering.
  ///
  /// \param CG - if non-null, will be updated for the new call
  void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const;
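
  // Illustrative caller-side sketch (names are hypothetical): allocating and
  // later freeing the frame via the active lowering, with no call graph to
  // update:
  //   Value *Size = Builder.getInt64(Shape.FrameSize);
  //   Value *Frame = Shape.emitAlloc(Builder, Size, /*CG=*/nullptr);
  //   ...
  //   Shape.emitDealloc(Builder, Frame, /*CG=*/nullptr);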

  Shape() = default;
  explicit Shape(Function &F, bool OptimizeFrame = false)
      : OptimizeFrame(OptimizeFrame) {
    buildFrom(F);
  }
  void buildFrom(Function &F);
};
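
// Illustrative sketch: analyzing a coroutine `F` before splitting it; a null
// CoroBegin means there is nothing to lower.
//   coro::Shape Shape(F, /*OptimizeFrame=*/true);
//   if (Shape.CoroBegin) {
//     // ... lower according to Shape.ABI ...
//   }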

bool defaultMaterializable(Instruction &V);
void buildCoroutineFrame(
    Function &F, Shape &Shape, TargetTransformInfo &TTI,
    const std::function<bool(Instruction &)> &MaterializableCallback);
CallInst *createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                             TargetTransformInfo &TTI,
                             ArrayRef<Value *> Arguments, IRBuilder<> &);
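
// Illustrative sketch: building the coroutine frame with the default
// materialization policy (`F`, `Shape`, and `TTI` are assumed to come from the
// caller):
//   coro::buildCoroutineFrame(F, Shape, TTI, coro::defaultMaterializable);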
} // End namespace coro.
} // End namespace llvm

#endif
