//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//

#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H

#include "CIRGenBuilder.h"
#include "CIRGenCall.h"
#include "CIRGenModule.h"
#include "CIRGenTypeCache.h"
#include "CIRGenValue.h"

#include "Address.h"

#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/MissingFeatures.h"
#include "clang/CIR/TypeEvaluationKind.h"

namespace {
class ScalarExprEmitter;
} // namespace

namespace mlir {
namespace acc {
class LoopOp;
} // namespace acc
} // namespace mlir

namespace clang::CIRGen {

class CIRGenFunction : public CIRGenTypeCache {
public:
  CIRGenModule &cgm;

private:
  friend class ::ScalarExprEmitter;
  /// The builder is a helper class to create IR inside a function. The
  /// builder is stateful, in particular it keeps an "insertion point": this
  /// is where the next operations will be introduced.
  CIRGenBuilderTy &builder;

public:
  /// The GlobalDecl for the current function being compiled or the global
  /// variable currently being initialized.
  clang::GlobalDecl curGD;

  /// The compiler-generated variable that holds the return value.
  std::optional<mlir::Value> fnRetAlloca;

  /// CXXThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
  ImplicitParamDecl *cxxabiThisDecl = nullptr;
  mlir::Value cxxabiThisValue = nullptr;
  mlir::Value cxxThisValue = nullptr;

  // Holds the Decl for the current outermost non-closure context
  const clang::Decl *curFuncDecl = nullptr;

  /// The function for which code is currently being generated.
  cir::FuncOp curFn;

  using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
  /// This keeps track of the CIR allocas or globals for local C
  /// declarations.
  DeclMapTy localDeclMap;

  /// The type of the condition for the switch statement currently being
  /// emitted.
  llvm::SmallVector<mlir::Type, 2> condTypeStack;

  clang::ASTContext &getContext() const { return cgm.getASTContext(); }

  CIRGenBuilderTy &getBuilder() { return builder; }

  CIRGenModule &getCIRGenModule() { return cgm; }
  const CIRGenModule &getCIRGenModule() const { return cgm; }

  mlir::Block *getCurFunctionEntryBlock() { return &curFn.getRegion().front(); }

  /// Sanitizers enabled for this function.
  clang::SanitizerSet sanOpts;

  /// Whether or not a Microsoft-style asm block has been processed within
  /// this function. These can potentially set the return value.
  bool sawAsmBlock = false;

  mlir::Type convertTypeForMem(QualType t);

  mlir::Type convertType(clang::QualType t);
  mlir::Type convertType(const TypeDecl *t) {
    return convertType(getContext().getTypeDeclType(t));
  }

  /// Return the cir::TypeEvaluationKind of QualType \c type.
  static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type);

  static bool hasScalarEvaluationKind(clang::QualType type) {
    return getEvaluationKind(type) == cir::TEK_Scalar;
  }

  static bool hasAggregateEvaluationKind(clang::QualType type) {
    return getEvaluationKind(type) == cir::TEK_Aggregate;
  }
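
  // Expression emission generally dispatches on the evaluation kind of the
  // expression's type, roughly along these lines (a minimal sketch; the
  // actual entry points are the emit* methods declared below):
  //
  //   if (hasScalarEvaluationKind(e->getType()))
  //     (void)emitScalarExpr(e);   // result is an mlir::Value
  //   else if (hasAggregateEvaluationKind(e->getType()))
  //     emitAggExpr(e, slot);      // result is written into an AggValueSlot
  //   // TEK_Complex is handled by its own (separate) emission path.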

  CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                 bool suppressNewContext = false);
  ~CIRGenFunction();

  CIRGenTypes &getTypes() const { return cgm.getTypes(); }

  const TargetInfo &getTarget() const { return cgm.getTarget(); }
  mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }

  // ---------------------
  // Opaque value handling
  // ---------------------

  /// Keeps track of the current set of opaque value expressions.
  llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
  llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;

public:
  /// A non-RAII class containing all the information about a bound
  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
  /// this which makes individual mappings very simple; using this
  /// class directly is useful when you have a variable number of
  /// opaque values or don't want the RAII functionality for some
  /// reason.
  class OpaqueValueMappingData {
    const OpaqueValueExpr *opaqueValue;
    bool boundLValue;

    OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
        : opaqueValue(ov), boundLValue(boundLValue) {}

  public:
    OpaqueValueMappingData() : opaqueValue(nullptr) {}

    static bool shouldBindAsLValue(const Expr *expr) {
      // gl-values should be bound as l-values for obvious reasons.
      // Records should be bound as l-values because IR generation
      // always keeps them in memory. Expressions of function type
      // act exactly like l-values but are formally required to be
      // r-values in C.
      return expr->isGLValue() || expr->getType()->isFunctionType() ||
             hasAggregateEvaluationKind(expr->getType());
    }

    static OpaqueValueMappingData
    bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
      if (shouldBindAsLValue(ov))
        return bind(cgf, ov, cgf.emitLValue(e));
      return bind(cgf, ov, cgf.emitAnyExpr(e));
    }

    static OpaqueValueMappingData
    bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
      assert(shouldBindAsLValue(ov));
      cgf.opaqueLValues.insert(std::make_pair(ov, lv));
      return OpaqueValueMappingData(ov, true);
    }

    static OpaqueValueMappingData
    bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
      assert(!shouldBindAsLValue(ov));
      cgf.opaqueRValues.insert(std::make_pair(ov, rv));

      OpaqueValueMappingData data(ov, false);

      // Work around an extremely aggressive peephole optimization in
      // EmitScalarConversion which assumes that all other uses of a
      // value are extant.
      assert(!cir::MissingFeatures::peepholeProtection() && "NYI");
      return data;
    }

    bool isValid() const { return opaqueValue != nullptr; }
    void clear() { opaqueValue = nullptr; }

    void unbind(CIRGenFunction &cgf) {
      assert(opaqueValue && "no data to unbind!");

      if (boundLValue) {
        cgf.opaqueLValues.erase(opaqueValue);
      } else {
        cgf.opaqueRValues.erase(opaqueValue);
        assert(!cir::MissingFeatures::peepholeProtection() && "NYI");
      }
    }
  };

  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
  class OpaqueValueMapping {
    CIRGenFunction &cgf;
    OpaqueValueMappingData data;

  public:
    static bool shouldBindAsLValue(const Expr *expr) {
      return OpaqueValueMappingData::shouldBindAsLValue(expr);
    }

    /// Build the opaque value mapping for the given conditional
    /// operator if it's the GNU ?: extension. This is a common
    /// enough pattern that the convenience operator is really
    /// helpful.
    ///
    OpaqueValueMapping(CIRGenFunction &cgf,
                       const AbstractConditionalOperator *op)
        : cgf(cgf) {
      if (mlir::isa<ConditionalOperator>(op))
        // Leave Data empty.
        return;

      const BinaryConditionalOperator *e =
          mlir::cast<BinaryConditionalOperator>(op);
      data = OpaqueValueMappingData::bind(cgf, e->getOpaqueValue(),
                                          e->getCommon());
    }

    /// Build the opaque value mapping for an OpaqueValueExpr whose source
    /// expression is set to the expression the OVE represents.
    OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
        : cgf(cgf) {
      if (ov) {
        assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
                                      "for OVE with no source expression");
        data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
      }
    }

    OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
                       LValue lvalue)
        : cgf(cgf),
          data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}

    OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
                       RValue rvalue)
        : cgf(cgf),
          data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}

    void pop() {
      data.unbind(cgf);
      data.clear();
    }

    ~OpaqueValueMapping() {
      if (data.isValid())
        data.unbind(cgf);
    }
  };
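
  // A minimal usage sketch (hypothetical caller code inside CIRGenFunction):
  // when emitting the GNU binary conditional `x ?: y`, the common
  // sub-expression is bound once and reused while emitting the branches.
  //
  //   OpaqueValueMapping binding(*this, e); // binds e->getCommon() if needed
  //   // ... emit the true/false branches; uses of e->getOpaqueValue() now
  //   // resolve to the bound LValue/RValue ...
  //   // The mapping is removed when `binding` goes out of scope.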

private:
  /// Declare a variable in the current scope, return success if the variable
  /// wasn't declared yet.
  void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
               mlir::Location loc, clang::CharUnits alignment,
               bool isParam = false);

public:
  mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);

  void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);

private:
  // Track current variable initialization (if there's one)
  const clang::VarDecl *currVarDecl = nullptr;
  class VarDeclContext {
    CIRGenFunction &p;
    const clang::VarDecl *oldVal = nullptr;

  public:
    VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
      if (p.currVarDecl)
        oldVal = p.currVarDecl;
      p.currVarDecl = value;
    }

    /// Can be used to restore the state early, before the dtor
    /// is run.
    void restore() { p.currVarDecl = oldVal; }
    ~VarDeclContext() { restore(); }
  };

public:
  /// Use to track source locations across nested visitor traversals.
  /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
  std::optional<mlir::Location> currSrcLoc;
  class SourceLocRAIIObject {
    CIRGenFunction &cgf;
    std::optional<mlir::Location> oldLoc;

  public:
    SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
      if (cgf.currSrcLoc)
        oldLoc = cgf.currSrcLoc;
      cgf.currSrcLoc = value;
    }

    /// Can be used to restore the state early, before the dtor
    /// is run.
    void restore() { cgf.currSrcLoc = oldLoc; }
    ~SourceLocRAIIObject() { restore(); }
  };
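
  // A minimal sketch of the intended pattern (hypothetical visitor code):
  //
  //   mlir::LogicalResult emitSomeStmt(const Stmt *s) {
  //     SourceLocRAIIObject loc{*this, getLoc(s->getSourceRange())};
  //     // ... nested emission sees currSrcLoc until this returns ...
  //     return mlir::success();
  //   }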

  /// Helpers to convert Clang's SourceLocation to a MLIR Location.
  mlir::Location getLoc(clang::SourceLocation srcLoc);
  mlir::Location getLoc(clang::SourceRange srcLoc);
  mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);

  const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }

  // Wrapper for function prototype sources. Wraps either a FunctionProtoType
  // or an ObjCMethodDecl.
  struct PrototypeWrapper {
    llvm::PointerUnion<const clang::FunctionProtoType *,
                       const clang::ObjCMethodDecl *>
        p;

    PrototypeWrapper(const clang::FunctionProtoType *ft) : p(ft) {}
    PrototypeWrapper(const clang::ObjCMethodDecl *md) : p(md) {}
  };

  /// An abstract representation of regular/ObjC call/message targets.
  class AbstractCallee {
    /// The function declaration of the callee.
    [[maybe_unused]] const clang::Decl *calleeDecl;

  public:
    AbstractCallee() : calleeDecl(nullptr) {}
    AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}

    bool hasFunctionDecl() const {
      return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
    }

    unsigned getNumParams() const {
      if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
        return fd->getNumParams();
      return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
    }

    const clang::ParmVarDecl *getParamDecl(unsigned I) const {
      if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
        return fd->getParamDecl(I);
      return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
               I);
    }
  };

  void finishFunction(SourceLocation endLoc);

  /// Determine whether the given initializer is trivial in the sense
  /// that it requires no code to be generated.
  bool isTrivialInitializer(const Expr *init);

  /// If the specified expression does not fold to a constant, or if it does
  /// but contains a label, return false. If it constant folds, return true
  /// and set the boolean result in Result.
  bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
                           bool allowLabels = false);
  bool constantFoldsToSimpleInteger(const clang::Expr *cond,
                                    llvm::APSInt &resultInt,
                                    bool allowLabels = false);

  /// Return true if the statement contains a label in it. If
  /// this statement is not executed normally, it not containing a label means
  /// that we can just remove the code.
  bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);

  class ConstantEmission {
    // Cannot use mlir::TypedAttr directly here because of bit availability.
    llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
    ConstantEmission(mlir::TypedAttr c, bool isReference)
        : valueAndIsReference(c, isReference) {}

  public:
    ConstantEmission() {}
    static ConstantEmission forReference(mlir::TypedAttr c) {
      return ConstantEmission(c, true);
    }
    static ConstantEmission forValue(mlir::TypedAttr c) {
      return ConstantEmission(c, false);
    }

    explicit operator bool() const {
      return valueAndIsReference.getOpaqueValue() != nullptr;
    }

    bool isReference() const { return valueAndIsReference.getInt(); }
    LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const {
      assert(isReference());
      cgf.cgm.errorNYI(refExpr->getSourceRange(),
                       "ConstantEmission::getReferenceLValue");
      return {};
    }

    mlir::TypedAttr getValue() const {
      assert(!isReference());
      return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
    }
  };

  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
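
  // Sketch of how a caller is expected to consume the result (hypothetical
  // code in the DeclRefExpr emission path):
  //
  //   if (ConstantEmission constant = tryEmitAsConstant(dre)) {
  //     if (constant.isReference())
  //       return constant.getReferenceLValue(*this, dre);
  //     mlir::TypedAttr value = constant.getValue();
  //     // ... materialize `value` as a constant operation ...
  //   }
  //   // Otherwise fall back to the normal emission path.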

  struct AutoVarEmission {
    const clang::VarDecl *Variable;
    /// The address of the alloca for languages with explicit address space
    /// (e.g. OpenCL) or alloca casted to generic pointer for address space
    /// agnostic languages (e.g. C++). Invalid if the variable was emitted
    /// as a global constant.
    Address Addr;

    /// True if the variable is of aggregate type and has a constant
    /// initializer.
    bool IsConstantAggregate = false;

    /// True if the variable is a __block variable that is captured by an
    /// escaping block.
    bool IsEscapingByRef = false;

    mlir::Value NRVOFlag{};

    struct Invalid {};
    AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}

    AutoVarEmission(const clang::VarDecl &variable)
        : Variable(&variable), Addr(Address::invalid()) {}

    static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }

    bool wasEmittedAsGlobal() const { return !Addr.isValid(); }

    /// Returns the raw, allocated address, which is not necessarily
    /// the address of the object itself. It is casted to default
    /// address space for address space agnostic languages.
    Address getAllocatedAddress() const { return Addr; }

    /// Returns the address of the object within this declaration.
    /// Note that this does not chase the forwarding pointer for
    /// __block decls.
    Address getObjectAddress(CIRGenFunction &cgf) const {
      if (!IsEscapingByRef)
        return Addr;

      assert(!cir::MissingFeatures::opAllocaEscapeByReference());
      return Address::invalid();
    }
  };
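
  // Sketch of the intended consumption pattern (hypothetical caller code):
  //
  //   AutoVarEmission emission = emitAutoVarAlloca(varDecl);
  //   if (!emission.wasEmittedAsGlobal()) {
  //     Address addr = emission.getObjectAddress(*this);
  //     // ... emit the initializer into `addr` ...
  //   }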

  /// Perform the usual unary conversions on the specified expression and
  /// compare the result against zero, returning an Int1Ty value.
  mlir::Value evaluateExprAsBool(const clang::Expr *e);

  /// Set the address of a local variable.
  void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
    assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
    localDeclMap.insert({vd, addr});
    // TODO: Add symbol table support
  }

  bool shouldNullCheckClassCastValue(const CastExpr *ce);

  LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t);

  /// Construct an address with the natural alignment of T. If a pointer to T
  /// is expected to be signed, the pointer passed to this function must have
  /// been signed, and the returned Address will have the pointer authentication
  /// information needed to authenticate the signed pointer.
  Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t,
                                       CharUnits alignment,
                                       bool forPointeeType = false,
                                       LValueBaseInfo *baseInfo = nullptr) {
    if (alignment.isZero())
      alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
    return Address(ptr, convertTypeForMem(t), alignment);
  }

  Address getAddressOfBaseClass(
      Address value, const CXXRecordDecl *derived,
      llvm::iterator_range<CastExpr::path_const_iterator> path,
      bool nullCheckValue, SourceLocation loc);

  LValue makeAddrLValue(Address addr, QualType ty,
                        AlignmentSource source = AlignmentSource::Type) {
    return makeAddrLValue(addr, ty, LValueBaseInfo(source));
  }

  LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo) {
    return LValue::makeAddr(addr, ty, baseInfo);
  }

  /// Return the address of a local variable.
  Address getAddrOfLocalVar(const clang::VarDecl *vd) {
    auto it = localDeclMap.find(vd);
    assert(it != localDeclMap.end() &&
           "Invalid argument to getAddrOfLocalVar(), no decl!");
    return it->second;
  }
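
  // setAddrOfLocalVar/getAddrOfLocalVar are expected to be used as a pair
  // when lowering a local declaration (a sketch; hypothetical caller code):
  //
  //   mlir::Value allocaVal = emitAlloca(vd->getName(), ty, loc, align,
  //                                      /*insertIntoFnEntryBlock=*/false);
  //   setAddrOfLocalVar(vd, Address(allocaVal, ty, align));
  //   ...
  //   Address addr = getAddrOfLocalVar(vd); // later uses of `vd` resolve here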

  /// Load the value for 'this'. This function is only valid while generating
  /// code for a C++ member function.
  /// FIXME(cir): this should return a mlir::Value!
  mlir::Value loadCXXThis() {
    assert(cxxThisValue && "no 'this' value for this function");
    return cxxThisValue;
  }

  /// Get an appropriate 'undef' rvalue for the given type.
  /// TODO: What's the equivalent for MLIR? Currently we're only using this for
  /// void types so it just returns RValue::get(nullptr), but it'll need to be
  /// addressed later.
  RValue getUndefRValue(clang::QualType ty);

  cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                           cir::FuncType funcType);

  clang::QualType buildFunctionArgList(clang::GlobalDecl gd,
                                       FunctionArgList &args);

  /// Emit code for the start of a function.
  /// \param loc The location to be associated with the function.
  /// \param startLoc The location of the function body.
  void startFunction(clang::GlobalDecl gd, clang::QualType returnType,
                     cir::FuncOp fn, cir::FuncType funcType,
                     FunctionArgList args, clang::SourceLocation loc,
                     clang::SourceLocation startLoc);

  /// Represents a scope, including function bodies, compound statements, and
  /// the substatements of if/while/do/for/switch/try statements. This class
  /// handles any automatic cleanup, along with the return value.
  struct LexicalScope {
  private:
    // TODO(CIR): This will live in the base class RunCleanupScope once that
    // class is upstreamed.
    CIRGenFunction &cgf;

    // Points to the scope entry block. This is useful, for instance, for
    // helping to insert allocas before finalizing any recursive CodeGen from
    // switches.
    mlir::Block *entryBlock;

    LexicalScope *parentScope = nullptr;

    // Only Regular is used at the moment. Support for other kinds will be
    // added as the relevant statements/expressions are upstreamed.
    enum Kind {
      Regular,   // cir.if, cir.scope, if_regions
      Ternary,   // cir.ternary
      Switch,    // cir.switch
      Try,       // cir.try
      GlobalInit // cir.global initialization code
    };
    Kind scopeKind = Kind::Regular;

    // The scope return value.
    mlir::Value retVal = nullptr;

    mlir::Location beginLoc;
    mlir::Location endLoc;

  public:
    unsigned depth = 0;

    LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
        : cgf(cgf), entryBlock(eb), parentScope(cgf.curLexScope), beginLoc(loc),
          endLoc(loc) {

      assert(entryBlock && "LexicalScope requires an entry block");
      cgf.curLexScope = this;
      if (parentScope)
        ++depth;

      if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
        assert(fusedLoc.getLocations().size() == 2 && "too many locations");
        beginLoc = fusedLoc.getLocations()[0];
        endLoc = fusedLoc.getLocations()[1];
      }
    }

    void setRetVal(mlir::Value v) { retVal = v; }

    void cleanup();
    void restore() { cgf.curLexScope = parentScope; }

    ~LexicalScope() {
      assert(!cir::MissingFeatures::generateDebugInfo());
      cleanup();
      restore();
    }

    // ---
    // Kind
    // ---
    bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
    bool isRegular() { return scopeKind == Kind::Regular; }
    bool isSwitch() { return scopeKind == Kind::Switch; }
    bool isTernary() { return scopeKind == Kind::Ternary; }
    bool isTry() { return scopeKind == Kind::Try; }

    void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
    void setAsSwitch() { scopeKind = Kind::Switch; }
    void setAsTernary() { scopeKind = Kind::Ternary; }

    // ---
    // Return handling.
    // ---

  private:
    // `returnBlock`, `returnLoc`, and all the functions that deal with them
    // will change and become more complicated when `switch` statements are
    // upstreamed. `case` statements within the `switch` are in the same scope
    // but have their own regions. Therefore the LexicalScope will need to
    // keep track of multiple return blocks.
    mlir::Block *returnBlock = nullptr;
    std::optional<mlir::Location> returnLoc;

    // See the comment on `getOrCreateRetBlock`.
    mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
      assert(returnBlock == nullptr && "only one return block per scope");
      // Create the cleanup block but don't hook it up just yet.
      mlir::OpBuilder::InsertionGuard guard(cgf.builder);
      returnBlock =
          cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
      updateRetLoc(returnBlock, loc);
      return returnBlock;
    }

    cir::ReturnOp emitReturn(mlir::Location loc);
    void emitImplicitReturn();

  public:
    mlir::Block *getRetBlock() { return returnBlock; }
    mlir::Location getRetLoc(mlir::Block *b) { return *returnLoc; }
    void updateRetLoc(mlir::Block *b, mlir::Location loc) { returnLoc = loc; }

    // Create the return block for this scope, or return the existing one.
    // This get-or-create logic is necessary to handle multiple return
    // statements within the same scope, which can happen if some of them are
    // dead code or if there is a `goto` into the middle of the scope.
    mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
      if (returnBlock == nullptr) {
        returnBlock = createRetBlock(cgf, loc);
        return returnBlock;
      }
      updateRetLoc(returnBlock, loc);
      return returnBlock;
    }

    mlir::Block *getEntryBlock() { return entryBlock; }
  };
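
  // Statement emission typically brackets each new region with a LexicalScope
  // (a sketch; hypothetical caller code):
  //
  //   {
  //     LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()};
  //     if (emitStmt(body, /*useCurrentScope=*/true).failed())
  //       return mlir::failure();
  //   } // ~LexicalScope runs cleanup() and restores curLexScope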

  LexicalScope *curLexScope = nullptr;

  /// ----------------------
  /// CIR emit functions
  /// ----------------------
private:
  void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
                              clang::CharUnits alignment);

  CIRGenCallee emitDirectCallee(const GlobalDecl &gd);

public:
  Address emitAddrOfFieldStorage(Address base, const FieldDecl *field,
                                 llvm::StringRef fieldName,
                                 unsigned fieldIndex);

  mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
                         mlir::Location loc, clang::CharUnits alignment,
                         bool insertIntoFnEntryBlock,
                         mlir::Value arraySize = nullptr);
  mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
                         mlir::Location loc, clang::CharUnits alignment,
                         mlir::OpBuilder::InsertPoint ip,
                         mlir::Value arraySize = nullptr);

  void emitAggExpr(const clang::Expr *e, AggValueSlot slot);

  LValue emitAggExprToLValue(const Expr *e);

  /// Emit code to compute the specified expression, which can have any type.
  /// The result is returned as an RValue struct. If this is an aggregate
  /// expression, the aggloc/agglocvolatile arguments indicate where the result
  /// should be returned.
  RValue emitAnyExpr(const clang::Expr *e);

  /// Similarly to emitAnyExpr(), however, the result will always be accessible
  /// even if no aggregate location is provided.
  RValue emitAnyExprToTemp(const clang::Expr *e);

  LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);

  Address emitArrayToPointerDecay(const Expr *array);

  AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d);

  /// Emit code and set up symbol table for a variable declaration with auto,
  /// register, or no storage class specifier. These turn into simple stack
  /// objects, or globals depending on the target.
  void emitAutoVarDecl(const clang::VarDecl &d);

  void emitAutoVarCleanups(const AutoVarEmission &emission);
  void emitAutoVarInit(const AutoVarEmission &emission);
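
  // emitAutoVarDecl is expected to be roughly the three-phase sequence below,
  // mirroring classic CodeGen's EmitAutoVarDecl (a sketch, not the exact
  // implementation):
  //
  //   AutoVarEmission emission = emitAutoVarAlloca(d);
  //   emitAutoVarInit(emission);
  //   emitAutoVarCleanups(emission);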

  LValue emitBinaryOperatorLValue(const BinaryOperator *e);

  mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);

  RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
                         const clang::CallExpr *e, ReturnValueSlot returnValue);

  RValue emitCall(const CIRGenFunctionInfo &funcInfo,
                  const CIRGenCallee &callee, ReturnValueSlot returnValue,
                  const CallArgList &args, cir::CIRCallOpInterface *callOp,
                  mlir::Location loc);
  RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
                  const clang::CallExpr *e, ReturnValueSlot returnValue);
  void emitCallArg(CallArgList &args, const clang::Expr *e,
                   clang::QualType argType);
  void emitCallArgs(
      CallArgList &args, PrototypeWrapper prototype,
      llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
      AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
  RValue emitCallExpr(const clang::CallExpr *e,
                      ReturnValueSlot returnValue = ReturnValueSlot());
  LValue emitCallExprLValue(const clang::CallExpr *e);
  CIRGenCallee emitCallee(const clang::Expr *e);

  template <typename T>
  mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
                                             mlir::ArrayAttr value,
                                             cir::CaseOpKind kind,
                                             bool buildingTopLevelCase);

  mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
                                   mlir::Type condType,
                                   bool buildingTopLevelCase);

  LValue emitCastLValue(const CastExpr *e);

  LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);

  mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);

  void emitCXXConstructExpr(const clang::CXXConstructExpr *e,
                            AggValueSlot dest);

  void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
                              clang::CXXCtorType type, bool forVirtualBase,
                              bool delegating, AggValueSlot thisAVS,
                              const clang::CXXConstructExpr *e);

  void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
                              clang::CXXCtorType type, bool forVirtualBase,
                              bool delegating, Address thisAddr,
                              CallArgList &args, clang::SourceLocation loc);

  mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
                                          llvm::ArrayRef<const Attr *> attrs);

  RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e,
                               ReturnValueSlot returnValue);

  RValue emitCXXMemberOrOperatorCall(
      const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
      ReturnValueSlot returnValue, mlir::Value thisPtr,
      mlir::Value implicitParam, clang::QualType implicitParamTy,
      const clang::CallExpr *ce, CallArgList *rtlArgs);

  RValue emitCXXMemberOrOperatorMemberCallExpr(
      const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
      ReturnValueSlot returnValue, bool hasQualifier,
      clang::NestedNameSpecifier *qualifier, bool isArrow,
      const clang::Expr *base);

  RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
                                       const CXXMethodDecl *md,
                                       ReturnValueSlot returnValue);

  mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);

  /// Emit an expression as an initializer for an object (variable, field, etc.)
  /// at the given location. The expression is not necessarily the normal
  /// initializer for the object, and the address is not necessarily
  /// its normal location.
  ///
  /// \param init the initializing expression
  /// \param d the object to act as if we're initializing
  /// \param lvalue the lvalue to initialize
  /// \param capturedByInit true if \p d is a __block variable whose address is
  /// potentially changed by the initializer
  void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
                      LValue lvalue, bool capturedByInit = false);

  mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);

  mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);

  /// Emit the computation of the specified expression of scalar type.
  mlir::Value emitScalarExpr(const clang::Expr *e);

  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
                                      bool isInc, bool isPre);

  /// Build a debug stoppoint if we are emitting debug info.
  void emitStopPoint(const Stmt *s);

  // Build CIR for a statement. useCurrentScope should be true if no
  // new scopes need be created when finding a compound statement.
  mlir::LogicalResult
  emitStmt(const clang::Stmt *s, bool useCurrentScope,
           llvm::ArrayRef<const Attr *> attrs = std::nullopt);

  mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
                                     bool useCurrentScope);

  mlir::LogicalResult emitForStmt(const clang::ForStmt &s);

  void emitCompoundStmt(const clang::CompoundStmt &s);

  void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);

  void emitDecl(const clang::Decl &d);
  mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
  LValue emitDeclRefLValue(const clang::DeclRefExpr *e);

  mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
                                      mlir::Type condType,
                                      bool buildingTopLevelCase);

  /// Emit an `if` on a boolean condition to the specified blocks.
  /// FIXME: Based on the condition, this might try to simplify the codegen of
  /// the conditional based on the branch.
  /// In the future, we may apply code generation simplifications here,
  /// similar to those used in classic LLVM codegen.
  /// See `EmitBranchOnBoolExpr` for inspiration.
  mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
                                       const clang::Stmt *thenS,
                                       const clang::Stmt *elseS);
  cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
                             BuilderCallbackRef thenBuilder,
                             mlir::Location thenLoc,
                             BuilderCallbackRef elseBuilder,
                             std::optional<mlir::Location> elseLoc = {});

  mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);

  mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);

  /// Emit code to compute the specified expression,
  /// ignoring the result.
  void emitIgnoredExpr(const clang::Expr *e);

  /// Given an expression that represents a value lvalue, this method emits
  /// the address of the lvalue, then loads the result as an rvalue,
  /// returning the rvalue.
  RValue emitLoadOfLValue(LValue lv, SourceLocation loc);

  Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
                              LValueBaseInfo *pointeeBaseInfo);
  LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
                                   QualType refTy, AlignmentSource source);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation. The l-value must be a simple
  /// l-value.
  mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);

  /// Emit code to compute a designator that specifies the location
  /// of the expression.
  /// FIXME: document this function better.
  LValue emitLValue(const clang::Expr *e);
  LValue emitLValueForField(LValue base, const clang::FieldDecl *field);

  LValue emitMemberExpr(const MemberExpr *e);

  /// Given an expression with a pointer type, emit the value and compute our
  /// best estimate of the alignment of the pointee.
  ///
  /// One reasonable way to use this information is when there's a language
  /// guarantee that the pointer must be aligned to some stricter value, and
  /// we're simply trying to ensure that sufficiently obvious uses of under-
  /// aligned objects don't get miscompiled; for example, a placement new
  /// into the address of a local variable. In such a case, it's quite
  /// reasonable to just ignore the returned alignment when it isn't from an
  /// explicit source.
  Address emitPointerWithAlignment(const clang::Expr *expr,
                                   LValueBaseInfo *baseInfo);
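
  // Sketch of a typical use (hypothetical caller code): derive an Address for
  // a pointee access and wrap it in an LValue.
  //
  //   LValueBaseInfo baseInfo;
  //   Address addr = emitPointerWithAlignment(base, &baseInfo);
  //   LValue lv = makeAddrLValue(addr, e->getType(), baseInfo);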

  /// Emits a reference binding to the passed in expression.
  RValue emitReferenceBindingToExpr(const Expr *e);

  mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);

  mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are CIR scalar types.
  mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
                                   clang::QualType dstType,
                                   clang::SourceLocation loc);

  void emitScalarInit(const clang::Expr *init, mlir::Location loc,
                      LValue lvalue, bool capturedByInit = false);

  void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
                         clang::QualType ty, bool isInit = false,
                         bool isNontemporal = false);
  void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);

  /// Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
  /// is 'Ty'.
  void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);

  mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);

  LValue emitStringLiteralLValue(const StringLiteral *e);

  mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
  mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
                                     bool buildingTopLevelCase);
  mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);

  /// Given a value and its clang type, returns the value casted to its memory
  /// representation.
  /// Note: CIR defers most of the special casting to the final lowering passes
  /// to conserve the high level information.
  mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);

  LValue emitUnaryOpLValue(const clang::UnaryOperator *e);

  /// This method handles emission of any variable declaration
  /// inside a function, including static vars etc.
  void emitVarDecl(const clang::VarDecl &d);

  mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);

  /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
  /// nonnull, if \p lhs is marked _Nonnull.
  void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
                            clang::SourceLocation loc);

  /// An object to manage conditionally-evaluated expressions.
  class ConditionalEvaluation {
    CIRGenFunction &cgf;
    mlir::OpBuilder::InsertPoint insertPt;

  public:
    ConditionalEvaluation(CIRGenFunction &cgf)
        : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
    ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
        : cgf(cgf), insertPt(ip) {}

    void beginEvaluation() {
      assert(cgf.outermostConditional != this);
      if (!cgf.outermostConditional)
        cgf.outermostConditional = this;
    }

    void endEvaluation() {
      assert(cgf.outermostConditional != nullptr);
      if (cgf.outermostConditional == this)
        cgf.outermostConditional = nullptr;
    }

    /// Returns the insertion point which will be executed prior to each
    /// evaluation of the conditional code. In LLVM OG, this method
    /// is called getStartingBlock.
    mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
  };
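
  // Sketch of the intended pattern when emitting one arm of a conditional
  // expression (hypothetical caller code):
  //
  //   ConditionalEvaluation eval(*this);
  //   eval.beginEvaluation();
  //   // ... emit the branch; code that creates temporaries here can consult
  //   // isInConditionalBranch() and setBeforeOutermostConditional() ...
  //   eval.endEvaluation();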

  struct ConditionalInfo {
    std::optional<LValue> lhs{}, rhs{};
    mlir::Value result{};
  };

  // Return true if we're currently emitting one branch or the other of a
  // conditional expression.
  bool isInConditionalBranch() const { return outermostConditional != nullptr; }

  void setBeforeOutermostConditional(mlir::Value value, Address addr) {
    assert(isInConditionalBranch());
    {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
      builder.createStore(
          value.getLoc(), value, addr,
          mlir::IntegerAttr::get(
              mlir::IntegerType::get(value.getContext(), 64),
              (uint64_t)addr.getAlignment().getAsAlign().value()));
    }
  }

  // Points to the outermost active conditional control. This is used so that
  // we know if a temporary should be destroyed conditionally.
  ConditionalEvaluation *outermostConditional = nullptr;

  template <typename FuncTy>
  ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
                                        const FuncTy &branchGenFunc);

  mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
                                    const clang::Stmt *thenS,
                                    const clang::Stmt *elseS);

  /// ----------------------
  /// CIR build helpers
  /// ----------------------
public:
  cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
                                 const Twine &name = "tmp",
                                 mlir::Value arraySize = nullptr,
                                 bool insertIntoFnEntryBlock = false);
  cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
                                 const Twine &name = "tmp",
                                 mlir::OpBuilder::InsertPoint ip = {},
                                 mlir::Value arraySize = nullptr);
  Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
                           const Twine &name = "tmp",
                           mlir::Value arraySize = nullptr,
                           Address *alloca = nullptr,
                           mlir::OpBuilder::InsertPoint ip = {});
  Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
                                      mlir::Location loc,
                                      const Twine &name = "tmp",
                                      mlir::Value arraySize = nullptr,
                                      mlir::OpBuilder::InsertPoint ip = {});

  /// Create a temporary memory object of the given type, with
  /// appropriate alignment, and cast it to the default address space. Returns
  /// the original alloca instruction by \p Alloca if it is not nullptr.
  Address createMemTemp(QualType t, mlir::Location loc,
                        const Twine &name = "tmp", Address *alloca = nullptr,
                        mlir::OpBuilder::InsertPoint ip = {});
  Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
                        const Twine &name = "tmp", Address *alloca = nullptr,
                        mlir::OpBuilder::InsertPoint ip = {});

  //===--------------------------------------------------------------------===//
  // OpenACC Emission
  //===--------------------------------------------------------------------===//
private:
  template <typename Op>
  Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
                   SourceLocation dirLoc,
                   llvm::ArrayRef<const OpenACCClause *> clauses);
  // Function to do the basic implementation of an operation with an Associated
  // Statement. Models AssociatedStmtConstruct.
  template <typename Op, typename TermOp>
  mlir::LogicalResult emitOpenACCOpAssociatedStmt(
      mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
      SourceLocation dirLoc, llvm::ArrayRef<const OpenACCClause *> clauses,
      const Stmt *associatedStmt);

  template <typename Op, typename TermOp>
  mlir::LogicalResult emitOpenACCOpCombinedConstruct(
      mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
      SourceLocation dirLoc, llvm::ArrayRef<const OpenACCClause *> clauses,
      const Stmt *loopStmt);

  template <typename Op>
  void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
                          SourceLocation dirLoc,
                          ArrayRef<const OpenACCClause *> clauses);
  // The second template argument doesn't need to be a template, since it
  // should always be an mlir::acc::LoopOp, but as this is a template anyway,
  // we make it a template argument, as this way we can avoid including the
  // OpenACC MLIR headers here. We will count on linker failures/explicit
  // instantiation to ensure we don't mess this up, but it is only called from
  // 1 place, and instantiated 3x.
  template <typename ComputeOp, typename LoopOp>
  void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
                          OpenACCDirectiveKind dirKind, SourceLocation dirLoc,
                          ArrayRef<const OpenACCClause *> clauses);

  // The OpenACC LoopOp requires that we have auto, seq, or independent on all
  // LoopOp operations for the 'none' device type case. This function checks if
  // the LoopOp has one, else it updates it to have one.
  void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
                               OpenACCDirectiveKind dk);

public:
  mlir::LogicalResult
  emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s);
  mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
  mlir::LogicalResult
  emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s);
  mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
  mlir::LogicalResult
  emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s);
  mlir::LogicalResult
  emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s);
  mlir::LogicalResult
  emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s);
  mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
  mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
  mlir::LogicalResult
  emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s);
  mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
  mlir::LogicalResult
  emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s);
  mlir::LogicalResult
  emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s);
  mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);

  void emitOpenACCDeclare(const OpenACCDeclareDecl &d);
  void emitOpenACCRoutine(const OpenACCRoutineDecl &d);

private:
  QualType getVarArgType(const Expr *arg);
};

} // namespace clang::CIRGen

#endif