| 1 | //===----------------------------------------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // Internal per-function state used for AST-to-ClangIR code gen |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H |
| 14 | #define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H |
| 15 | |
| 16 | #include "CIRGenBuilder.h" |
| 17 | #include "CIRGenCall.h" |
| 18 | #include "CIRGenModule.h" |
| 19 | #include "CIRGenTypeCache.h" |
| 20 | #include "CIRGenValue.h" |
| 21 | |
| 22 | #include "Address.h" |
| 23 | |
| 24 | #include "clang/AST/ASTContext.h" |
| 25 | #include "clang/AST/CharUnits.h" |
| 26 | #include "clang/AST/Decl.h" |
| 27 | #include "clang/AST/Stmt.h" |
| 28 | #include "clang/AST/Type.h" |
| 29 | #include "clang/CIR/Dialect/IR/CIRDialect.h" |
| 30 | #include "clang/CIR/MissingFeatures.h" |
| 31 | #include "clang/CIR/TypeEvaluationKind.h" |
| 32 | |
// Forward declarations.
namespace {
// NOTE(review): an anonymous namespace in a header gives this name internal
// linkage in every including TU; it appears to exist so CIRGenFunction can
// befriend the TU-local ScalarExprEmitter (see the friend declaration in the
// class) — confirm this is intentional.
class ScalarExprEmitter;
} // namespace

namespace mlir {
namespace acc {
class LoopOp;
} // namespace acc
} // namespace mlir
| 42 | |
| 43 | namespace clang::CIRGen { |
| 44 | |
| 45 | class CIRGenFunction : public CIRGenTypeCache { |
| 46 | public: |
  /// The module into which this function's IR is being emitted.
  CIRGenModule &cgm;

private:
  friend class ::ScalarExprEmitter;
  /// The builder is a helper class to create IR inside a function. The
  /// builder is stateful, in particular it keeps an "insertion point": this
  /// is where the next operations will be introduced.
  CIRGenBuilderTy &builder;

public:
  /// The GlobalDecl for the current function being compiled or the global
  /// variable currently being initialized.
  clang::GlobalDecl curGD;

  /// The compiler-generated variable that holds the return value.
  std::optional<mlir::Value> fnRetAlloca;

  /// CXXThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
  ImplicitParamDecl *cxxabiThisDecl = nullptr;
  mlir::Value cxxabiThisValue = nullptr;
  mlir::Value cxxThisValue = nullptr;
  clang::CharUnits cxxThisAlignment;

  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
  /// this expression.
  Address cxxDefaultInitExprThis = Address::invalid();

  // Holds the Decl for the current outermost non-closure context.
  const clang::Decl *curFuncDecl = nullptr;

  /// The function for which code is currently being generated.
  cir::FuncOp curFn;

  using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
  /// This keeps track of the CIR allocas or globals for local C
  /// declarations.
  DeclMapTy localDeclMap;

  /// The type of the condition for the emitting switch statement.
  llvm::SmallVector<mlir::Type, 2> condTypeStack;

  clang::ASTContext &getContext() const { return cgm.getASTContext(); }

  CIRGenBuilderTy &getBuilder() { return builder; }

  CIRGenModule &getCIRGenModule() { return cgm; }
  const CIRGenModule &getCIRGenModule() const { return cgm; }

  /// The entry block of the function currently being emitted.
  mlir::Block *getCurFunctionEntryBlock() { return &curFn.getRegion().front(); }

  /// Sanitizers enabled for this function.
  clang::SanitizerSet sanOpts;

  /// Whether or not a Microsoft-style asm block has been processed within
  /// this function. These can potentially set the return value.
  bool sawAsmBlock = false;
| 104 | |
| 105 | mlir::Type convertTypeForMem(QualType t); |
| 106 | |
| 107 | mlir::Type convertType(clang::QualType t); |
| 108 | mlir::Type convertType(const TypeDecl *t) { |
| 109 | return convertType(getContext().getTypeDeclType(Decl: t)); |
| 110 | } |
| 111 | |
| 112 | /// Return the cir::TypeEvaluationKind of QualType \c type. |
| 113 | static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type); |
| 114 | |
| 115 | static bool hasScalarEvaluationKind(clang::QualType type) { |
| 116 | return getEvaluationKind(type) == cir::TEK_Scalar; |
| 117 | } |
| 118 | |
| 119 | static bool hasAggregateEvaluationKind(clang::QualType type) { |
| 120 | return getEvaluationKind(type) == cir::TEK_Aggregate; |
| 121 | } |
| 122 | |
  CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                 bool suppressNewContext = false);
  ~CIRGenFunction();

  CIRGenTypes &getTypes() const { return cgm.getTypes(); }

  const TargetInfo &getTarget() const { return cgm.getTarget(); }
  mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }

  // ---------------------
  // Opaque value handling
  // ---------------------

  /// Keeps track of the current set of opaque value expressions.
  llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
  llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
| 139 | |
| 140 | public: |
| 141 | /// A non-RAII class containing all the information about a bound |
| 142 | /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for |
| 143 | /// this which makes individual mappings very simple; using this |
| 144 | /// class directly is useful when you have a variable number of |
| 145 | /// opaque values or don't want the RAII functionality for some |
| 146 | /// reason. |
| 147 | class OpaqueValueMappingData { |
| 148 | const OpaqueValueExpr *opaqueValue; |
| 149 | bool boundLValue; |
| 150 | |
| 151 | OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue) |
| 152 | : opaqueValue(ov), boundLValue(boundLValue) {} |
| 153 | |
| 154 | public: |
| 155 | OpaqueValueMappingData() : opaqueValue(nullptr) {} |
| 156 | |
| 157 | static bool shouldBindAsLValue(const Expr *expr) { |
| 158 | // gl-values should be bound as l-values for obvious reasons. |
| 159 | // Records should be bound as l-values because IR generation |
| 160 | // always keeps them in memory. Expressions of function type |
| 161 | // act exactly like l-values but are formally required to be |
| 162 | // r-values in C. |
| 163 | return expr->isGLValue() || expr->getType()->isFunctionType() || |
| 164 | hasAggregateEvaluationKind(type: expr->getType()); |
| 165 | } |
| 166 | |
| 167 | static OpaqueValueMappingData |
| 168 | bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) { |
| 169 | if (shouldBindAsLValue(expr: ov)) |
| 170 | return bind(cgf, ov, lv: cgf.emitLValue(e)); |
| 171 | return bind(cgf, ov, rv: cgf.emitAnyExpr(e)); |
| 172 | } |
| 173 | |
| 174 | static OpaqueValueMappingData |
| 175 | bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) { |
| 176 | assert(shouldBindAsLValue(ov)); |
| 177 | cgf.opaqueLValues.insert(KV: std::make_pair(x&: ov, y: lv)); |
| 178 | return OpaqueValueMappingData(ov, true); |
| 179 | } |
| 180 | |
| 181 | static OpaqueValueMappingData |
| 182 | bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) { |
| 183 | assert(!shouldBindAsLValue(ov)); |
| 184 | cgf.opaqueRValues.insert(KV: std::make_pair(x&: ov, y: rv)); |
| 185 | |
| 186 | OpaqueValueMappingData data(ov, false); |
| 187 | |
| 188 | // Work around an extremely aggressive peephole optimization in |
| 189 | // EmitScalarConversion which assumes that all other uses of a |
| 190 | // value are extant. |
| 191 | assert(!cir::MissingFeatures::peepholeProtection() && "NYI" ); |
| 192 | return data; |
| 193 | } |
| 194 | |
| 195 | bool isValid() const { return opaqueValue != nullptr; } |
| 196 | void clear() { opaqueValue = nullptr; } |
| 197 | |
| 198 | void unbind(CIRGenFunction &cgf) { |
| 199 | assert(opaqueValue && "no data to unbind!" ); |
| 200 | |
| 201 | if (boundLValue) { |
| 202 | cgf.opaqueLValues.erase(Val: opaqueValue); |
| 203 | } else { |
| 204 | cgf.opaqueRValues.erase(Val: opaqueValue); |
| 205 | assert(!cir::MissingFeatures::peepholeProtection() && "NYI" ); |
| 206 | } |
| 207 | } |
| 208 | }; |
| 209 | |
| 210 | /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr. |
| 211 | class OpaqueValueMapping { |
| 212 | CIRGenFunction &cgf; |
| 213 | OpaqueValueMappingData data; |
| 214 | |
| 215 | public: |
| 216 | static bool shouldBindAsLValue(const Expr *expr) { |
| 217 | return OpaqueValueMappingData::shouldBindAsLValue(expr); |
| 218 | } |
| 219 | |
| 220 | /// Build the opaque value mapping for the given conditional |
| 221 | /// operator if it's the GNU ?: extension. This is a common |
| 222 | /// enough pattern that the convenience operator is really |
| 223 | /// helpful. |
| 224 | /// |
| 225 | OpaqueValueMapping(CIRGenFunction &cgf, |
| 226 | const AbstractConditionalOperator *op) |
| 227 | : cgf(cgf) { |
| 228 | if (mlir::isa<ConditionalOperator>(op)) |
| 229 | // Leave Data empty. |
| 230 | return; |
| 231 | |
| 232 | const BinaryConditionalOperator *e = |
| 233 | mlir::cast<BinaryConditionalOperator>(op); |
| 234 | data = OpaqueValueMappingData::bind(cgf, ov: e->getOpaqueValue(), |
| 235 | e: e->getCommon()); |
| 236 | } |
| 237 | |
| 238 | /// Build the opaque value mapping for an OpaqueValueExpr whose source |
| 239 | /// expression is set to the expression the OVE represents. |
| 240 | OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov) |
| 241 | : cgf(cgf) { |
| 242 | if (ov) { |
| 243 | assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used " |
| 244 | "for OVE with no source expression" ); |
| 245 | data = OpaqueValueMappingData::bind(cgf, ov, e: ov->getSourceExpr()); |
| 246 | } |
| 247 | } |
| 248 | |
| 249 | OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, |
| 250 | LValue lvalue) |
| 251 | : cgf(cgf), |
| 252 | data(OpaqueValueMappingData::bind(cgf, ov: opaqueValue, lv: lvalue)) {} |
| 253 | |
| 254 | OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, |
| 255 | RValue rvalue) |
| 256 | : cgf(cgf), |
| 257 | data(OpaqueValueMappingData::bind(cgf, ov: opaqueValue, rv: rvalue)) {} |
| 258 | |
| 259 | void pop() { |
| 260 | data.unbind(cgf); |
| 261 | data.clear(); |
| 262 | } |
| 263 | |
| 264 | ~OpaqueValueMapping() { |
| 265 | if (data.isValid()) |
| 266 | data.unbind(cgf); |
| 267 | } |
| 268 | }; |
| 269 | |
private:
  /// Declare a variable in the current scope, return success if the variable
  /// wasn't declared yet.
  void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
               mlir::Location loc, clang::CharUnits alignment,
               bool isParam = false);

public:
  mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);

  void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);

private:
  // Track current variable initialization (if there's one).
  // Managed exclusively through VarDeclContext below.
  const clang::VarDecl *currVarDecl = nullptr;
| 285 | class VarDeclContext { |
| 286 | CIRGenFunction &p; |
| 287 | const clang::VarDecl *oldVal = nullptr; |
| 288 | |
| 289 | public: |
| 290 | VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) { |
| 291 | if (p.currVarDecl) |
| 292 | oldVal = p.currVarDecl; |
| 293 | p.currVarDecl = value; |
| 294 | } |
| 295 | |
| 296 | /// Can be used to restore the state early, before the dtor |
| 297 | /// is run. |
| 298 | void restore() { p.currVarDecl = oldVal; } |
| 299 | ~VarDeclContext() { restore(); } |
| 300 | }; |
| 301 | |
public:
  /// Use to track source locations across nested visitor traversals.
  /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
  std::optional<mlir::Location> currSrcLoc;
| 306 | class SourceLocRAIIObject { |
| 307 | CIRGenFunction &cgf; |
| 308 | std::optional<mlir::Location> oldLoc; |
| 309 | |
| 310 | public: |
| 311 | SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) { |
| 312 | if (cgf.currSrcLoc) |
| 313 | oldLoc = cgf.currSrcLoc; |
| 314 | cgf.currSrcLoc = value; |
| 315 | } |
| 316 | |
| 317 | /// Can be used to restore the state early, before the dtor |
| 318 | /// is run. |
| 319 | void restore() { cgf.currSrcLoc = oldLoc; } |
| 320 | ~SourceLocRAIIObject() { restore(); } |
| 321 | }; |
| 322 | |
  /// Hold counters for incrementally naming temporaries.
  unsigned counterAggTmp = 0;
  std::string getCounterAggTmpAsString();

  /// Helpers to convert Clang's SourceLocation to a MLIR Location.
  mlir::Location getLoc(clang::SourceLocation srcLoc);
  mlir::Location getLoc(clang::SourceRange srcLoc);
  /// Fuse two MLIR locations into one.
  mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);

  const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
| 335 | // an ObjCMethodDecl. |
| 336 | struct PrototypeWrapper { |
| 337 | llvm::PointerUnion<const clang::FunctionProtoType *, |
| 338 | const clang::ObjCMethodDecl *> |
| 339 | p; |
| 340 | |
| 341 | PrototypeWrapper(const clang::FunctionProtoType *ft) : p(ft) {} |
| 342 | PrototypeWrapper(const clang::ObjCMethodDecl *md) : p(md) {} |
| 343 | }; |
| 344 | |
| 345 | bool isLValueSuitableForInlineAtomic(LValue lv); |
| 346 | |
| 347 | /// An abstract representation of regular/ObjC call/message targets. |
| 348 | class AbstractCallee { |
| 349 | /// The function declaration of the callee. |
| 350 | [[maybe_unused]] const clang::Decl *calleeDecl; |
| 351 | |
| 352 | public: |
| 353 | AbstractCallee() : calleeDecl(nullptr) {} |
| 354 | AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {} |
| 355 | |
| 356 | bool hasFunctionDecl() const { |
| 357 | return llvm::isa_and_nonnull<clang::FunctionDecl>(Val: calleeDecl); |
| 358 | } |
| 359 | |
| 360 | unsigned getNumParams() const { |
| 361 | if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(Val: calleeDecl)) |
| 362 | return fd->getNumParams(); |
| 363 | return llvm::cast<clang::ObjCMethodDecl>(Val: calleeDecl)->param_size(); |
| 364 | } |
| 365 | |
| 366 | const clang::ParmVarDecl *getParamDecl(unsigned I) const { |
| 367 | if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(Val: calleeDecl)) |
| 368 | return fd->getParamDecl(i: I); |
| 369 | return *(llvm::cast<clang::ObjCMethodDecl>(Val: calleeDecl)->param_begin() + |
| 370 | I); |
| 371 | } |
| 372 | }; |
| 373 | |
  void finishFunction(SourceLocation endLoc);

  /// Determine whether the given initializer is trivial in the sense
  /// that it requires no code to be generated.
  bool isTrivialInitializer(const Expr *init);

  /// If the specified expression does not fold to a constant, or if it does
  /// but contains a label, return false. If it constant folds return true and
  /// set the boolean result in Result.
  bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
                           bool allowLabels = false);
  /// Integer-valued counterpart of constantFoldsToBool.
  bool constantFoldsToSimpleInteger(const clang::Expr *cond,
                                    llvm::APSInt &resultInt,
                                    bool allowLabels = false);

  /// Return true if the statement contains a label in it. If
  /// this statement is not executed normally, it not containing a label means
  /// that we can just remove the code.
  bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
| 393 | |
| 394 | class ConstantEmission { |
| 395 | // Cannot use mlir::TypedAttr directly here because of bit availability. |
| 396 | llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference; |
| 397 | ConstantEmission(mlir::TypedAttr c, bool isReference) |
| 398 | : valueAndIsReference(c, isReference) {} |
| 399 | |
| 400 | public: |
| 401 | ConstantEmission() {} |
| 402 | static ConstantEmission forReference(mlir::TypedAttr c) { |
| 403 | return ConstantEmission(c, true); |
| 404 | } |
| 405 | static ConstantEmission forValue(mlir::TypedAttr c) { |
| 406 | return ConstantEmission(c, false); |
| 407 | } |
| 408 | |
| 409 | explicit operator bool() const { |
| 410 | return valueAndIsReference.getOpaqueValue() != nullptr; |
| 411 | } |
| 412 | |
| 413 | bool isReference() const { return valueAndIsReference.getInt(); } |
| 414 | LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const { |
| 415 | assert(isReference()); |
| 416 | cgf.cgm.errorNYI(refExpr->getSourceRange(), |
| 417 | "ConstantEmission::getReferenceLValue" ); |
| 418 | return {}; |
| 419 | } |
| 420 | |
| 421 | mlir::TypedAttr getValue() const { |
| 422 | assert(!isReference()); |
| 423 | return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer()); |
| 424 | } |
| 425 | }; |
| 426 | |
| 427 | ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr); |
| 428 | |
| 429 | struct AutoVarEmission { |
| 430 | const clang::VarDecl *Variable; |
| 431 | /// The address of the alloca for languages with explicit address space |
| 432 | /// (e.g. OpenCL) or alloca casted to generic pointer for address space |
| 433 | /// agnostic languages (e.g. C++). Invalid if the variable was emitted |
| 434 | /// as a global constant. |
| 435 | Address Addr; |
| 436 | |
| 437 | /// True if the variable is of aggregate type and has a constant |
| 438 | /// initializer. |
| 439 | bool IsConstantAggregate = false; |
| 440 | |
| 441 | /// True if the variable is a __block variable that is captured by an |
| 442 | /// escaping block. |
| 443 | bool IsEscapingByRef = false; |
| 444 | |
| 445 | mlir::Value NRVOFlag{}; |
| 446 | |
| 447 | struct Invalid {}; |
| 448 | AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} |
| 449 | |
| 450 | AutoVarEmission(const clang::VarDecl &variable) |
| 451 | : Variable(&variable), Addr(Address::invalid()) {} |
| 452 | |
| 453 | static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } |
| 454 | |
| 455 | bool wasEmittedAsGlobal() const { return !Addr.isValid(); } |
| 456 | |
| 457 | /// Returns the raw, allocated address, which is not necessarily |
| 458 | /// the address of the object itself. It is casted to default |
| 459 | /// address space for address space agnostic languages. |
| 460 | Address getAllocatedAddress() const { return Addr; } |
| 461 | |
| 462 | /// Returns the address of the object within this declaration. |
| 463 | /// Note that this does not chase the forwarding pointer for |
| 464 | /// __block decls. |
| 465 | Address getObjectAddress(CIRGenFunction &cgf) const { |
| 466 | if (!IsEscapingByRef) |
| 467 | return Addr; |
| 468 | |
| 469 | assert(!cir::MissingFeatures::opAllocaEscapeByReference()); |
| 470 | return Address::invalid(); |
| 471 | } |
| 472 | }; |
| 473 | |
  /// Perform the usual unary conversions on the specified expression and
  /// compare the result against zero, returning an Int1Ty value.
  mlir::Value evaluateExprAsBool(const clang::Expr *e);

  /// Attach the initializer for \p d to the static variable's GlobalOp.
  cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
                                              cir::GlobalOp gv,
                                              cir::GetGlobalOp gvAddr);
| 481 | |
| 482 | /// Set the address of a local variable. |
| 483 | void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) { |
| 484 | assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!" ); |
| 485 | localDeclMap.insert(KV: {vd, addr}); |
| 486 | // TODO: Add symbol table support |
| 487 | } |
| 488 | |
  bool shouldNullCheckClassCastValue(const CastExpr *ce);

  /// Load a temporary's value back out as an RValue of the given type.
  RValue convertTempToRValue(Address addr, clang::QualType type,
                             clang::SourceLocation loc);

  static bool
  isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor);
| 496 | |
| 497 | /// A scope within which we are constructing the fields of an object which |
| 498 | /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if |
| 499 | /// we need to evaluate the CXXDefaultInitExpr within the evaluation. |
| 500 | class FieldConstructionScope { |
| 501 | public: |
| 502 | FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr) |
| 503 | : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) { |
| 504 | cgf.cxxDefaultInitExprThis = thisAddr; |
| 505 | } |
| 506 | ~FieldConstructionScope() { |
| 507 | cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis; |
| 508 | } |
| 509 | |
| 510 | private: |
| 511 | CIRGenFunction &cgf; |
| 512 | Address oldCXXDefaultInitExprThis; |
| 513 | }; |
| 514 | |
| 515 | LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t); |
| 516 | LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty); |
| 517 | |
| 518 | /// Construct an address with the natural alignment of T. If a pointer to T |
| 519 | /// is expected to be signed, the pointer passed to this function must have |
| 520 | /// been signed, and the returned Address will have the pointer authentication |
| 521 | /// information needed to authenticate the signed pointer. |
| 522 | Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, |
| 523 | CharUnits alignment, |
| 524 | bool forPointeeType = false, |
| 525 | LValueBaseInfo *baseInfo = nullptr) { |
| 526 | if (alignment.isZero()) |
| 527 | alignment = cgm.getNaturalTypeAlignment(t, baseInfo); |
| 528 | return Address(ptr, convertTypeForMem(t), alignment); |
| 529 | } |
| 530 | |
| 531 | Address getAddressOfBaseClass( |
| 532 | Address value, const CXXRecordDecl *derived, |
| 533 | llvm::iterator_range<CastExpr::path_const_iterator> path, |
| 534 | bool nullCheckValue, SourceLocation loc); |
| 535 | |
| 536 | LValue makeAddrLValue(Address addr, QualType ty, |
| 537 | AlignmentSource source = AlignmentSource::Type) { |
| 538 | return makeAddrLValue(addr, ty, baseInfo: LValueBaseInfo(source)); |
| 539 | } |
| 540 | |
| 541 | LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo) { |
| 542 | return LValue::makeAddr(address: addr, t: ty, baseInfo); |
| 543 | } |
| 544 | |
| 545 | /// Return the address of a local variable. |
| 546 | Address getAddrOfLocalVar(const clang::VarDecl *vd) { |
| 547 | auto it = localDeclMap.find(Val: vd); |
| 548 | assert(it != localDeclMap.end() && |
| 549 | "Invalid argument to getAddrOfLocalVar(), no decl!" ); |
| 550 | return it->second; |
| 551 | } |
| 552 | |
| 553 | Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, |
| 554 | mlir::Type fieldType, unsigned index); |
| 555 | |
| 556 | /// Load the value for 'this'. This function is only valid while generating |
| 557 | /// code for an C++ member function. |
| 558 | /// FIXME(cir): this should return a mlir::Value! |
| 559 | mlir::Value loadCXXThis() { |
| 560 | assert(cxxThisValue && "no 'this' value for this function" ); |
| 561 | return cxxThisValue; |
| 562 | } |
| 563 | Address loadCXXThisAddress(); |
| 564 | |
  /// Convert the given pointer to a complete class to the given direct base.
  Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc,
                                                Address value,
                                                const CXXRecordDecl *derived,
                                                const CXXRecordDecl *base,
                                                bool baseIsVirtual);

  /// Determine whether a base class initialization may overlap some other
  /// object.
  AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd,
                                                const CXXRecordDecl *baseRD,
                                                bool isVirtual);

  /// Get an appropriate 'undef' rvalue for the given type.
  /// TODO: What's the equivalent for MLIR? Currently we're only using this for
  /// void types so it just returns RValue::get(nullptr) but it'll need
  /// addressed later.
  RValue getUndefRValue(clang::QualType ty);

  /// Emit the body of \p gd into \p fn.
  cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                           cir::FuncType funcType);

  /// Populate \p args with the formal parameters of \p gd and return the
  /// function's return type.
  clang::QualType buildFunctionArgList(clang::GlobalDecl gd,
                                       FunctionArgList &args);

  /// Emit code for the start of a function.
  /// \param loc The location to be associated with the function.
  /// \param startLoc The location of the function body.
  void startFunction(clang::GlobalDecl gd, clang::QualType returnType,
                     cir::FuncOp fn, cir::FuncType funcType,
                     FunctionArgList args, clang::SourceLocation loc,
                     clang::SourceLocation startLoc);
| 597 | |
  /// Represents a scope, including function bodies, compound statements, and
  /// the substatements of if/while/do/for/switch/try statements. This class
  /// handles any automatic cleanup, along with the return value.
  struct LexicalScope {
  private:
    // TODO(CIR): This will live in the base class RunCleanupScope once that
    // class is upstreamed.
    CIRGenFunction &cgf;

    // Points to the scope entry block. This is useful, for instance, for
    // helping to insert allocas before finalizing any recursive CodeGen from
    // switches.
    mlir::Block *entryBlock;

    // The scope that was current when this one was entered; restored by
    // restore().
    LexicalScope *parentScope = nullptr;

    // Only Regular is used at the moment. Support for other kinds will be
    // added as the relevant statements/expressions are upstreamed.
    enum Kind {
      Regular, // cir.if, cir.scope, if_regions
      Ternary, // cir.ternary
      Switch, // cir.switch
      Try, // cir.try
      GlobalInit // cir.global initialization code
    };
    Kind scopeKind = Kind::Regular;

    // The scope return value.
    mlir::Value retVal = nullptr;

    // Source extent of the scope; split out of a FusedLoc in the ctor.
    mlir::Location beginLoc;
    mlir::Location endLoc;

  public:
    unsigned depth = 0;

    LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
        : cgf(cgf), entryBlock(eb), parentScope(cgf.curLexScope), beginLoc(loc),
          endLoc(loc) {

      assert(entryBlock && "LexicalScope requires an entry block");
      cgf.curLexScope = this;
      // NOTE(review): this leaves depth at 0 or 1 only; if true nesting depth
      // is intended it would be parentScope->depth + 1 — confirm.
      if (parentScope)
        ++depth;

      // A two-element FusedLoc encodes the scope's begin/end positions.
      if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
        assert(fusedLoc.getLocations().size() == 2 && "too many locations");
        beginLoc = fusedLoc.getLocations()[0];
        endLoc = fusedLoc.getLocations()[1];
      }
    }

    void setRetVal(mlir::Value v) { retVal = v; }

    // Out-of-line: emits terminators/cleanup for the scope.
    void cleanup();
    void restore() { cgf.curLexScope = parentScope; }

    ~LexicalScope() {
      assert(!cir::MissingFeatures::generateDebugInfo());
      cleanup();
      restore();
    }

    // ---
    // Kind
    // ---
    bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
    bool isRegular() { return scopeKind == Kind::Regular; }
    bool isSwitch() { return scopeKind == Kind::Switch; }
    bool isTernary() { return scopeKind == Kind::Ternary; }
    bool isTry() { return scopeKind == Kind::Try; }

    void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
    void setAsSwitch() { scopeKind = Kind::Switch; }
    void setAsTernary() { scopeKind = Kind::Ternary; }

    // ---
    // Return handling.
    // ---

  private:
    // `returnBlock`, `returnLoc`, and all the functions that deal with them
    // will change and become more complicated when `switch` statements are
    // upstreamed. `case` statements within the `switch` are in the same scope
    // but have their own regions. Therefore the LexicalScope will need to
    // keep track of multiple return blocks.
    mlir::Block *returnBlock = nullptr;
    std::optional<mlir::Location> returnLoc;

    // See the comment on `getOrCreateRetBlock`.
    mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
      assert(returnBlock == nullptr && "only one return block per scope");
      // Create the cleanup block but don't hook it up just yet.
      mlir::OpBuilder::InsertionGuard guard(cgf.builder);
      returnBlock =
          cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
      updateRetLoc(returnBlock, loc);
      return returnBlock;
    }

    cir::ReturnOp emitReturn(mlir::Location loc);
    void emitImplicitReturn();

  public:
    mlir::Block *getRetBlock() { return returnBlock; }
    // NOTE(review): the block parameter is currently unused — a single
    // returnLoc is shared by the scope's one return block (see comment above).
    mlir::Location getRetLoc(mlir::Block *b) { return *returnLoc; }
    void updateRetLoc(mlir::Block *b, mlir::Location loc) { returnLoc = loc; }

    // Create the return block for this scope, or return the existing one.
    // This get-or-create logic is necessary to handle multiple return
    // statements within the same scope, which can happen if some of them are
    // dead code or if there is a `goto` into the middle of the scope.
    mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
      if (returnBlock == nullptr) {
        returnBlock = createRetBlock(cgf, loc);
        return returnBlock;
      }
      updateRetLoc(returnBlock, loc);
      return returnBlock;
    }

    mlir::Block *getEntryBlock() { return entryBlock; }
  };
| 721 | |
  /// Innermost active lexical scope.
  LexicalScope *curLexScope = nullptr;

  /// ----------------------
  /// CIR emit functions
  /// ----------------------
private:
  // Emits the return-value alloca — presumably recording it in fnRetAlloca;
  // confirm against the implementation.
  void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
                              clang::CharUnits alignment);

  CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
| 732 | |
public:
  /// Compute the address of the storage for field \p field (at index
  /// \p fieldIndex) within the object at \p base.
  Address emitAddrOfFieldStorage(Address base, const FieldDecl *field,
                                 llvm::StringRef fieldName,
                                 unsigned fieldIndex);

  /// Emit a cir.alloca named \p name of type \p ty. The overloads differ in
  /// how the insertion point is chosen: a flag selecting the function entry
  /// block, or an explicit insertion point.
  mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
                         mlir::Location loc, clang::CharUnits alignment,
                         bool insertIntoFnEntryBlock,
                         mlir::Value arraySize = nullptr);
  mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
                         mlir::Location loc, clang::CharUnits alignment,
                         mlir::OpBuilder::InsertPoint ip,
                         mlir::Value arraySize = nullptr);

  /// Store an aggregate-typed value into \p dest.
  void emitAggregateStore(mlir::Value value, Address dest);

  /// Emit the aggregate-typed expression \p e into the given slot.
  void emitAggExpr(const clang::Expr *e, AggValueSlot slot);

  LValue emitAggExprToLValue(const Expr *e);

  /// Emit code to compute the specified expression which can have any type. The
  /// result is returned as an RValue struct. If this is an aggregate
  /// expression, the aggloc/agglocvolatile arguments indicate where the result
  /// should be returned.
  RValue emitAnyExpr(const clang::Expr *e,
                     AggValueSlot aggSlot = AggValueSlot::ignored());

  /// Similar to emitAnyExpr(), however, the result will always be accessible
  /// even if no aggregate location is provided.
  RValue emitAnyExprToTemp(const clang::Expr *e);

  LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);

  Address emitArrayToPointerDecay(const Expr *array);

  AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d);

  /// Emit code and set up symbol table for a variable declaration with auto,
  /// register, or no storage class specifier. These turn into simple stack
  /// objects, or globals depending on target.
  void emitAutoVarDecl(const clang::VarDecl &d);

  void emitAutoVarCleanups(const AutoVarEmission &emission);
  void emitAutoVarInit(const AutoVarEmission &emission);

  /// Emit the initializer \p baseInit for a base class of \p classDecl.
  void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
                           CXXCtorInitializer *baseInit);

  LValue emitBinaryOperatorLValue(const BinaryOperator *e);

  mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);

  /// Emit a call to the builtin identified by \p builtinID.
  RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
                         const clang::CallExpr *e, ReturnValueSlot returnValue);

  RValue emitCall(const CIRGenFunctionInfo &funcInfo,
                  const CIRGenCallee &callee, ReturnValueSlot returnValue,
                  const CallArgList &args, cir::CIRCallOpInterface *callOp,
                  mlir::Location loc);
| 792 | RValue emitCall(const CIRGenFunctionInfo &funcInfo, |
| 793 | const CIRGenCallee &callee, ReturnValueSlot returnValue, |
| 794 | const CallArgList &args, |
| 795 | cir::CIRCallOpInterface *callOrTryCall = nullptr) { |
| 796 | assert(currSrcLoc && "source location must have been set" ); |
| 797 | return emitCall(funcInfo, callee, returnValue, args, callOrTryCall, |
| 798 | *currSrcLoc); |
| 799 | } |
| 800 | |
  RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
                  const clang::CallExpr *e, ReturnValueSlot returnValue);
  /// Emit a single call argument \p e of type \p argType into \p args.
  void emitCallArg(CallArgList &args, const clang::Expr *e,
                   clang::QualType argType);
  /// Emit the arguments in \p argRange into \p args, skipping the first
  /// \p paramsToSkip parameters.
  void emitCallArgs(
      CallArgList &args, PrototypeWrapper prototype,
      llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
      AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
  RValue emitCallExpr(const clang::CallExpr *e,
                      ReturnValueSlot returnValue = ReturnValueSlot());
  LValue emitCallExprLValue(const clang::CallExpr *e);
  /// Compute the callee for the call expression callee \p e.
  CIRGenCallee emitCallee(const clang::Expr *e);

  /// Shared emission for case and default statements; \p stmt is either a
  /// CaseStmt or a DefaultStmt.
  template <typename T>
  mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
                                             mlir::ArrayAttr value,
                                             cir::CaseOpKind kind,
                                             bool buildingTopLevelCase);

  mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
                                   mlir::Type condType,
                                   bool buildingTopLevelCase);

  LValue emitCastLValue(const CastExpr *e);

  /// Emits an argument for a call to a `__builtin_assume`. If the builtin
  /// sanitizer is enabled, a runtime check is also emitted.
  mlir::Value emitCheckedArgForAssume(const Expr *e);

  LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);

  void emitConstructorBody(FunctionArgList &args);
  void emitDestructorBody(FunctionArgList &args);

  mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);

  void emitCXXConstructExpr(const clang::CXXConstructExpr *e,
                            AggValueSlot dest);

  /// Emit a call to constructor \p d; the overloads differ in whether the
  /// destination is given as an aggregate slot or an address plus
  /// already-built argument list.
  void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
                              clang::CXXCtorType type, bool forVirtualBase,
                              bool delegating, AggValueSlot thisAVS,
                              const clang::CXXConstructExpr *e);

  void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
                              clang::CXXCtorType type, bool forVirtualBase,
                              bool delegating, Address thisAddr,
                              CallArgList &args, clang::SourceLocation loc);

  mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
                                          llvm::ArrayRef<const Attr *> attrs);

  RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e,
                               ReturnValueSlot returnValue);

  RValue emitCXXMemberOrOperatorCall(
      const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
      ReturnValueSlot returnValue, mlir::Value thisPtr,
      mlir::Value implicitParam, clang::QualType implicitParamTy,
      const clang::CallExpr *ce, CallArgList *rtlArgs);

  RValue emitCXXMemberOrOperatorMemberCallExpr(
      const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
      ReturnValueSlot returnValue, bool hasQualifier,
      clang::NestedNameSpecifier *qualifier, bool isArrow,
      const clang::Expr *base);
| 867 | |
  mlir::Value emitCXXNewExpr(const CXXNewExpr *e);

  RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
                                       const CXXMethodDecl *md,
                                       ReturnValueSlot returnValue);

  /// Emit the constructor prologue (member/base initializers) for \p ctor.
  void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
                        clang::CXXCtorType ctorType, FunctionArgList &args);

  // It's important not to confuse this and emitDelegateCXXConstructorCall.
  // Delegating constructors are the C++11 feature. The constructor delegate
  // optimization is used to reduce duplication in the base and complete
  // constructors where they are substantially the same.
  void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor,
                                        const FunctionArgList &args);

  mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);

  /// Emit an expression as an initializer for an object (variable, field, etc.)
  /// at the given location. The expression is not necessarily the normal
  /// initializer for the object, and the address is not necessarily
  /// its normal location.
  ///
  /// \param init the initializing expression
  /// \param d the object to act as if we're initializing
  /// \param lvalue the lvalue to initialize
  /// \param capturedByInit true if \p d is a __block variable whose address is
  /// potentially changed by the initializer
  void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
                      LValue lvalue, bool capturedByInit = false);

  mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);

  void emitImplicitAssignmentOperatorBody(FunctionArgList &args);

  /// Emit the initializer \p init into the field \p field of the object
  /// denoted by \p lhs.
  void emitInitializerForField(clang::FieldDecl *field, LValue lhs,
                               clang::Expr *init);

  mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);

  mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);

  /// Emit the computation of the specified expression of scalar type.
  mlir::Value emitScalarExpr(const clang::Expr *e);

  /// Emit a scalar pre/post increment or decrement on the lvalue \p lv.
  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
                                      bool isInc, bool isPre);

  /// Build a debug stoppoint if we are emitting debug info.
  void emitStopPoint(const Stmt *s);

  // Build CIR for a statement. useCurrentScope should be true if no
  // new scopes need be created when finding a compound statement.
  mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
                               llvm::ArrayRef<const Attr *> attrs = {});

  mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
                                     bool useCurrentScope);

  mlir::LogicalResult emitForStmt(const clang::ForStmt &s);

  /// Emit the computation of the specified expression of complex type,
  /// returning the result.
  mlir::Value emitComplexExpr(const Expr *e);

  LValue emitComplexAssignmentLValue(const BinaryOperator *e);

  void emitCompoundStmt(const clang::CompoundStmt &s);

  void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);

  void emitDecl(const clang::Decl &d);
  mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
  LValue emitDeclRefLValue(const clang::DeclRefExpr *e);

  mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
                                      mlir::Type condType,
                                      bool buildingTopLevelCase);
| 946 | |
  void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor,
                                      clang::CXXCtorType ctorType,
                                      const FunctionArgList &args,
                                      clang::SourceLocation loc);

  /// We are performing a delegate call; that is, the current function is
  /// delegating to another one. Produce an r-value suitable for passing the
  /// given parameter.
  void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
                           clang::SourceLocation loc);

  /// Emit an `if` on a boolean condition to the specified blocks.
  /// FIXME: Based on the condition, this might try to simplify the codegen of
  /// the conditional based on the branch.
  /// In the future, we may apply code generation simplifications here,
  /// similar to those used in classic LLVM codegen
  /// See `EmitBranchOnBoolExpr` for inspiration.
  mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
                                       const clang::Stmt *thenS,
                                       const clang::Stmt *elseS);
  cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
                             BuilderCallbackRef thenBuilder,
                             mlir::Location thenLoc,
                             BuilderCallbackRef elseBuilder,
                             std::optional<mlir::Location> elseLoc = {});

  mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);

  mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);

  /// Emit code to compute the specified expression,
  /// ignoring the result.
  void emitIgnoredExpr(const clang::Expr *e);

  RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);

  /// Given an expression that represents a value lvalue, this method emits
  /// the address of the lvalue, then loads the result as an rvalue,
  /// returning the rvalue.
  RValue emitLoadOfLValue(LValue lv, SourceLocation loc);

  Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
                              LValueBaseInfo *pointeeBaseInfo);
  LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
                                   QualType refTy, AlignmentSource source);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation. The l-value must be a simple
  /// l-value.
  mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);

  /// Emit code to compute a designator that specifies the location
  /// of the expression.
  /// FIXME: document this function better.
  LValue emitLValue(const clang::Expr *e);
  LValue emitLValueForBitField(LValue base, const FieldDecl *field);
  LValue emitLValueForField(LValue base, const clang::FieldDecl *field);

  /// Like emitLValueForField, except that if the Field is a reference, this
  /// will return the address of the reference and not the address of the value
  /// stored in the reference.
  LValue emitLValueForFieldInitialization(LValue base,
                                          const clang::FieldDecl *field,
                                          llvm::StringRef fieldName);

  LValue emitMemberExpr(const MemberExpr *e);

  /// Given an expression with a pointer type, emit the value and compute our
  /// best estimate of the alignment of the pointee.
  ///
  /// One reasonable way to use this information is when there's a language
  /// guarantee that the pointer must be aligned to some stricter value, and
  /// we're simply trying to ensure that sufficiently obvious uses of under-
  /// aligned objects don't get miscompiled; for example, a placement new
  /// into the address of a local variable. In such a case, it's quite
  /// reasonable to just ignore the returned alignment when it isn't from an
  /// explicit source.
  Address emitPointerWithAlignment(const clang::Expr *expr,
                                   LValueBaseInfo *baseInfo);

  /// Emits a reference binding to the passed in expression.
  RValue emitReferenceBindingToExpr(const Expr *e);

  mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);

  mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are CIR scalar types.
  mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
                                   clang::QualType dstType,
                                   clang::SourceLocation loc);

  void emitScalarInit(const clang::Expr *init, mlir::Location loc,
                      LValue lvalue, bool capturedByInit = false);

  void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
| 1045 | |
  void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
                          bool isInit);

  /// Store the scalar \p value into the given destination; the overloads
  /// differ in taking a raw address or an lvalue.
  void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
                         clang::QualType ty, bool isInit = false,
                         bool isNontemporal = false);
  void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);

  /// Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
  /// is 'Ty'.
  void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);

  mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);

  LValue emitStringLiteralLValue(const StringLiteral *e);

  mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
  mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
                                     bool buildingTopLevelCase);
  mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);

  /// Given a value and its clang type, returns the value casted to its memory
  /// representation.
  /// Note: CIR defers most of the special casting to the final lowering passes
  /// to conserve the high level information.
  mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);

  LValue emitUnaryOpLValue(const clang::UnaryOperator *e);

  /// This method handles emission of any variable declaration
  /// inside a function, including static vars etc.
  void emitVarDecl(const clang::VarDecl &d);

  mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);

  /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
  /// nonnull, if \p lhs is marked _Nonnull.
  void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
                            clang::SourceLocation loc);
| 1087 | /// An object to manage conditionally-evaluated expressions. |
| 1088 | class ConditionalEvaluation { |
| 1089 | CIRGenFunction &cgf; |
| 1090 | mlir::OpBuilder::InsertPoint insertPt; |
| 1091 | |
| 1092 | public: |
| 1093 | ConditionalEvaluation(CIRGenFunction &cgf) |
| 1094 | : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {} |
| 1095 | ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip) |
| 1096 | : cgf(cgf), insertPt(ip) {} |
| 1097 | |
| 1098 | void beginEvaluation() { |
| 1099 | assert(cgf.outermostConditional != this); |
| 1100 | if (!cgf.outermostConditional) |
| 1101 | cgf.outermostConditional = this; |
| 1102 | } |
| 1103 | |
| 1104 | void endEvaluation() { |
| 1105 | assert(cgf.outermostConditional != nullptr); |
| 1106 | if (cgf.outermostConditional == this) |
| 1107 | cgf.outermostConditional = nullptr; |
| 1108 | } |
| 1109 | |
| 1110 | /// Returns the insertion point which will be executed prior to each |
| 1111 | /// evaluation of the conditional code. In LLVM OG, this method |
| 1112 | /// is called getStartingBlock. |
| 1113 | mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; } |
| 1114 | }; |
| 1115 | |
| 1116 | struct ConditionalInfo { |
| 1117 | std::optional<LValue> lhs{}, rhs{}; |
| 1118 | mlir::Value result{}; |
| 1119 | }; |
| 1120 | |
| 1121 | // Return true if we're currently emitting one branch or the other of a |
| 1122 | // conditional expression. |
| 1123 | bool isInConditionalBranch() const { return outermostConditional != nullptr; } |
| 1124 | |
| 1125 | void setBeforeOutermostConditional(mlir::Value value, Address addr) { |
| 1126 | assert(isInConditionalBranch()); |
| 1127 | { |
| 1128 | mlir::OpBuilder::InsertionGuard guard(builder); |
| 1129 | builder.restoreInsertionPoint(outermostConditional->getInsertPoint()); |
| 1130 | builder.createStore( |
| 1131 | value.getLoc(), value, addr, |
| 1132 | mlir::IntegerAttr::get( |
| 1133 | mlir::IntegerType::get(value.getContext(), 64), |
| 1134 | (uint64_t)addr.getAlignment().getAsAlign().value())); |
| 1135 | } |
| 1136 | } |
| 1137 | |
  // Points to the outermost active conditional control. This is used so that
  // we know if a temporary should be destroyed conditionally.
  ConditionalEvaluation *outermostConditional = nullptr;

  /// Emit the branches of the conditional operator \p e via \p branchGenFunc
  /// and return the per-branch results (see ConditionalInfo).
  template <typename FuncTy>
  ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
                                        const FuncTy &branchGenFunc);

  /// Emit \p cond as a ternary selecting between \p thenS and \p elseS,
  /// yielding the resulting value.
  mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
                                    const clang::Stmt *thenS,
                                    const clang::Stmt *elseS);
| 1149 | |
| 1150 | /// ---------------------- |
| 1151 | /// CIR build helpers |
| 1152 | /// ----------------- |
| 1153 | public: |
| 1154 | cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, |
| 1155 | const Twine &name = "tmp" , |
| 1156 | mlir::Value arraySize = nullptr, |
| 1157 | bool insertIntoFnEntryBlock = false); |
| 1158 | cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, |
| 1159 | const Twine &name = "tmp" , |
| 1160 | mlir::OpBuilder::InsertPoint ip = {}, |
| 1161 | mlir::Value arraySize = nullptr); |
| 1162 | Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc, |
| 1163 | const Twine &name = "tmp" , |
| 1164 | mlir::Value arraySize = nullptr, |
| 1165 | Address *alloca = nullptr, |
| 1166 | mlir::OpBuilder::InsertPoint ip = {}); |
| 1167 | Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, |
| 1168 | mlir::Location loc, |
| 1169 | const Twine &name = "tmp" , |
| 1170 | mlir::Value arraySize = nullptr, |
| 1171 | mlir::OpBuilder::InsertPoint ip = {}); |
| 1172 | |
| 1173 | /// Create a temporary memory object of the given type, with |
| 1174 | /// appropriate alignmen and cast it to the default address space. Returns |
| 1175 | /// the original alloca instruction by \p Alloca if it is not nullptr. |
| 1176 | Address createMemTemp(QualType t, mlir::Location loc, |
| 1177 | const Twine &name = "tmp" , Address *alloca = nullptr, |
| 1178 | mlir::OpBuilder::InsertPoint ip = {}); |
| 1179 | Address createMemTemp(QualType t, CharUnits align, mlir::Location loc, |
| 1180 | const Twine &name = "tmp" , Address *alloca = nullptr, |
| 1181 | mlir::OpBuilder::InsertPoint ip = {}); |
| 1182 | |
| 1183 | //===--------------------------------------------------------------------===// |
| 1184 | // OpenACC Emission |
| 1185 | //===--------------------------------------------------------------------===// |
| 1186 | private: |
| 1187 | template <typename Op> |
| 1188 | Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind, |
| 1189 | SourceLocation dirLoc, |
| 1190 | llvm::ArrayRef<const OpenACCClause *> clauses); |
| 1191 | // Function to do the basic implementation of an operation with an Associated |
| 1192 | // Statement. Models AssociatedStmtConstruct. |
| 1193 | template <typename Op, typename TermOp> |
| 1194 | mlir::LogicalResult emitOpenACCOpAssociatedStmt( |
| 1195 | mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind, |
| 1196 | SourceLocation dirLoc, llvm::ArrayRef<const OpenACCClause *> clauses, |
| 1197 | const Stmt *associatedStmt); |
| 1198 | |
| 1199 | template <typename Op, typename TermOp> |
| 1200 | mlir::LogicalResult emitOpenACCOpCombinedConstruct( |
| 1201 | mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind, |
| 1202 | SourceLocation dirLoc, llvm::ArrayRef<const OpenACCClause *> clauses, |
| 1203 | const Stmt *loopStmt); |
| 1204 | |
| 1205 | template <typename Op> |
| 1206 | void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind, |
| 1207 | SourceLocation dirLoc, |
| 1208 | ArrayRef<const OpenACCClause *> clauses); |
| 1209 | // The second template argument doesn't need to be a template, since it should |
| 1210 | // always be an mlir::acc::LoopOp, but as this is a template anyway, we make |
| 1211 | // it a template argument as this way we can avoid including the OpenACC MLIR |
| 1212 | // headers here. We will count on linker failures/explicit instantiation to |
| 1213 | // ensure we don't mess this up, but it is only called from 1 place, and |
| 1214 | // instantiated 3x. |
| 1215 | template <typename ComputeOp, typename LoopOp> |
| 1216 | void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp, |
| 1217 | OpenACCDirectiveKind dirKind, SourceLocation dirLoc, |
| 1218 | ArrayRef<const OpenACCClause *> clauses); |
| 1219 | |
| 1220 | // The OpenACC LoopOp requires that we have auto, seq, or independent on all |
| 1221 | // LoopOp operations for the 'none' device type case. This function checks if |
| 1222 | // the LoopOp has one, else it updates it to have one. |
| 1223 | void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan, |
| 1224 | OpenACCDirectiveKind dk); |
| 1225 | |
| 1226 | // The OpenACC 'cache' construct actually applies to the 'loop' if present. So |
| 1227 | // keep track of the 'loop' so that we can add the cache vars to it correctly. |
| 1228 | mlir::acc::LoopOp *activeLoopOp = nullptr; |
| 1229 | |
| 1230 | struct ActiveOpenACCLoopRAII { |
| 1231 | CIRGenFunction &cgf; |
| 1232 | mlir::acc::LoopOp *oldLoopOp; |
| 1233 | |
| 1234 | ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp) |
| 1235 | : cgf(cgf), oldLoopOp(cgf.activeLoopOp) { |
| 1236 | cgf.activeLoopOp = newOp; |
| 1237 | } |
| 1238 | ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; } |
| 1239 | }; |
| 1240 | |
public:
  // Helper type used to store the list of important information for a 'data'
  // clause variable, or a 'cache' variable reference.
  struct OpenACCDataOperandInfo {
    mlir::Location beginLoc;
    mlir::Value varValue;
    std::string name;
    llvm::SmallVector<mlir::Value> bounds;
  };
  // Gets the collection of info required to lower an OpenACC clause or cache
  // construct variable reference.
  OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e);
  // Helper function to emit the integer expressions as required by an OpenACC
  // clause/construct.
  mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
  // Helper function to emit an integer constant as an mlir int type, used for
  // constants in OpenACC constructs/clauses.
  mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
                                       int64_t value);

  // Entry points for emitting each kind of OpenACC construct statement.
  mlir::LogicalResult
  emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s);
  mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
  mlir::LogicalResult
  emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s);
  mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
  mlir::LogicalResult
  emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s);
  mlir::LogicalResult
  emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s);
  mlir::LogicalResult
  emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s);
  mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
  mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
  mlir::LogicalResult
  emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s);
  mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
  mlir::LogicalResult
  emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s);
  mlir::LogicalResult
  emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s);
  mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);

  // Entry points for emitting OpenACC declarations.
  void emitOpenACCDeclare(const OpenACCDeclareDecl &d);
  void emitOpenACCRoutine(const OpenACCRoutineDecl &d);
| 1287 | /// Create a temporary memory object for the given aggregate type. |
| 1288 | AggValueSlot createAggTemp(QualType ty, mlir::Location loc, |
| 1289 | const Twine &name = "tmp" , |
| 1290 | Address *alloca = nullptr) { |
| 1291 | assert(!cir::MissingFeatures::aggValueSlot()); |
| 1292 | return AggValueSlot::forAddr( |
| 1293 | createMemTemp(ty, loc, name, alloca), ty.getQualifiers(), |
| 1294 | AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased, |
| 1295 | AggValueSlot::DoesNotOverlap); |
| 1296 | } |
| 1297 | |
private:
  /// Determine the type to use for \p arg when it is passed as a variadic
  /// argument.
  QualType getVarArgType(const Expr *arg);
| 1300 | }; |
| 1301 | |
| 1302 | } // namespace clang::CIRGen |
| 1303 | |
| 1304 | #endif |
| 1305 | |