//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

#include "CIRGenCXXABI.h"
#include "CIRGenCall.h"
#include "CIRGenValue.h"
#include "mlir/IR/Location.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/CIR/MissingFeatures.h"

#include <cassert>

namespace clang::CIRGen {

CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                               bool suppressNewContext)
    : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {}

CIRGenFunction::~CIRGenFunction() {}

// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return cir::TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return cir::TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return cir::TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

mlir::Type CIRGenFunction::convertTypeForMem(QualType t) {
  return cgm.getTypes().convertTypeForMem(t);
}

mlir::Type CIRGenFunction::convertType(QualType t) {
  return cgm.getTypes().convertType(t);
}

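/// Map a Clang SourceLocation to an MLIR FileLineColLoc, falling back to the
/// currently tracked source location when the AST location is invalid.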
mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr). Work around that to still get something out.
  if (srcLoc.isValid()) {
    const SourceManager &sm = getContext().getSourceManager();
    PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
    StringRef filename = pLoc.getFilename();
    return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
                                     pLoc.getLine(), pLoc.getColumn());
  }
  // Do our best...
  assert(currSrcLoc && "expected to inherit some source location");
  return *currSrcLoc;
}

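/// Map a Clang SourceRange to an MLIR location by fusing the begin and end
/// locations; fall back to the tracked source location or an unknown location
/// when the range is invalid.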
mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr). Work around that to still get something out.
  if (srcLoc.isValid()) {
    mlir::Location beg = getLoc(srcLoc.getBegin());
    mlir::Location end = getLoc(srcLoc.getEnd());
    SmallVector<mlir::Location, 2> locs = {beg, end};
    mlir::Attribute metadata;
    return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
  }
  if (currSrcLoc) {
    return *currSrcLoc;
  }
  // We're brave, but time to give up.
  return builder.getUnknownLoc();
}

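/// Fuse two MLIR locations into one, typically the begin and end of a range.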
mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
  SmallVector<mlir::Location, 2> locs = {lhs, rhs};
  mlir::Attribute metadata;
  return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
}

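/// Return true if the statement (or any of its sub-statements) contains a
/// label, in which case the code cannot simply be dropped even when the
/// enclosing condition constant-folds away.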
bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
  // Null statement, not a label!
  if (!s)
    return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(s))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(s) && !ignoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore case statements when we
  // recursively process the sub-statements of the switch. If we haven't
  // encountered a switch statement, we treat case statements like labels, but
  // if we are processing a switch statement, case statements are expected.
  if (isa<SwitchStmt>(s))
    ignoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  return std::any_of(s->child_begin(), s->child_end(),
                     [=](const Stmt *subStmt) {
                       return containsLabel(subStmt, ignoreCaseStmts);
                     });
}

/// If the specified expression does not fold to a constant, or if it does but
/// contains a label, return false. If it constant folds, return true and set
/// the boolean result in resultBool.
bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
                                         bool allowLabels) {
  llvm::APSInt resultInt;
  if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
    return false;

  resultBool = resultInt.getBoolValue();
  return true;
}

/// If the specified expression does not fold to a constant, or if it does
/// fold but contains a label, return false. If it constant folds, return
/// true and set the folded value in resultInt.
bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr *cond,
                                                  llvm::APSInt &resultInt,
                                                  bool allowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult result;
  if (!cond->EvaluateAsInt(result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt intValue = result.Val.getInt();
  if (!allowLabels && containsLabel(cond))
    return false; // Contains a label.

  resultInt = intValue;
  return true;
}

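/// Create the `__retval` alloca that holds the function's return value and
/// remember it in `fnRetAlloca`. No slot is created for void functions.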
void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
                                            CharUnits alignment) {
  if (!type->isVoidType()) {
    fnRetAlloca = emitAlloca("__retval", convertType(type), loc, alignment,
                             /*insertIntoFnEntryBlock=*/false);
  }
}

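/// Record per-variable facts on the alloca backing a declared variable:
/// parameters get the init attribute, and references or const-qualified
/// values get the constant attribute.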
void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
                             mlir::Location loc, CharUnits alignment,
                             bool isParam) {
  const auto *namedVar = dyn_cast_or_null<NamedDecl>(var);
  assert(namedVar && "Needs a named decl");
  assert(!cir::MissingFeatures::cgfSymbolTable());

  auto allocaOp = cast<cir::AllocaOp>(addrVal.getDefiningOp());
  if (isParam)
    allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  if (ty->isReferenceType() || ty.isConstQualified())
    allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
}

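/// Finalize the current lexical scope: emit the pending return block if one
/// exists, drop empty trailing blocks, and terminate the scope with either an
/// implicit return (at function depth) or a yield (for nested scopes).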
void CIRGenFunction::LexicalScope::cleanup() {
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  if (returnBlock != nullptr) {
    // Write out the return block, which loads the value from `__retval` and
    // issues the `cir.return`.
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(returnBlock);
    (void)emitReturn(*returnLoc);
  }

  mlir::Block *curBlock = builder.getBlock();
  if (isGlobalInit() && !curBlock)
    return;
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    if (returnBlock != nullptr && returnBlock->getUses().empty())
      returnBlock->erase();
    return;
  }

  // Reached the end of the scope.
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(curBlock);

    if (localScope->depth == 0) {
      // Reached the end of the function.
      if (returnBlock != nullptr) {
        if (returnBlock->getUses().empty())
          returnBlock->erase();
        else {
          builder.create<cir::BrOp>(*returnLoc, returnBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }
    // Reached the end of a non-function scope. Some scopes, such as those
    // used with the ?: operator, can return a value.
    if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
      !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
              : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
    }
  }
}

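/// Emit a `cir.return` at the given location, loading the result from the
/// `__retval` slot when the function does not return void.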
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  if (!cgf.curFn.getFunctionType().hasVoidReturn()) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    auto value = builder.create<cir::LoadOp>(
        loc, cgf.curFn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
    return builder.create<cir::ReturnOp>(loc,
                                         llvm::ArrayRef(value.getResult()));
  }
  return builder.create<cir::ReturnOp>(loc);
}

// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
// candidate for sharing between CIRGen and CodeGen.
static bool mayDropFunctionReturn(const ASTContext &astContext,
                                  QualType returnType) {
  // We can't just discard the return value for a record type with a complex
  // destructor or a non-trivially copyable type.
  if (const RecordType *recordType =
          returnType.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *classDecl = dyn_cast<CXXRecordDecl>(recordType->getDecl()))
      return classDecl->hasTrivialDestructor();
  }
  return returnType.isTriviallyCopyableType(astContext);
}

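/// Emit the terminator for a function body that falls off the end without an
/// explicit return: a trap or unreachable op where that would be undefined
/// behavior in C++, otherwise a plain return.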
void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        builder.create<cir::TrapOp>(localScope->endLoc);
      else
        builder.create<cir::UnreachableOp>(localScope->endLoc);
      builder.clearInsertionPoint();
      return;
    }
  }

  (void)emitReturn(localScope->endLoc);
}

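/// Set up per-function state before the body is emitted: position the builder
/// in the entry block, create allocas for the parameters and the return
/// value, and emit the C++ instance-method prologue when needed.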
void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
                                   cir::FuncOp fn, cir::FuncType funcType,
                                   FunctionArgList args, SourceLocation loc,
                                   SourceLocation startLoc) {
  assert(!curFn &&
         "CIRGenFunction can only be used for one function at a time");

  curFn = fn;

  const Decl *d = gd.getDecl();
  const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
  curFuncDecl = d->getNonClosureContext();

  mlir::Block *entryBB = &fn.getBlocks().front();
  builder.setInsertionPointToStart(entryBB);

  // TODO(cir): this should live in `emitFunctionProlog`.
  // Declare all the function arguments in the symbol table.
  for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
    const VarDecl *paramVar = std::get<0>(nameValue);
    mlir::Value paramVal = std::get<1>(nameValue);
    CharUnits alignment = getContext().getDeclAlign(paramVar);
    mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
    paramVal.setLoc(paramLoc);

    mlir::Value addrVal =
        emitAlloca(cast<NamedDecl>(paramVar)->getName(),
                   convertType(paramVar->getType()), paramLoc, alignment,
                   /*insertIntoFnEntryBlock=*/true);

    declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
            /*isParam=*/true);

    setAddrOfLocalVar(paramVar, Address(addrVal, alignment));

    bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                      cast<ParmVarDecl>(paramVar)->isKNRPromoted();
    assert(!cir::MissingFeatures::constructABIArgDirectExtend());
    if (isPromoted)
      cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");

    // Location of the store to the param storage tracked as beginning of
    // the function body.
    mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
    builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
  }
  assert(builder.getInsertionBlock() && "Should be valid");

  // When the current function is not void, create an address to store the
  // result value.
  if (!returnType->isVoidType())
    emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
                           getContext().getTypeAlignInChars(returnType));

  if (isa_and_nonnull<CXXMethodDecl>(d) &&
      cast<CXXMethodDecl>(d)->isInstance()) {
    cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);

    const auto *md = cast<CXXMethodDecl>(d);
    if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
      cgm.errorNYI(loc, "lambda call operator");
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The fast
      // register allocator would be happier...
      cxxThisValue = cxxabiThisValue;
    }

    assert(!cir::MissingFeatures::sanitizers());
    assert(!cir::MissingFeatures::emitTypeCheck());
  }
}

void CIRGenFunction::finishFunction(SourceLocation endLoc) {}

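/// Emit the statements that make up the function body.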
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  auto result = mlir::LogicalResult::success();
  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    emitCompoundStmtWithoutScope(*block);
  else
    result = emitStmt(body, /*useCurrentScope=*/true);

  return result;
}

static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
  // Remove any leftover blocks that are unreachable and empty, since they do
  // not represent unreachable code useful for warnings nor anything deemed
  // useful in general.
  SmallVector<mlir::Block *> blocksToDelete;
  for (mlir::Block &block : func.getBlocks()) {
    if (block.empty() && block.getUses().empty())
      blocksToDelete.push_back(&block);
  }
  for (mlir::Block *block : blocksToDelete)
    block->erase();
}

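/// Emit the body of the given function declaration into `fn`. Returns the
/// finished FuncOp, or nullptr if emission or verification failed.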
cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      &getMLIRContext(),
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    if (isa<CXXDestructorDecl>(funcDecl))
      getCIRGenModule().errorNYI(bodyRange, "C++ destructor definition");
    else if (isa<CXXConstructorDecl>(funcDecl))
      getCIRGenModule().errorNYI(bodyRange, "C++ constructor definition");
    else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
             funcDecl->hasAttr<CUDAGlobalAttr>())
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    else if (isa<CXXMethodDecl>(funcDecl) &&
             cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker())
      getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker");
    else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
             (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator()))
      getCIRGenModule().errorNYI(bodyRange, "Default assignment operator");
    else if (body) {
      if (mlir::failed(emitFunctionBody(body))) {
        fn.erase();
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  eraseEmptyAndUnusedBlocks(fn);
  return fn;
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue CIRGenFunction::makeNaturalAlignPointeeAddrLValue(mlir::Value val,
                                                         QualType ty) {
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  LValueBaseInfo baseInfo;
  assert(!cir::MissingFeatures::opTBAA());
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}

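/// Collect the argument declarations for the given function, including the
/// implicit 'this' parameter for instance methods, and return the function's
/// return type.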
clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl gd,
                                                     FunctionArgList &args) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  if (isa<CXXConstructorDecl>(fd))
    cgm.errorNYI(fd->getSourceRange(),
                 "buildFunctionArgList: CXXConstructorDecl");

  for (auto *param : fd->parameters())
    args.push_back(param);

  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    cgm.errorNYI(fd->getSourceRange(),
                 "buildFunctionArgList: implicit structor params");

  return retTy;
}

/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
LValue CIRGenFunction::emitLValue(const Expr *e) {
  // FIXME: ApplyDebugLocation DL(*this, e);
  switch (e->getStmtClass()) {
  default:
    getCIRGenModule().errorNYI(e->getSourceRange(),
                               std::string("l-value not implemented for '") +
                                   e->getStmtClassName() + "'");
    return LValue();
  case Expr::ArraySubscriptExprClass:
    return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
  case Expr::UnaryOperatorClass:
    return emitUnaryOpLValue(cast<UnaryOperator>(e));
  case Expr::StringLiteralClass:
    return emitStringLiteralLValue(cast<StringLiteral>(e));
  case Expr::MemberExprClass:
    return emitMemberExpr(cast<MemberExpr>(e));
  case Expr::BinaryOperatorClass:
    return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    if (!ty->isAnyComplexType())
      return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
    cgm.errorNYI(e->getSourceRange(),
                 "CompoundAssignOperator with ComplexType");
    return LValue();
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return emitCallExprLValue(cast<CallExpr>(e));
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::DeclRefExprClass:
    return emitDeclRefLValue(cast<DeclRefExpr>(e));
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::ImplicitCastExprClass:
    return emitCastLValue(cast<CastExpr>(e));
  }
}

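/// Zero-initialize the object at `destPtr` by storing a null value of the
/// appropriate type, mirroring the memset-based path used by LLVM codegen.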
void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
                                            QualType ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *rt = ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(rt->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM Codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}

// TODO(cir): should be shared with LLVM codegen.
bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
  const Expr *e = ce->getSubExpr();

  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}

} // namespace clang::CIRGen
