//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Emit Stmt nodes as CIR code.
//
//===----------------------------------------------------------------------===//

#include "CIRGenBuilder.h"
#include "CIRGenFunction.h"

#include "mlir/IR/Builders.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenACC.h"
#include "clang/CIR/MissingFeatures.h"

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;

void CIRGenFunction::emitCompoundStmtWithoutScope(const CompoundStmt &s) {
  for (auto *curStmt : s.body()) {
    if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
      getCIRGenModule().errorNYI(curStmt->getSourceRange(),
                                 std::string("emitCompoundStmtWithoutScope: ") +
                                     curStmt->getStmtClassName());
  }
}

void CIRGenFunction::emitCompoundStmt(const CompoundStmt &s) {
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  mlir::OpBuilder::InsertPoint scopeInsPt;
  builder.create<cir::ScopeOp>(
      scopeLoc, [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
        scopeInsPt = b.saveInsertionPoint();
      });
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.restoreInsertionPoint(scopeInsPt);
    LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
    emitCompoundStmtWithoutScope(s);
  }
}
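
// Illustrative sketch of the mapping implemented above (the CIR textual
// syntax is assumed here and may differ between ClangIR versions): a braced
// block such as
//
//   { int x = 0; }
//
// is emitted as a cir.scope op whose single region holds the block's
// statements:
//
//   cir.scope {
//     %x = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
//     ...
//   }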

void CIRGenFunction::emitStopPoint(const Stmt *s) {
  assert(!cir::MissingFeatures::generateDebugInfo());
}

// Build CIR for a statement. useCurrentScope should be true if no new scope
// needs to be created when a compound statement is encountered.
mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
                                             bool useCurrentScope,
                                             ArrayRef<const Attr *> attr) {
  if (mlir::succeeded(emitSimpleStmt(s, useCurrentScope)))
    return mlir::success();

  switch (s->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::BreakStmtClass:
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::ReturnStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    mlir::Block *incoming = builder.getInsertionBlock();
    assert(incoming && "expression emission must have an insertion point");

    emitIgnoredExpr(cast<Expr>(s));

    mlir::Block *outgoing = builder.getInsertionBlock();
    assert(outgoing && "expression emission cleared block!");
    return mlir::success();
  }
  case Stmt::IfStmtClass:
    return emitIfStmt(cast<IfStmt>(*s));
  case Stmt::SwitchStmtClass:
    return emitSwitchStmt(cast<SwitchStmt>(*s));
  case Stmt::ForStmtClass:
    return emitForStmt(cast<ForStmt>(*s));
  case Stmt::WhileStmtClass:
    return emitWhileStmt(cast<WhileStmt>(*s));
  case Stmt::DoStmtClass:
    return emitDoStmt(cast<DoStmt>(*s));
  case Stmt::CXXForRangeStmtClass:
    return emitCXXForRangeStmt(cast<CXXForRangeStmt>(*s), attr);
  case Stmt::OpenACCComputeConstructClass:
    return emitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*s));
  case Stmt::OpenACCLoopConstructClass:
    return emitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*s));
  case Stmt::OpenACCCombinedConstructClass:
    return emitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*s));
  case Stmt::OpenACCDataConstructClass:
    return emitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*s));
  case Stmt::OpenACCEnterDataConstructClass:
    return emitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*s));
  case Stmt::OpenACCExitDataConstructClass:
    return emitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*s));
  case Stmt::OpenACCHostDataConstructClass:
    return emitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*s));
  case Stmt::OpenACCWaitConstructClass:
    return emitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*s));
  case Stmt::OpenACCInitConstructClass:
    return emitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*s));
  case Stmt::OpenACCShutdownConstructClass:
    return emitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*s));
  case Stmt::OpenACCSetConstructClass:
    return emitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*s));
  case Stmt::OpenACCUpdateConstructClass:
    return emitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*s));
  case Stmt::OpenACCCacheConstructClass:
    return emitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*s));
  case Stmt::OpenACCAtomicConstructClass:
    return emitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*s));
  case Stmt::OMPScopeDirectiveClass:
  case Stmt::OMPErrorDirectiveClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
  case Stmt::CoroutineBodyStmtClass:
  case Stmt::CoreturnStmtClass:
  case Stmt::CXXTryStmtClass:
  case Stmt::IndirectGotoStmtClass:
  case Stmt::GCCAsmStmtClass:
  case Stmt::MSAsmStmtClass:
  case Stmt::OMPParallelDirectiveClass:
  case Stmt::OMPTaskwaitDirectiveClass:
  case Stmt::OMPTaskyieldDirectiveClass:
  case Stmt::OMPBarrierDirectiveClass:
  case Stmt::CapturedStmtClass:
  case Stmt::ObjCAtTryStmtClass:
  case Stmt::ObjCAtThrowStmtClass:
  case Stmt::ObjCAtSynchronizedStmtClass:
  case Stmt::ObjCForCollectionStmtClass:
  case Stmt::ObjCAutoreleasePoolStmtClass:
  case Stmt::SEHTryStmtClass:
  case Stmt::OMPMetaDirectiveClass:
  case Stmt::OMPCanonicalLoopClass:
  case Stmt::OMPSimdDirectiveClass:
  case Stmt::OMPTileDirectiveClass:
  case Stmt::OMPUnrollDirectiveClass:
  case Stmt::OMPForDirectiveClass:
  case Stmt::OMPForSimdDirectiveClass:
  case Stmt::OMPSectionsDirectiveClass:
  case Stmt::OMPSectionDirectiveClass:
  case Stmt::OMPSingleDirectiveClass:
  case Stmt::OMPMasterDirectiveClass:
  case Stmt::OMPCriticalDirectiveClass:
  case Stmt::OMPParallelForDirectiveClass:
  case Stmt::OMPParallelForSimdDirectiveClass:
  case Stmt::OMPParallelMasterDirectiveClass:
  case Stmt::OMPParallelSectionsDirectiveClass:
  case Stmt::OMPTaskDirectiveClass:
  case Stmt::OMPTaskgroupDirectiveClass:
  case Stmt::OMPFlushDirectiveClass:
  case Stmt::OMPDepobjDirectiveClass:
  case Stmt::OMPScanDirectiveClass:
  case Stmt::OMPOrderedDirectiveClass:
  case Stmt::OMPAtomicDirectiveClass:
  case Stmt::OMPTargetDirectiveClass:
  case Stmt::OMPTeamsDirectiveClass:
  case Stmt::OMPCancellationPointDirectiveClass:
  case Stmt::OMPCancelDirectiveClass:
  case Stmt::OMPTargetDataDirectiveClass:
  case Stmt::OMPTargetEnterDataDirectiveClass:
  case Stmt::OMPTargetExitDataDirectiveClass:
  case Stmt::OMPTargetParallelDirectiveClass:
  case Stmt::OMPTargetParallelForDirectiveClass:
  case Stmt::OMPTaskLoopDirectiveClass:
  case Stmt::OMPTaskLoopSimdDirectiveClass:
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
  case Stmt::OMPMasterTaskLoopDirectiveClass:
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
  case Stmt::OMPParallelGenericLoopDirectiveClass:
  case Stmt::OMPParallelMaskedDirectiveClass:
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
  case Stmt::OMPDistributeDirectiveClass:
  case Stmt::OMPDistributeParallelForDirectiveClass:
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPDistributeSimdDirectiveClass:
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
  case Stmt::OMPTargetSimdDirectiveClass:
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
  case Stmt::OMPTargetUpdateDirectiveClass:
  case Stmt::OMPTeamsDistributeDirectiveClass:
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
  case Stmt::OMPTargetTeamsDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
  case Stmt::OMPInteropDirectiveClass:
  case Stmt::OMPDispatchDirectiveClass:
  case Stmt::OMPGenericLoopDirectiveClass:
  case Stmt::OMPReverseDirectiveClass:
  case Stmt::OMPInterchangeDirectiveClass:
  case Stmt::OMPAssumeDirectiveClass:
  case Stmt::OMPMaskedDirectiveClass:
  case Stmt::OMPStripeDirectiveClass:
  case Stmt::ObjCAtCatchStmtClass:
  case Stmt::ObjCAtFinallyStmtClass:
    cgm.errorNYI(s->getSourceRange(),
                 std::string("emitStmt: ") + s->getStmtClassName());
    return mlir::failure();
  }

  llvm_unreachable("Unexpected statement class");
}

mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
                                                   bool useCurrentScope) {
  switch (s->getStmtClass()) {
  default:
    return mlir::failure();
  case Stmt::DeclStmtClass:
    return emitDeclStmt(cast<DeclStmt>(*s));
  case Stmt::CompoundStmtClass:
    if (useCurrentScope)
      emitCompoundStmtWithoutScope(cast<CompoundStmt>(*s));
    else
      emitCompoundStmt(cast<CompoundStmt>(*s));
    break;
  case Stmt::ContinueStmtClass:
    return emitContinueStmt(cast<ContinueStmt>(*s));

  // NullStmt doesn't need any handling, but we need to say we handled it.
  case Stmt::NullStmtClass:
    break;
  case Stmt::CaseStmtClass:
  case Stmt::DefaultStmtClass:
    // If we reach here, we are not handling a switch case at the top level.
    return emitSwitchCase(cast<SwitchCase>(*s),
                          /*buildingTopLevelCase=*/false);

  case Stmt::BreakStmtClass:
    return emitBreakStmt(cast<BreakStmt>(*s));
  case Stmt::ReturnStmtClass:
    return emitReturnStmt(cast<ReturnStmt>(*s));
  }

  return mlir::success();
}

// Add a terminating yield on a body region if no other terminators are used.
static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
                          mlir::Location loc) {
  if (r.empty())
    return;

  SmallVector<mlir::Block *, 4> eraseBlocks;
  unsigned numBlocks = r.getBlocks().size();
  for (auto &block : r.getBlocks()) {
    // Clean up after return operations, which might create empty blocks
    // when emitted as the last stmt.
    if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
        block.hasNoSuccessors())
      eraseBlocks.push_back(&block);

    if (block.empty() ||
        !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
      mlir::OpBuilder::InsertionGuard guardCase(builder);
      builder.setInsertionPointToEnd(&block);
      builder.createYield(loc);
    }
  }

  for (auto *b : eraseBlocks)
    b->erase();
}
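
// Illustrative sketch (assumed CIR syntax): if the last statement of a body
// region leaves a block without a terminator, e.g.
//
//   cir.scope {
//     %0 = cir.const #cir.int<0> : !s32i
//   }
//
// terminateBody appends a trailing cir.yield so every block ends in a
// terminator and the region verifies:
//
//   cir.scope {
//     %0 = cir.const #cir.int<0> : !s32i
//     cir.yield
//   }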

mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
  mlir::LogicalResult res = mlir::success();
  // Only one branch of a consteval if statement can ever be runtime-evaluated:
  // the 'else' branch for a plain 'if consteval', or the 'then' branch for the
  // negated form.
  const Stmt *constevalExecuted = nullptr;
  if (s.isConsteval()) {
    constevalExecuted = s.isNegatedConsteval() ? s.getThen() : s.getElse();
    if (!constevalExecuted) {
      // No runtime code execution required.
      return res;
    }
  }

  // C99 6.8.4.1: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  auto ifStmtBuilder = [&]() -> mlir::LogicalResult {
    if (s.isConsteval())
      return emitStmt(constevalExecuted, /*useCurrentScope=*/true);

    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();

    if (s.getConditionVariable())
      emitDecl(*s.getConditionVariable());

    // If the condition folds to a constant and this is an 'if constexpr',
    // we simplify it early in CIRGen to avoid emitting the full 'if'.
    bool condConstant;
    if (constantFoldsToBool(s.getCond(), condConstant, s.isConstexpr())) {
      if (s.isConstexpr()) {
        // Handle "if constexpr" explicitly here to avoid generating some
        // ill-formed code since in CIR the "if" is no longer simplified
        // in this lambda like in Clang but postponed to other MLIR
        // passes.
        if (const Stmt *executed = condConstant ? s.getThen() : s.getElse())
          return emitStmt(executed, /*useCurrentScope=*/true);
        // There is nothing to execute at runtime.
        // TODO(cir): there is still an empty cir.scope generated by the caller.
        return mlir::success();
      }
    }

    assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
    assert(!cir::MissingFeatures::incrementProfileCounter());
    return emitIfOnBoolExpr(s.getCond(), s.getThen(), s.getElse());
  };

  // TODO: Add a new scoped symbol table.
  // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  // The if scope contains the full source range for IfStmt.
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  builder.create<cir::ScopeOp>(
      scopeLoc, /*scopeBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location loc) {
        LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()};
        res = ifStmtBuilder();
      });

  return res;
}
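
// Illustrative sketch of the emitted structure (assumed CIR syntax): for
//
//   if (c) { f(); } else { g(); }
//
// we produce a scope wrapping a cir.if on the boolean condition:
//
//   cir.scope {
//     %cond = ... : !cir.bool
//     cir.if %cond {
//       ... // then region: call to f
//     } else {
//       ... // else region: call to g
//     }
//   }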

mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
  assert(builder.getInsertionBlock() && "expected valid insertion point");

  for (const Decl *I : s.decls())
    emitDecl(*I);

  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
  mlir::Location loc = getLoc(s.getSourceRange());
  const Expr *rv = s.getRetValue();

  if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
      s.getNRVOCandidate()->isNRVOVariable()) {
    getCIRGenModule().errorNYI(s.getSourceRange(),
                               "named return value optimization");
  } else if (!rv) {
    // No return expression. Do nothing.
  } else if (rv->getType()->isVoidType()) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    emitAnyExpr(rv);
  } else if (cast<FunctionDecl>(curGD.getDecl())
                 ->getReturnType()
                 ->isReferenceType()) {
    // If this function returns a reference, take the address of the
    // expression rather than the value.
    RValue result = emitReferenceBindingToExpr(rv);
    builder.CIRBaseBuilderTy::createStore(loc, result.getScalarVal(),
                                          *fnRetAlloca);
  } else {
    mlir::Value value = nullptr;
    switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
    case cir::TEK_Scalar:
      value = emitScalarExpr(rv);
      if (value) { // Change this to an assert once emitScalarExpr is complete
        builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
      }
      break;
    default:
      getCIRGenModule().errorNYI(s.getSourceRange(),
                                 "non-scalar function return type");
      break;
    }
  }

  auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
  builder.create<cir::BrOp>(loc, retBlock);
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}
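
// Illustrative sketch (assumed CIR syntax; the "__retval" slot name is how
// CIR conventionally labels the return alloca): a scalar 'return x;' stores
// the value into the function's return slot and branches to the shared
// return block, leaving a fresh block behind for any unreachable trailing
// code:
//
//   %v = cir.load %x : !cir.ptr<!s32i>, !s32i
//   cir.store %v, %__retval : !s32i, !cir.ptr<!s32i>
//   cir.br ^bb_ret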

mlir::LogicalResult
CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &s) {
  builder.createContinue(getLoc(s.getContinueLoc()));

  // Insert the new block to continue codegen after the continue statement.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitBreakStmt(const clang::BreakStmt &s) {
  builder.createBreak(getLoc(s.getBreakLoc()));

  // Insert the new block to continue codegen after the break statement.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}
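
// Illustrative sketch (assumed CIR syntax): inside a loop body,
//
//   while (c) { if (d) break; }
//
// the 'break' lowers to a cir.break terminator, and any code that
// syntactically follows it lands in the fresh block created above:
//
//   cir.if %d {
//     cir.break
//   }
//
// 'continue' behaves the same way, producing cir.continue instead.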

template <typename T>
mlir::LogicalResult
CIRGenFunction::emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
                                       mlir::ArrayAttr value, CaseOpKind kind,
                                       bool buildingTopLevelCase) {

  assert((isa<CaseStmt, DefaultStmt>(stmt)) &&
         "only case or default stmt go here");

  mlir::LogicalResult result = mlir::success();

  mlir::Location loc = getLoc(stmt->getBeginLoc());

  enum class SubStmtKind { Case, Default, Other };
  SubStmtKind subStmtKind = SubStmtKind::Other;
  const Stmt *sub = stmt->getSubStmt();

  mlir::OpBuilder::InsertPoint insertPoint;
  builder.create<CaseOp>(loc, value, kind, insertPoint);

  {
    mlir::OpBuilder::InsertionGuard guardSwitch(builder);
    builder.restoreInsertionPoint(insertPoint);

    if (isa<DefaultStmt>(sub) && isa<CaseStmt>(stmt)) {
      subStmtKind = SubStmtKind::Default;
      builder.createYield(loc);
    } else if (isa<CaseStmt>(sub) && isa<DefaultStmt, CaseStmt>(stmt)) {
      subStmtKind = SubStmtKind::Case;
      builder.createYield(loc);
    } else {
      result = emitStmt(sub, /*useCurrentScope=*/!isa<CompoundStmt>(sub));
    }

    insertPoint = builder.saveInsertionPoint();
  }

  // If the substmt is a default stmt or a case stmt, handle this special case
  // so that it ends up in the simple form. e.g.
  //
  //  switch () {
  //    case 1:
  //    default:
  //      ...
  //  }
  //
  // we prefer generating
  //
  //  cir.switch() {
  //    cir.case(equal, 1) {
  //      cir.yield
  //    }
  //    cir.case(default) {
  //      ...
  //    }
  //  }
  //
  // over
  //
  //  cir.switch() {
  //    cir.case(equal, 1) {
  //      cir.case(default) {
  //        ...
  //      }
  //    }
  //  }
  //
  // We don't need to revert this even if we later find that the switch can't
  // be in simple form, since the conversion itself is harmless.
  if (subStmtKind == SubStmtKind::Case) {
    result = emitCaseStmt(*cast<CaseStmt>(sub), condType, buildingTopLevelCase);
  } else if (subStmtKind == SubStmtKind::Default) {
    result = emitDefaultStmt(*cast<DefaultStmt>(sub), condType,
                             buildingTopLevelCase);
  } else if (buildingTopLevelCase) {
    // If we're building a top-level case, restore the insertion point to the
    // case we're building so that subsequent statements can be attached to it,
    // keeping the generated `cir.switch` operation in simple form.
    builder.restoreInsertionPoint(insertPoint);
  }

  return result;
}

mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &s,
                                                 mlir::Type condType,
                                                 bool buildingTopLevelCase) {
  cir::CaseOpKind kind;
  mlir::ArrayAttr value;
  llvm::APSInt intVal = s.getLHS()->EvaluateKnownConstInt(getContext());

  // If the case statement has an RHS value, it is representing a GNU
  // case range statement, where LHS is the beginning of the range
  // and RHS is the end of the range.
  if (const Expr *rhs = s.getRHS()) {
    llvm::APSInt endVal = rhs->EvaluateKnownConstInt(getContext());
    value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal),
                                  cir::IntAttr::get(condType, endVal)});
    kind = cir::CaseOpKind::Range;
  } else {
    value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal)});
    kind = cir::CaseOpKind::Equal;
  }

  return emitCaseDefaultCascade(&s, condType, value, kind,
                                buildingTopLevelCase);
}
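
// Illustrative sketch (assumed CIR syntax): a GNU case range such as
//
//   case 1 ... 3: ...
//
// carries both bounds in the value array of a range-kind case:
//
//   cir.case(range, [#cir.int<1> : !s32i, #cir.int<3> : !s32i]) { ... }
//
// while a plain 'case 1:' becomes cir.case(equal, [#cir.int<1> : !s32i]).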

mlir::LogicalResult CIRGenFunction::emitDefaultStmt(const clang::DefaultStmt &s,
                                                    mlir::Type condType,
                                                    bool buildingTopLevelCase) {
  return emitCaseDefaultCascade(&s, condType, builder.getArrayAttr({}),
                                cir::CaseOpKind::Default, buildingTopLevelCase);
}

mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &s,
                                                   bool buildingTopLevelCase) {
  assert(!condTypeStack.empty() &&
         "build switch case without specifying the type of the condition");

  if (s.getStmtClass() == Stmt::CaseStmtClass)
    return emitCaseStmt(cast<CaseStmt>(s), condTypeStack.back(),
                        buildingTopLevelCase);

  if (s.getStmtClass() == Stmt::DefaultStmtClass)
    return emitDefaultStmt(cast<DefaultStmt>(s), condTypeStack.back(),
                           buildingTopLevelCase);

  llvm_unreachable("expect case or default stmt");
}

mlir::LogicalResult
CIRGenFunction::emitCXXForRangeStmt(const CXXForRangeStmt &s,
                                    ArrayRef<const Attr *> forAttrs) {
  cir::ForOp forOp;

  // TODO(cir): pass in array of attributes.
  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // Evaluate the first pieces before the loop.
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();
    if (emitStmt(s.getRangeStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();
    if (emitStmt(s.getBeginStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();
    if (emitStmt(s.getEndStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();

    assert(!cir::MissingFeatures::loopInfoStack());
    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.
    assert(!cir::MissingFeatures::requiresCleanups());

    forOp = builder.createFor(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // https://en.cppreference.com/w/cpp/language/for
          // In C++ the scope of the init-statement and the scope of
          // statement are one and the same.
          bool useCurrentScope = true;
          if (emitStmt(s.getLoopVarStmt(), useCurrentScope).failed())
            loopRes = mlir::failure();
          if (emitStmt(s.getBody(), useCurrentScope).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          if (s.getInc())
            if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
              loopRes = mlir::failure();
          builder.createYield(loc);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  builder.create<cir::ScopeOp>(scopeLoc, /*scopeBuilder=*/
                               [&](mlir::OpBuilder &b, mlir::Location loc) {
                                 // Create a cleanup scope for the condition
                                 // variable cleanups. Logical equivalent from
                                 // LLVM codegen for LexicalScope
                                 // ConditionScope(*this, S.getSourceRange())...
                                 LexicalScope lexScope{
                                     *this, loc, builder.getInsertionBlock()};
                                 res = forStmtBuilder();
                               });

  if (res.failed())
    return res;

  terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &s) {
  cir::ForOp forOp;

  // TODO: pass in an array of attributes.
  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // Evaluate the first part before the loop.
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();
    assert(!cir::MissingFeatures::loopInfoStack());
    // In the classic codegen, if there are any cleanups between here and the
    // loop-exit scope, a block is created to stage the loop exit. We probably
    // already do the right thing because of ScopeOp, but we need more testing
    // to be sure we handle all cases.
    assert(!cir::MissingFeatures::requiresCleanups());

    forOp = builder.createFor(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal;
          if (s.getCond()) {
            // If the for statement has a condition scope,
            // emit the local variable declaration.
            if (s.getConditionVariable())
              emitDecl(*s.getConditionVariable());
            // C99 6.8.5p2/p4: The first substatement is executed if the
            // expression compares unequal to 0. The condition must be a
            // scalar type.
            condVal = evaluateExprAsBool(s.getCond());
          } else {
            condVal = b.create<cir::ConstantOp>(loc, builder.getTrueAttr());
          }
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the for loop body is nested within the scope of the
          // for loop's init-statement and condition.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          if (s.getInc())
            if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
              loopRes = mlir::failure();
          builder.createYield(loc);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  builder.create<cir::ScopeOp>(scopeLoc, /*scopeBuilder=*/
                               [&](mlir::OpBuilder &b, mlir::Location loc) {
                                 LexicalScope lexScope{
                                     *this, loc, builder.getInsertionBlock()};
                                 res = forStmtBuilder();
                               });

  if (res.failed())
    return res;

  terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
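
// Illustrative sketch (assumed CIR syntax): 'for (init; cond; inc) body'
// becomes a scope holding the init, followed by a cir.for with one region
// per loop part:
//
//   cir.scope {
//     ... // init
//     cir.for : cond {
//       %c = ... : !cir.bool
//       cir.condition(%c)
//     } body {
//       ...
//     } step {
//       ... // inc
//       cir.yield
//     }
//   }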

mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &s) {
  cir::DoWhileOp doWhileOp;

  // TODO: pass in array of attributes.
  auto doStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    assert(!cir::MissingFeatures::loopInfoStack());
    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.
    assert(!cir::MissingFeatures::requiresCleanups());

    doWhileOp = builder.createDoWhile(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          // C99 6.8.5p2/p4: The first substatement is executed if the
          // expression compares unequal to 0. The condition must be a
          // scalar type.
          mlir::Value condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the do-while loop body is a nested scope.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  builder.create<cir::ScopeOp>(scopeLoc, /*scopeBuilder=*/
                               [&](mlir::OpBuilder &b, mlir::Location loc) {
                                 LexicalScope lexScope{
                                     *this, loc, builder.getInsertionBlock()};
                                 res = doStmtBuilder();
                               });

  if (res.failed())
    return res;

  terminateBody(builder, doWhileOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &s) {
  cir::WhileOp whileOp;

  // TODO: pass in array of attributes.
  auto whileStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    assert(!cir::MissingFeatures::loopInfoStack());
    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.
    assert(!cir::MissingFeatures::requiresCleanups());

    whileOp = builder.createWhile(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal;
          // If the while statement has a condition variable,
          // emit the local variable declaration.
          if (s.getConditionVariable())
            emitDecl(*s.getConditionVariable());
          // C99 6.8.5p2/p4: The first substatement is executed if the
          // expression compares unequal to 0. The condition must be a
          // scalar type.
          condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the while loop body is a nested scope.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  builder.create<cir::ScopeOp>(scopeLoc, /*scopeBuilder=*/
                               [&](mlir::OpBuilder &b, mlir::Location loc) {
                                 LexicalScope lexScope{
                                     *this, loc, builder.getInsertionBlock()};
                                 res = whileStmtBuilder();
                               });

  if (res.failed())
    return res;

  terminateBody(builder, whileOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
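
// Illustrative sketch (assumed CIR syntax): 'while (cond) body' becomes a
// cir.while whose first region evaluates the condition and whose second
// region holds the body; 'do body while (cond);' uses cir.do with the
// regions in the opposite order:
//
//   cir.scope {
//     cir.while {
//       %c = ... : !cir.bool
//       cir.condition(%c)
//     } do {
//       ...
//     }
//   }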

mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *s) {
  // It is rare but legal if the switch body is not a compound stmt. e.g.,
  //
  //  switch(a)
  //    while(...) {
  //      case1
  //      ...
  //      case2
  //      ...
  //    }
  if (!isa<CompoundStmt>(s))
    return emitStmt(s, /*useCurrentScope=*/true);

  auto *compoundStmt = cast<CompoundStmt>(s);

  mlir::Block *switchBlock = builder.getBlock();
  for (auto *c : compoundStmt->body()) {
    if (auto *switchCase = dyn_cast<SwitchCase>(c)) {
      builder.setInsertionPointToEnd(switchBlock);
      // The insertion point is reset automatically, so that statements
      // following this case can be attached to the region of the previously
      // built case op, which helps keep the generated `cir.switch` in simple
      // form.
      if (mlir::failed(
              emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true)))
        return mlir::failure();

      continue;
    }

    // Otherwise, just build the statements in the nearest case region.
    if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa<CompoundStmt>(c))))
      return mlir::failure();
  }

  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const clang::SwitchStmt &s) {
  // TODO: LLVM codegen does some early optimization to fold the condition and
  // only emit live cases. CIR should use MLIR to achieve similar things,
  // nothing to be done here.
  // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))...
  assert(!cir::MissingFeatures::constantFoldSwitchStatement());

  SwitchOp swop;
  auto switchStmtBuilder = [&]() -> mlir::LogicalResult {
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();

    if (s.getConditionVariable())
      emitDecl(*s.getConditionVariable());

    mlir::Value condV = emitScalarExpr(s.getCond());

    // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts())
    assert(!cir::MissingFeatures::pgoUse());
    assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
    // TODO: Handle a switch condition wrapped by __builtin_unpredictable.
    assert(!cir::MissingFeatures::insertBuiltinUnpredictable());

    mlir::LogicalResult res = mlir::success();
    swop = builder.create<SwitchOp>(
        getLoc(s.getBeginLoc()), condV,
        /*switchBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
          curLexScope->setAsSwitch();

          condTypeStack.push_back(condV.getType());

          res = emitSwitchBody(s.getBody());

          condTypeStack.pop_back();
        });

    return res;
  };

  // The switch scope contains the full source range for SwitchStmt.
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  mlir::LogicalResult res = mlir::success();
  builder.create<cir::ScopeOp>(scopeLoc, /*scopeBuilder=*/
                               [&](mlir::OpBuilder &b, mlir::Location loc) {
                                 LexicalScope lexScope{
                                     *this, loc, builder.getInsertionBlock()};
                                 res = switchStmtBuilder();
                               });

  // If emission failed, the switch op may never have been created (e.g. the
  // init statement failed to emit); bail out before touching its regions.
  if (res.failed())
    return res;

  llvm::SmallVector<CaseOp> cases;
  swop.collectCases(cases);
  for (auto caseOp : cases)
    terminateBody(builder, caseOp.getCaseRegion(), caseOp.getLoc());
  terminateBody(builder, swop.getBody(), swop.getLoc());

  return res;
}
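
// Illustrative sketch (assumed CIR syntax): for
//
//   switch (x) {
//   case 1: f(); break;
//   default: g();
//   }
//
// the emitted structure is roughly
//
//   cir.scope {
//     %x = cir.load ...
//     cir.switch (%x : !s32i) {
//       cir.case(equal, [#cir.int<1> : !s32i]) {
//         ... // call f, then cir.break
//       }
//       cir.case(default, []) {
//         ... // call g
//       }
//       cir.yield
//     }
//   }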