//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
// Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

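/// EmitStopPoint - If debug info is being generated, emit a source location
/// ("stop point") for the given statement and remember it as the last stop
/// point emitted.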
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, as they may be
      // in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

98 switch (S->getStmtClass()) {
99 case Stmt::NoStmtClass:
100 case Stmt::CXXCatchStmtClass:
101 case Stmt::SEHExceptStmtClass:
102 case Stmt::SEHFinallyStmtClass:
103 case Stmt::MSDependentExistsStmtClass:
104 llvm_unreachable("invalid statement class to emit generically");
105 case Stmt::NullStmtClass:
106 case Stmt::CompoundStmtClass:
107 case Stmt::DeclStmtClass:
108 case Stmt::LabelStmtClass:
109 case Stmt::AttributedStmtClass:
110 case Stmt::GotoStmtClass:
111 case Stmt::BreakStmtClass:
112 case Stmt::ContinueStmtClass:
113 case Stmt::DefaultStmtClass:
114 case Stmt::CaseStmtClass:
115 case Stmt::SEHLeaveStmtClass:
116 llvm_unreachable("should have emitted these statements as simple");
117
118#define STMT(Type, Base)
119#define ABSTRACT_STMT(Op)
120#define EXPR(Type, Base) \
121 case Stmt::Type##Class:
122#include "clang/AST/StmtNodes.inc"
123 {
124 // Remember the block we came in on.
125 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
126 assert(incoming && "expression emission must have an insertion point");
127
128 EmitIgnoredExpr(E: cast<Expr>(S));
129
130 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
131 assert(outgoing && "expression emission cleared block!");
132
133 // The expression emitters assume (reasonably!) that the insertion
134 // point is always set. To maintain that, the call-emission code
135 // for noreturn functions has to enter a new block with no
136 // predecessors. We want to kill that block and mark the current
137 // insertion point unreachable in the common case of a call like
138 // "exit();". Since expression emission doesn't otherwise create
139 // blocks with no predecessors, we can just test for that.
140 // However, we must be careful not to do this to our incoming
141 // block, because *statement* emission does sometimes create
142 // reachable blocks which will have no predecessors until later in
143 // the function. This occurs with, e.g., labels that are not
144 // reachable by fallthrough.
145 if (incoming != outgoing && outgoing->use_empty()) {
146 outgoing->eraseFromParent();
147 Builder.ClearInsertionPoint();
148 }
149 break;
150 }
151
152 case Stmt::IndirectGotoStmtClass:
153 EmitIndirectGotoStmt(S: cast<IndirectGotoStmt>(*S)); break;
154
155 case Stmt::IfStmtClass: EmitIfStmt(S: cast<IfStmt>(*S)); break;
156 case Stmt::WhileStmtClass: EmitWhileStmt(S: cast<WhileStmt>(*S), Attrs); break;
157 case Stmt::DoStmtClass: EmitDoStmt(S: cast<DoStmt>(*S), Attrs); break;
158 case Stmt::ForStmtClass: EmitForStmt(S: cast<ForStmt>(*S), Attrs); break;
159
160 case Stmt::ReturnStmtClass: EmitReturnStmt(S: cast<ReturnStmt>(*S)); break;
161
162 case Stmt::SwitchStmtClass: EmitSwitchStmt(S: cast<SwitchStmt>(*S)); break;
163 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
164 case Stmt::MSAsmStmtClass: EmitAsmStmt(S: cast<AsmStmt>(*S)); break;
165 case Stmt::CoroutineBodyStmtClass:
166 EmitCoroutineBody(S: cast<CoroutineBodyStmt>(*S));
167 break;
168 case Stmt::CoreturnStmtClass:
169 EmitCoreturnStmt(S: cast<CoreturnStmt>(*S));
170 break;
171 case Stmt::CapturedStmtClass: {
172 const CapturedStmt *CS = cast<CapturedStmt>(S);
173 EmitCapturedStmt(S: *CS, K: CS->getCapturedRegionKind());
174 }
175 break;
176 case Stmt::ObjCAtTryStmtClass:
177 EmitObjCAtTryStmt(S: cast<ObjCAtTryStmt>(*S));
178 break;
179 case Stmt::ObjCAtCatchStmtClass:
180 llvm_unreachable(
181 "@catch statements should be handled by EmitObjCAtTryStmt");
182 case Stmt::ObjCAtFinallyStmtClass:
183 llvm_unreachable(
184 "@finally statements should be handled by EmitObjCAtTryStmt");
185 case Stmt::ObjCAtThrowStmtClass:
186 EmitObjCAtThrowStmt(S: cast<ObjCAtThrowStmt>(*S));
187 break;
188 case Stmt::ObjCAtSynchronizedStmtClass:
189 EmitObjCAtSynchronizedStmt(S: cast<ObjCAtSynchronizedStmt>(*S));
190 break;
191 case Stmt::ObjCForCollectionStmtClass:
192 EmitObjCForCollectionStmt(S: cast<ObjCForCollectionStmt>(*S));
193 break;
194 case Stmt::ObjCAutoreleasePoolStmtClass:
195 EmitObjCAutoreleasePoolStmt(S: cast<ObjCAutoreleasePoolStmt>(*S));
196 break;
197
198 case Stmt::CXXTryStmtClass:
199 EmitCXXTryStmt(S: cast<CXXTryStmt>(*S));
200 break;
201 case Stmt::CXXForRangeStmtClass:
202 EmitCXXForRangeStmt(S: cast<CXXForRangeStmt>(*S), Attrs);
203 break;
204 case Stmt::SEHTryStmtClass:
205 EmitSEHTryStmt(S: cast<SEHTryStmt>(*S));
206 break;
207 case Stmt::OMPMetaDirectiveClass:
208 EmitOMPMetaDirective(S: cast<OMPMetaDirective>(*S));
209 break;
210 case Stmt::OMPCanonicalLoopClass:
211 EmitOMPCanonicalLoop(S: cast<OMPCanonicalLoop>(S));
212 break;
213 case Stmt::OMPParallelDirectiveClass:
214 EmitOMPParallelDirective(S: cast<OMPParallelDirective>(*S));
215 break;
216 case Stmt::OMPSimdDirectiveClass:
217 EmitOMPSimdDirective(S: cast<OMPSimdDirective>(*S));
218 break;
219 case Stmt::OMPTileDirectiveClass:
220 EmitOMPTileDirective(S: cast<OMPTileDirective>(*S));
221 break;
222 case Stmt::OMPUnrollDirectiveClass:
223 EmitOMPUnrollDirective(S: cast<OMPUnrollDirective>(*S));
224 break;
225 case Stmt::OMPForDirectiveClass:
226 EmitOMPForDirective(S: cast<OMPForDirective>(*S));
227 break;
228 case Stmt::OMPForSimdDirectiveClass:
229 EmitOMPForSimdDirective(S: cast<OMPForSimdDirective>(*S));
230 break;
231 case Stmt::OMPSectionsDirectiveClass:
232 EmitOMPSectionsDirective(S: cast<OMPSectionsDirective>(*S));
233 break;
234 case Stmt::OMPSectionDirectiveClass:
235 EmitOMPSectionDirective(S: cast<OMPSectionDirective>(*S));
236 break;
237 case Stmt::OMPSingleDirectiveClass:
238 EmitOMPSingleDirective(S: cast<OMPSingleDirective>(*S));
239 break;
240 case Stmt::OMPMasterDirectiveClass:
241 EmitOMPMasterDirective(S: cast<OMPMasterDirective>(*S));
242 break;
243 case Stmt::OMPCriticalDirectiveClass:
244 EmitOMPCriticalDirective(S: cast<OMPCriticalDirective>(*S));
245 break;
246 case Stmt::OMPParallelForDirectiveClass:
247 EmitOMPParallelForDirective(S: cast<OMPParallelForDirective>(*S));
248 break;
249 case Stmt::OMPParallelForSimdDirectiveClass:
250 EmitOMPParallelForSimdDirective(S: cast<OMPParallelForSimdDirective>(*S));
251 break;
252 case Stmt::OMPParallelMasterDirectiveClass:
253 EmitOMPParallelMasterDirective(S: cast<OMPParallelMasterDirective>(*S));
254 break;
255 case Stmt::OMPParallelSectionsDirectiveClass:
256 EmitOMPParallelSectionsDirective(S: cast<OMPParallelSectionsDirective>(*S));
257 break;
258 case Stmt::OMPTaskDirectiveClass:
259 EmitOMPTaskDirective(S: cast<OMPTaskDirective>(*S));
260 break;
261 case Stmt::OMPTaskyieldDirectiveClass:
262 EmitOMPTaskyieldDirective(S: cast<OMPTaskyieldDirective>(*S));
263 break;
264 case Stmt::OMPErrorDirectiveClass:
265 EmitOMPErrorDirective(S: cast<OMPErrorDirective>(*S));
266 break;
267 case Stmt::OMPBarrierDirectiveClass:
268 EmitOMPBarrierDirective(S: cast<OMPBarrierDirective>(*S));
269 break;
270 case Stmt::OMPTaskwaitDirectiveClass:
271 EmitOMPTaskwaitDirective(S: cast<OMPTaskwaitDirective>(*S));
272 break;
273 case Stmt::OMPTaskgroupDirectiveClass:
274 EmitOMPTaskgroupDirective(S: cast<OMPTaskgroupDirective>(*S));
275 break;
276 case Stmt::OMPFlushDirectiveClass:
277 EmitOMPFlushDirective(S: cast<OMPFlushDirective>(*S));
278 break;
279 case Stmt::OMPDepobjDirectiveClass:
280 EmitOMPDepobjDirective(S: cast<OMPDepobjDirective>(*S));
281 break;
282 case Stmt::OMPScanDirectiveClass:
283 EmitOMPScanDirective(S: cast<OMPScanDirective>(*S));
284 break;
285 case Stmt::OMPOrderedDirectiveClass:
286 EmitOMPOrderedDirective(S: cast<OMPOrderedDirective>(*S));
287 break;
288 case Stmt::OMPAtomicDirectiveClass:
289 EmitOMPAtomicDirective(S: cast<OMPAtomicDirective>(*S));
290 break;
291 case Stmt::OMPTargetDirectiveClass:
292 EmitOMPTargetDirective(S: cast<OMPTargetDirective>(*S));
293 break;
294 case Stmt::OMPTeamsDirectiveClass:
295 EmitOMPTeamsDirective(S: cast<OMPTeamsDirective>(*S));
296 break;
297 case Stmt::OMPCancellationPointDirectiveClass:
298 EmitOMPCancellationPointDirective(S: cast<OMPCancellationPointDirective>(*S));
299 break;
300 case Stmt::OMPCancelDirectiveClass:
301 EmitOMPCancelDirective(S: cast<OMPCancelDirective>(*S));
302 break;
303 case Stmt::OMPTargetDataDirectiveClass:
304 EmitOMPTargetDataDirective(S: cast<OMPTargetDataDirective>(*S));
305 break;
306 case Stmt::OMPTargetEnterDataDirectiveClass:
307 EmitOMPTargetEnterDataDirective(S: cast<OMPTargetEnterDataDirective>(*S));
308 break;
309 case Stmt::OMPTargetExitDataDirectiveClass:
310 EmitOMPTargetExitDataDirective(S: cast<OMPTargetExitDataDirective>(*S));
311 break;
312 case Stmt::OMPTargetParallelDirectiveClass:
313 EmitOMPTargetParallelDirective(S: cast<OMPTargetParallelDirective>(*S));
314 break;
315 case Stmt::OMPTargetParallelForDirectiveClass:
316 EmitOMPTargetParallelForDirective(S: cast<OMPTargetParallelForDirective>(*S));
317 break;
318 case Stmt::OMPTaskLoopDirectiveClass:
319 EmitOMPTaskLoopDirective(S: cast<OMPTaskLoopDirective>(*S));
320 break;
321 case Stmt::OMPTaskLoopSimdDirectiveClass:
322 EmitOMPTaskLoopSimdDirective(S: cast<OMPTaskLoopSimdDirective>(*S));
323 break;
324 case Stmt::OMPMasterTaskLoopDirectiveClass:
325 EmitOMPMasterTaskLoopDirective(S: cast<OMPMasterTaskLoopDirective>(*S));
326 break;
327 case Stmt::OMPMaskedTaskLoopDirectiveClass:
328 llvm_unreachable("masked taskloop directive not supported yet.");
329 break;
330 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
331 EmitOMPMasterTaskLoopSimdDirective(
332 S: cast<OMPMasterTaskLoopSimdDirective>(*S));
333 break;
334 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
335 llvm_unreachable("masked taskloop simd directive not supported yet.");
336 break;
337 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
338 EmitOMPParallelMasterTaskLoopDirective(
339 S: cast<OMPParallelMasterTaskLoopDirective>(*S));
340 break;
341 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
342 llvm_unreachable("parallel masked taskloop directive not supported yet.");
343 break;
344 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
345 EmitOMPParallelMasterTaskLoopSimdDirective(
346 S: cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
347 break;
348 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
349 llvm_unreachable(
350 "parallel masked taskloop simd directive not supported yet.");
351 break;
352 case Stmt::OMPDistributeDirectiveClass:
353 EmitOMPDistributeDirective(S: cast<OMPDistributeDirective>(*S));
354 break;
355 case Stmt::OMPTargetUpdateDirectiveClass:
356 EmitOMPTargetUpdateDirective(S: cast<OMPTargetUpdateDirective>(*S));
357 break;
358 case Stmt::OMPDistributeParallelForDirectiveClass:
359 EmitOMPDistributeParallelForDirective(
360 S: cast<OMPDistributeParallelForDirective>(*S));
361 break;
362 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
363 EmitOMPDistributeParallelForSimdDirective(
364 S: cast<OMPDistributeParallelForSimdDirective>(*S));
365 break;
366 case Stmt::OMPDistributeSimdDirectiveClass:
367 EmitOMPDistributeSimdDirective(S: cast<OMPDistributeSimdDirective>(*S));
368 break;
369 case Stmt::OMPTargetParallelForSimdDirectiveClass:
370 EmitOMPTargetParallelForSimdDirective(
371 S: cast<OMPTargetParallelForSimdDirective>(*S));
372 break;
373 case Stmt::OMPTargetSimdDirectiveClass:
374 EmitOMPTargetSimdDirective(S: cast<OMPTargetSimdDirective>(*S));
375 break;
376 case Stmt::OMPTeamsDistributeDirectiveClass:
377 EmitOMPTeamsDistributeDirective(S: cast<OMPTeamsDistributeDirective>(*S));
378 break;
379 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
380 EmitOMPTeamsDistributeSimdDirective(
381 S: cast<OMPTeamsDistributeSimdDirective>(*S));
382 break;
383 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
384 EmitOMPTeamsDistributeParallelForSimdDirective(
385 S: cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
386 break;
387 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
388 EmitOMPTeamsDistributeParallelForDirective(
389 S: cast<OMPTeamsDistributeParallelForDirective>(*S));
390 break;
391 case Stmt::OMPTargetTeamsDirectiveClass:
392 EmitOMPTargetTeamsDirective(S: cast<OMPTargetTeamsDirective>(*S));
393 break;
394 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
395 EmitOMPTargetTeamsDistributeDirective(
396 S: cast<OMPTargetTeamsDistributeDirective>(*S));
397 break;
398 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
399 EmitOMPTargetTeamsDistributeParallelForDirective(
400 S: cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
401 break;
402 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
403 EmitOMPTargetTeamsDistributeParallelForSimdDirective(
404 S: cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
405 break;
406 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
407 EmitOMPTargetTeamsDistributeSimdDirective(
408 S: cast<OMPTargetTeamsDistributeSimdDirective>(*S));
409 break;
410 case Stmt::OMPInteropDirectiveClass:
411 EmitOMPInteropDirective(S: cast<OMPInteropDirective>(*S));
412 break;
413 case Stmt::OMPDispatchDirectiveClass:
414 CGM.ErrorUnsupported(S, Type: "OpenMP dispatch directive");
415 break;
416 case Stmt::OMPScopeDirectiveClass:
417 llvm_unreachable("scope not supported with FE outlining");
418 case Stmt::OMPMaskedDirectiveClass:
419 EmitOMPMaskedDirective(S: cast<OMPMaskedDirective>(*S));
420 break;
421 case Stmt::OMPGenericLoopDirectiveClass:
422 EmitOMPGenericLoopDirective(S: cast<OMPGenericLoopDirective>(*S));
423 break;
424 case Stmt::OMPTeamsGenericLoopDirectiveClass:
425 EmitOMPTeamsGenericLoopDirective(S: cast<OMPTeamsGenericLoopDirective>(*S));
426 break;
427 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
428 EmitOMPTargetTeamsGenericLoopDirective(
429 S: cast<OMPTargetTeamsGenericLoopDirective>(*S));
430 break;
431 case Stmt::OMPParallelGenericLoopDirectiveClass:
432 EmitOMPParallelGenericLoopDirective(
433 S: cast<OMPParallelGenericLoopDirective>(*S));
434 break;
435 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
436 EmitOMPTargetParallelGenericLoopDirective(
437 S: cast<OMPTargetParallelGenericLoopDirective>(*S));
438 break;
439 case Stmt::OMPParallelMaskedDirectiveClass:
440 EmitOMPParallelMaskedDirective(S: cast<OMPParallelMaskedDirective>(*S));
441 break;
442 case Stmt::OpenACCComputeConstructClass:
443 EmitOpenACCComputeConstruct(S: cast<OpenACCComputeConstruct>(*S));
444 break;
445 }
446}
447
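/// EmitSimpleStmt - Try to emit the given statement as one of the "simple"
/// statement kinds that manage their own debug-info handling (null and
/// compound statements, declarations, labels, jumps, and switch cases).
/// Returns true if the statement was handled here.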
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns
/// it (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

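/// SimplifyForwardingBlocks - If the given block does nothing but branch
/// unconditionally to its successor, forward its uses to that successor and
/// delete it. This is skipped while the cleanup stack is non-empty, since the
/// scope map and cleanup entries may still refer to the block.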
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

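/// EmitBlock - Branch from the current insertion point to the given block (if
/// needed), append the block to the current function, and make it the new
/// insertion point. If IsFinished is true and the block ended up unused, it is
/// deleted instead.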
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

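/// EmitBlockAfterUses - Insert the given block into the function directly
/// after a block that already uses it (falling back to the end of the
/// function if it has no instruction uses yet), then make it the insertion
/// point.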
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

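/// getJumpDestForLabel - Return the JumpDest for the given label, lazily
/// creating a not-yet-inserted basic block for it on first use.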
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // If this label is a side entry into a scope and we are emitting async
  // exception handling (IsEHa), mark the beginning of the scope here.
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

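/// EmitAttributedStmt - Emit an attributed statement, recording the nomerge,
/// noinline, always_inline, and musttail attributes in the CodeGenFunction
/// state for the duration of the sub-statement and emitting an llvm.assume
/// for assumption attributes whose condition is side-effect free.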
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

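/// EmitIfStmt - Emit an if statement. For consteval if, only the runtime
/// branch is emitted; otherwise the condition is constant-folded when
/// possible so that the dead arm can be skipped entirely.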
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, /*IsFinished=*/true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

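/// EmitWhileStmt - Emit a while loop. The loop header re-evaluates the
/// condition before every iteration and doubles as the 'continue' target; the
/// exit block is the 'break' target.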
911void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
912 ArrayRef<const Attr *> WhileAttrs) {
913 // Emit the header for the loop, which will also become
914 // the continue target.
915 JumpDest LoopHeader = getJumpDestInCurrentScope(Name: "while.cond");
916 EmitBlock(BB: LoopHeader.getBlock());
917
918 // Create an exit block for when the condition fails, which will
919 // also become the break target.
920 JumpDest LoopExit = getJumpDestInCurrentScope(Name: "while.end");
921
922 // Store the blocks to use for break and continue.
923 BreakContinueStack.push_back(Elt: BreakContinue(LoopExit, LoopHeader));
924
925 // C++ [stmt.while]p2:
926 // When the condition of a while statement is a declaration, the
927 // scope of the variable that is declared extends from its point
928 // of declaration (3.3.2) to the end of the while statement.
929 // [...]
930 // The object created in a condition is destroyed and created
931 // with each iteration of the loop.
932 RunCleanupsScope ConditionScope(*this);
933
934 if (S.getConditionVariable())
935 EmitDecl(*S.getConditionVariable());
936
937 // Evaluate the conditional in the while header. C99 6.8.5.1: The
938 // evaluation of the controlling expression takes place before each
939 // execution of the loop body.
940 llvm::Value *BoolCondVal = EvaluateExprAsBool(E: S.getCond());
941
942 // while(1) is common, avoid extra exit blocks. Be sure
943 // to correctly handle break/continue though.
944 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(Val: BoolCondVal);
945 bool CondIsConstInt = C != nullptr;
946 bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
947 const SourceRange &R = S.getSourceRange();
948 LoopStack.push(Header: LoopHeader.getBlock(), Ctx&: CGM.getContext(), CGOpts: CGM.getCodeGenOpts(),
949 Attrs: WhileAttrs, StartLoc: SourceLocToDebugLoc(Location: R.getBegin()),
950 EndLoc: SourceLocToDebugLoc(Location: R.getEnd()),
951 MustProgress: checkIfLoopMustProgress(HasConstantCond: CondIsConstInt));
952
953 // When single byte coverage mode is enabled, add a counter to loop condition.
954 if (llvm::EnableSingleByteCoverage)
955 incrementProfileCounter(S.getCond());
956
957 // As long as the condition is true, go to the loop body.
958 llvm::BasicBlock *LoopBody = createBasicBlock(name: "while.body");
959 if (EmitBoolCondBranch) {
960 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
961 if (ConditionScope.requiresCleanups())
962 ExitBlock = createBasicBlock(name: "while.exit");
963 llvm::MDNode *Weights =
964 createProfileWeightsForLoop(S.getCond(), getProfileCount(S: S.getBody()));
965 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
966 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
967 Cond: BoolCondVal, LH: Stmt::getLikelihood(S: S.getBody()));
968 Builder.CreateCondBr(Cond: BoolCondVal, True: LoopBody, False: ExitBlock, BranchWeights: Weights);
969
970 if (ExitBlock != LoopExit.getBlock()) {
971 EmitBlock(BB: ExitBlock);
972 EmitBranchThroughCleanup(Dest: LoopExit);
973 }
974 } else if (const Attr *A = Stmt::getLikelihoodAttr(S: S.getBody())) {
975 CGM.getDiags().Report(A->getLocation(),
976 diag::warn_attribute_has_no_effect_on_infinite_loop)
977 << A << A->getRange();
978 CGM.getDiags().Report(
979 S.getWhileLoc(),
980 diag::note_attribute_has_no_effect_on_infinite_loop_here)
981 << SourceRange(S.getWhileLoc(), S.getRParenLoc());
982 }
983
984 // Emit the loop body. We have to emit this in a cleanup scope
985 // because it might be a singleton DeclStmt.
986 {
987 RunCleanupsScope BodyScope(*this);
988 EmitBlock(BB: LoopBody);
989 // When single byte coverage mode is enabled, add a counter to the body.
990 if (llvm::EnableSingleByteCoverage)
991 incrementProfileCounter(S: S.getBody());
992 else
993 incrementProfileCounter(&S);
994 EmitStmt(S: S.getBody());
995 }
996
997 BreakContinueStack.pop_back();
998
999 // Immediately force cleanup.
1000 ConditionScope.ForceCleanup();
1001
1002 EmitStopPoint(&S);
1003 // Branch to the loop header again.
1004 EmitBranch(Target: LoopHeader.getBlock());
1005
1006 LoopStack.pop();
1007
1008 // Emit the exit block.
1009 EmitBlock(BB: LoopExit.getBlock(), IsFinished: true);
1010
  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch; try to erase it.
1013 if (!EmitBoolCondBranch)
1014 SimplifyForwardingBlocks(BB: LoopHeader.getBlock());
1015
1016 // When single byte coverage mode is enabled, add a counter to continuation
1017 // block.
1018 if (llvm::EnableSingleByteCoverage)
1019 incrementProfileCounter(&S);
1020}
1021
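/// EmitDoStmt - Emit a do/while loop. The body is emitted first; the
/// condition is evaluated in a separate block that serves as the 'continue'
/// target.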
1022void CodeGenFunction::EmitDoStmt(const DoStmt &S,
1023 ArrayRef<const Attr *> DoAttrs) {
1024 JumpDest LoopExit = getJumpDestInCurrentScope(Name: "do.end");
1025 JumpDest LoopCond = getJumpDestInCurrentScope(Name: "do.cond");
1026
1027 uint64_t ParentCount = getCurrentProfileCount();
1028
1029 // Store the blocks to use for break and continue.
1030 BreakContinueStack.push_back(Elt: BreakContinue(LoopExit, LoopCond));
1031
1032 // Emit the body of the loop.
1033 llvm::BasicBlock *LoopBody = createBasicBlock(name: "do.body");
1034
1035 if (llvm::EnableSingleByteCoverage)
1036 EmitBlockWithFallThrough(BB: LoopBody, S: S.getBody());
1037 else
1038 EmitBlockWithFallThrough(BB: LoopBody, S: &S);
1039 {
1040 RunCleanupsScope BodyScope(*this);
1041 EmitStmt(S: S.getBody());
1042 }
1043
1044 EmitBlock(BB: LoopCond.getBlock());
1045 // When single byte coverage mode is enabled, add a counter to loop condition.
1046 if (llvm::EnableSingleByteCoverage)
1047 incrementProfileCounter(S.getCond());
1048
1049 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
1050 // after each execution of the loop body."
1051
1052 // Evaluate the conditional in the while header.
1053 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1054 // compares unequal to 0. The condition must be a scalar type.
1055 llvm::Value *BoolCondVal = EvaluateExprAsBool(E: S.getCond());
1056
1057 BreakContinueStack.pop_back();
1058
1059 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
1060 // to correctly handle break/continue though.
1061 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(Val: BoolCondVal);
1062 bool CondIsConstInt = C;
1063 bool EmitBoolCondBranch = !C || !C->isZero();
1064
1065 const SourceRange &R = S.getSourceRange();
1066 LoopStack.push(Header: LoopBody, Ctx&: CGM.getContext(), CGOpts: CGM.getCodeGenOpts(), Attrs: DoAttrs,
1067 StartLoc: SourceLocToDebugLoc(Location: R.getBegin()),
1068 EndLoc: SourceLocToDebugLoc(Location: R.getEnd()),
1069 MustProgress: checkIfLoopMustProgress(HasConstantCond: CondIsConstInt));
1070
1071 // As long as the condition is true, iterate the loop.
1072 if (EmitBoolCondBranch) {
1073 uint64_t BackedgeCount = getProfileCount(S: S.getBody()) - ParentCount;
1074 Builder.CreateCondBr(
1075 Cond: BoolCondVal, True: LoopBody, False: LoopExit.getBlock(),
1076 BranchWeights: createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1077 }
1078
1079 LoopStack.pop();
1080
1081 // Emit the exit block.
1082 EmitBlock(BB: LoopExit.getBlock());
1083
  // The DoCond block typically is just a branch if we skipped
  // emitting a branch; try to erase it.
1086 if (!EmitBoolCondBranch)
1087 SimplifyForwardingBlocks(BB: LoopCond.getBlock());
1088
1089 // When single byte coverage mode is enabled, add a counter to continuation
1090 // block.
1091 if (llvm::EnableSingleByteCoverage)
1092 incrementProfileCounter(S: &S);
1093}
1094
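/// EmitForStmt - Emit a C/C++ for statement, including its optional init
/// statement, condition (and condition variable), increment, and body.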
1095void CodeGenFunction::EmitForStmt(const ForStmt &S,
1096 ArrayRef<const Attr *> ForAttrs) {
1097 JumpDest LoopExit = getJumpDestInCurrentScope(Name: "for.end");
1098
1099 LexicalScope ForScope(*this, S.getSourceRange());
1100
1101 // Evaluate the first part before the loop.
1102 if (S.getInit())
1103 EmitStmt(S: S.getInit());
1104
1105 // Start the loop with a block that tests the condition.
1106 // If there's an increment, the continue scope will be overwritten
1107 // later.
1108 JumpDest CondDest = getJumpDestInCurrentScope(Name: "for.cond");
1109 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1110 EmitBlock(BB: CondBlock);
1111
1112 Expr::EvalResult Result;
1113 bool CondIsConstInt =
1114 !S.getCond() || S.getCond()->EvaluateAsInt(Result, Ctx: getContext());
1115
1116 const SourceRange &R = S.getSourceRange();
1117 LoopStack.push(Header: CondBlock, Ctx&: CGM.getContext(), CGOpts: CGM.getCodeGenOpts(), Attrs: ForAttrs,
1118 StartLoc: SourceLocToDebugLoc(Location: R.getBegin()),
1119 EndLoc: SourceLocToDebugLoc(Location: R.getEnd()),
1120 MustProgress: checkIfLoopMustProgress(HasConstantCond: CondIsConstInt));
1121
1122 // Create a cleanup scope for the condition variable cleanups.
1123 LexicalScope ConditionScope(*this, S.getSourceRange());
1124
1125 // If the for loop doesn't have an increment we can just use the condition as
1126 // the continue block. Otherwise, if there is no condition variable, we can
1127 // form the continue block now. If there is a condition variable, we can't
1128 // form the continue block until after we've emitted the condition, because
1129 // the condition is in scope in the increment, but Sema's jump diagnostics
1130 // ensure that there are no continues from the condition variable that jump
1131 // to the loop increment.
1132 JumpDest Continue;
1133 if (!S.getInc())
1134 Continue = CondDest;
1135 else if (!S.getConditionVariable())
1136 Continue = getJumpDestInCurrentScope(Name: "for.inc");
1137 BreakContinueStack.push_back(Elt: BreakContinue(LoopExit, Continue));
1138
1139 if (S.getCond()) {
1140 // If the for statement has a condition scope, emit the local variable
1141 // declaration.
1142 if (S.getConditionVariable()) {
1143 EmitDecl(*S.getConditionVariable());
1144
1145 // We have entered the condition variable's scope, so we're now able to
1146 // jump to the continue block.
1147 Continue = S.getInc() ? getJumpDestInCurrentScope(Name: "for.inc") : CondDest;
1148 BreakContinueStack.back().ContinueBlock = Continue;
1149 }
1150
1151 // When single byte coverage mode is enabled, add a counter to loop
1152 // condition.
1153 if (llvm::EnableSingleByteCoverage)
1154 incrementProfileCounter(S.getCond());
1155
1156 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1157 // If there are any cleanups between here and the loop-exit scope,
1158 // create a block to stage a loop exit along.
1159 if (ForScope.requiresCleanups())
1160 ExitBlock = createBasicBlock(name: "for.cond.cleanup");
1161
1162 // As long as the condition is true, iterate the loop.
1163 llvm::BasicBlock *ForBody = createBasicBlock(name: "for.body");
1164
1165 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1166 // compares unequal to 0. The condition must be a scalar type.
1167 llvm::Value *BoolCondVal = EvaluateExprAsBool(E: S.getCond());
1168 llvm::MDNode *Weights =
1169 createProfileWeightsForLoop(S.getCond(), getProfileCount(S: S.getBody()));
1170 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1171 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1172 Cond: BoolCondVal, LH: Stmt::getLikelihood(S: S.getBody()));
1173
1174 Builder.CreateCondBr(Cond: BoolCondVal, True: ForBody, False: ExitBlock, BranchWeights: Weights);
1175
1176 if (ExitBlock != LoopExit.getBlock()) {
1177 EmitBlock(BB: ExitBlock);
1178 EmitBranchThroughCleanup(Dest: LoopExit);
1179 }
1180
1181 EmitBlock(BB: ForBody);
1182 } else {
1183 // Treat it as a non-zero constant. Don't even create a new block for the
1184 // body, just fall into it.
1185 }
1186
1187 // When single byte coverage mode is enabled, add a counter to the body.
1188 if (llvm::EnableSingleByteCoverage)
1189 incrementProfileCounter(S: S.getBody());
1190 else
1191 incrementProfileCounter(S: &S);
1192 {
1193 // Create a separate cleanup scope for the body, in case it is not
1194 // a compound statement.
1195 RunCleanupsScope BodyScope(*this);
1196 EmitStmt(S: S.getBody());
1197 }
1198
1199 // If there is an increment, emit it next.
1200 if (S.getInc()) {
1201 EmitBlock(BB: Continue.getBlock());
1202 EmitStmt(S.getInc());
1203 if (llvm::EnableSingleByteCoverage)
1204 incrementProfileCounter(S.getInc());
1205 }
1206
1207 BreakContinueStack.pop_back();
1208
1209 ConditionScope.ForceCleanup();
1210
1211 EmitStopPoint(S: &S);
1212 EmitBranch(Target: CondBlock);
1213
1214 ForScope.ForceCleanup();
1215
1216 LoopStack.pop();
1217
1218 // Emit the fall-through block.
1219 EmitBlock(BB: LoopExit.getBlock(), IsFinished: true);
1220
1221 // When single byte coverage mode is enabled, add a counter to continuation
1222 // block.
1223 if (llvm::EnableSingleByteCoverage)
1224 incrementProfileCounter(S: &S);
1225}
1226
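/// EmitCXXForRangeStmt - Emit a C++ range-based for loop using the range,
/// begin, and end statements that Sema synthesized for it.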
1227void
1228CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1229 ArrayRef<const Attr *> ForAttrs) {
1230 JumpDest LoopExit = getJumpDestInCurrentScope(Name: "for.end");
1231
1232 LexicalScope ForScope(*this, S.getSourceRange());
1233
1234 // Evaluate the first pieces before the loop.
1235 if (S.getInit())
1236 EmitStmt(S: S.getInit());
1237 EmitStmt(S: S.getRangeStmt());
1238 EmitStmt(S: S.getBeginStmt());
1239 EmitStmt(S: S.getEndStmt());
1240
1241 // Start the loop with a block that tests the condition.
1242 // If there's an increment, the continue scope will be overwritten
1243 // later.
1244 llvm::BasicBlock *CondBlock = createBasicBlock(name: "for.cond");
1245 EmitBlock(BB: CondBlock);
1246
1247 const SourceRange &R = S.getSourceRange();
1248 LoopStack.push(Header: CondBlock, Ctx&: CGM.getContext(), CGOpts: CGM.getCodeGenOpts(), Attrs: ForAttrs,
1249 StartLoc: SourceLocToDebugLoc(Location: R.getBegin()),
1250 EndLoc: SourceLocToDebugLoc(Location: R.getEnd()));
1251
1252 // If there are any cleanups between here and the loop-exit scope,
1253 // create a block to stage a loop exit along.
1254 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1255 if (ForScope.requiresCleanups())
1256 ExitBlock = createBasicBlock(name: "for.cond.cleanup");
1257
1258 // The loop body, consisting of the specified body and the loop variable.
1259 llvm::BasicBlock *ForBody = createBasicBlock(name: "for.body");
1260
1261 // The body is executed if the expression, contextually converted
1262 // to bool, is true.
1263 llvm::Value *BoolCondVal = EvaluateExprAsBool(E: S.getCond());
1264 llvm::MDNode *Weights =
1265 createProfileWeightsForLoop(S.getCond(), getProfileCount(S: S.getBody()));
1266 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1267 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1268 Cond: BoolCondVal, LH: Stmt::getLikelihood(S: S.getBody()));
1269 Builder.CreateCondBr(Cond: BoolCondVal, True: ForBody, False: ExitBlock, BranchWeights: Weights);
1270
1271 if (ExitBlock != LoopExit.getBlock()) {
1272 EmitBlock(BB: ExitBlock);
1273 EmitBranchThroughCleanup(Dest: LoopExit);
1274 }
1275
1276 EmitBlock(BB: ForBody);
1277 if (llvm::EnableSingleByteCoverage)
1278 incrementProfileCounter(S: S.getBody());
1279 else
1280 incrementProfileCounter(S: &S);
1281
1282 // Create a block for the increment. In case of a 'continue', we jump there.
1283 JumpDest Continue = getJumpDestInCurrentScope(Name: "for.inc");
1284
1285 // Store the blocks to use for break and continue.
1286 BreakContinueStack.push_back(Elt: BreakContinue(LoopExit, Continue));
1287
1288 {
1289 // Create a separate cleanup scope for the loop variable and body.
1290 LexicalScope BodyScope(*this, S.getSourceRange());
1291 EmitStmt(S: S.getLoopVarStmt());
1292 EmitStmt(S: S.getBody());
1293 }
1294
1295 EmitStopPoint(S: &S);
1296 // If there is an increment, emit it next.
1297 EmitBlock(BB: Continue.getBlock());
1298 EmitStmt(S.getInc());
1299
1300 BreakContinueStack.pop_back();
1301
1302 EmitBranch(Target: CondBlock);
1303
1304 ForScope.ForceCleanup();
1305
1306 LoopStack.pop();
1307
1308 // Emit the fall-through block.
1309 EmitBlock(BB: LoopExit.getBlock(), IsFinished: true);
1310
1311 // When single byte coverage mode is enabled, add a counter to continuation
1312 // block.
1313 if (llvm::EnableSingleByteCoverage)
1314 incrementProfileCounter(S: &S);
1315}
1316
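/// EmitReturnOfRValue - Store an already-computed RValue into the function's
/// return slot and branch through any active cleanups to the return block.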
1317void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1318 if (RV.isScalar()) {
1319 Builder.CreateStore(Val: RV.getScalarVal(), Addr: ReturnValue);
1320 } else if (RV.isAggregate()) {
1321 LValue Dest = MakeAddrLValue(Addr: ReturnValue, T: Ty);
1322 LValue Src = MakeAddrLValue(Addr: RV.getAggregateAddress(), T: Ty);
1323 EmitAggregateCopy(Dest, Src, EltTy: Ty, MayOverlap: getOverlapForReturnValue());
1324 } else {
1325 EmitStoreOfComplex(V: RV.getComplexVal(), dest: MakeAddrLValue(Addr: ReturnValue, T: Ty),
1326 /*init*/ isInit: true);
1327 }
1328 EmitBranchThroughCleanup(Dest: ReturnBlock);
1329}
1330
1331namespace {
// RAII struct used to save and restore a return statement's result expression.
1333struct SaveRetExprRAII {
1334 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1335 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1336 CGF.RetExpr = RetExpr;
1337 }
1338 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1339 const Expr *OldRetExpr;
1340 CodeGenFunction &CGF;
1341};
1342} // namespace
1343
1344/// Determine if the given call uses the swiftasync calling convention.
1345static bool isSwiftAsyncCallee(const CallExpr *CE) {
1346 auto calleeQualType = CE->getCallee()->getType();
1347 const FunctionType *calleeType = nullptr;
1348 if (calleeQualType->isFunctionPointerType() ||
1349 calleeQualType->isFunctionReferenceType() ||
1350 calleeQualType->isBlockPointerType() ||
1351 calleeQualType->isMemberFunctionPointerType()) {
1352 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1353 } else if (auto *ty = dyn_cast<FunctionType>(Val&: calleeQualType)) {
1354 calleeType = ty;
1355 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(Val: CE)) {
1356 if (auto methodDecl = CMCE->getMethodDecl()) {
1357 // getMethodDecl() doesn't handle member pointers at the moment.
1358 calleeType = methodDecl->getType()->castAs<FunctionType>();
1359 } else {
1360 return false;
1361 }
1362 } else {
1363 return false;
1364 }
1365 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1366}
1367
1368/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1369/// if the function returns void, or may be missing one if the function returns
1370/// non-void. Fun stuff :).
1371void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1372 if (requiresReturnValueCheck()) {
1373 llvm::Constant *SLoc = EmitCheckSourceLocation(Loc: S.getBeginLoc());
1374 auto *SLocPtr =
1375 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1376 llvm::GlobalVariable::PrivateLinkage, SLoc);
1377 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1378 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: SLocPtr);
1379 assert(ReturnLocation.isValid() && "No valid return location");
1380 Builder.CreateStore(Val: SLocPtr, Addr: ReturnLocation);
1381 }
1382
1383 // Returning from an outlined SEH helper is UB, and we already warn on it.
1384 if (IsOutlinedSEHHelper) {
1385 Builder.CreateUnreachable();
1386 Builder.ClearInsertionPoint();
1387 }
1388
1389 // Emit the result value, even if unused, to evaluate the side effects.
1390 const Expr *RV = S.getRetValue();
1391
1392 // Record the result expression of the return statement. The recorded
1393 // expression is used to determine whether a block capture's lifetime should
1394 // end at the end of the full expression as opposed to the end of the scope
1395 // enclosing the block expression.
1396 //
1397 // This permits a small, easily-implemented exception to our over-conservative
1398 // rules about not jumping to statements following block literals with
1399 // non-trivial cleanups.
1400 SaveRetExprRAII SaveRetExpr(RV, *this);
1401
1402 RunCleanupsScope cleanupScope(*this);
1403 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(Val: RV))
1404 RV = EWC->getSubExpr();
1405
1406 // If we're in a swiftasynccall function, and the return expression is a
1407 // call to a swiftasynccall function, mark the call as the musttail call.
1408 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1409 if (RV && CurFnInfo &&
1410 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1411 if (auto CE = dyn_cast<CallExpr>(Val: RV)) {
1412 if (isSwiftAsyncCallee(CE)) {
1413 SaveMustTail.emplace(args&: MustTailCall, args&: CE);
1414 }
1415 }
1416 }
1417
1418 // FIXME: Clean this up by using an LValue for ReturnTemp,
1419 // EmitStoreThroughLValue, and EmitAnyExpr.
1420 // Check if the NRVO candidate was not globalized in OpenMP mode.
1421 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1422 S.getNRVOCandidate()->isNRVOVariable() &&
1423 (!getLangOpts().OpenMP ||
1424 !CGM.getOpenMPRuntime()
1425 .getAddressOfLocalVariable(CGF&: *this, VD: S.getNRVOCandidate())
1426 .isValid())) {
1427 // Apply the named return value optimization for this return statement,
1428 // which means doing nothing: the appropriate result has already been
1429 // constructed into the NRVO variable.
1430
    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
1433 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1434 Builder.CreateFlagStore(Value: Builder.getTrue(), Addr: NRVOFlag);
1435 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1436 // Make sure not to return anything, but evaluate the expression
1437 // for side effects.
1438 if (RV) {
1439 EmitAnyExpr(E: RV);
1440 }
1441 } else if (!RV) {
1442 // Do nothing (return value is left uninitialized)
1443 } else if (FnRetTy->isReferenceType()) {
1444 // If this function returns a reference, take the address of the expression
1445 // rather than the value.
1446 RValue Result = EmitReferenceBindingToExpr(E: RV);
1447 Builder.CreateStore(Val: Result.getScalarVal(), Addr: ReturnValue);
1448 } else {
1449 switch (getEvaluationKind(T: RV->getType())) {
1450 case TEK_Scalar:
1451 Builder.CreateStore(Val: EmitScalarExpr(E: RV), Addr: ReturnValue);
1452 break;
1453 case TEK_Complex:
1454 EmitComplexExprIntoLValue(E: RV, dest: MakeAddrLValue(Addr: ReturnValue, T: RV->getType()),
1455 /*isInit*/ true);
1456 break;
1457 case TEK_Aggregate:
1458 EmitAggExpr(E: RV, AS: AggValueSlot::forAddr(
1459 addr: ReturnValue, quals: Qualifiers(),
1460 isDestructed: AggValueSlot::IsDestructed,
1461 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
1462 isAliased: AggValueSlot::IsNotAliased,
1463 mayOverlap: getOverlapForReturnValue()));
1464 break;
1465 }
1466 }
1467
1468 ++NumReturnExprs;
1469 if (!RV || RV->isEvaluatable(Ctx: getContext()))
1470 ++NumSimpleReturnExprs;
1471
1472 cleanupScope.ForceCleanup();
1473 EmitBranchThroughCleanup(Dest: ReturnBlock);
1474}
1475
1476void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1477 // As long as debug info is modeled with instructions, we have to ensure we
1478 // have a place to insert here and write the stop point here.
1479 if (HaveInsertPoint())
1480 EmitStopPoint(S: &S);
1481
1482 for (const auto *I : S.decls())
1483 EmitDecl(D: *I);
1484}
1485
1486void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1487 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1488
1489 // If this code is reachable then emit a stop point (if generating
1490 // debug info). We have to do this ourselves because we are on the
1491 // "simple" statement path.
1492 if (HaveInsertPoint())
1493 EmitStopPoint(S: &S);
1494
1495 EmitBranchThroughCleanup(Dest: BreakContinueStack.back().BreakBlock);
1496}
1497
1498void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1499 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1500
1501 // If this code is reachable then emit a stop point (if generating
1502 // debug info). We have to do this ourselves because we are on the
1503 // "simple" statement path.
1504 if (HaveInsertPoint())
1505 EmitStopPoint(S: &S);
1506
1507 EmitBranchThroughCleanup(Dest: BreakContinueStack.back().ContinueBlock);
1508}
1509
1510 /// EmitCaseStmtRange - If the case statement range is not too big, then
1511 /// add multiple cases to the switch instruction, one for each value within
1512 /// the range. If the range is too big, then emit an "if" condition check.
1513void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1514 ArrayRef<const Attr *> Attrs) {
1515 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1516
1517 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(Ctx: getContext());
1518 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(Ctx: getContext());
1519
1520 // Emit the code for this case. We do this first to make sure it is
1521 // properly chained from our predecessor before generating the
1522 // switch machinery to enter this block.
1523 llvm::BasicBlock *CaseDest = createBasicBlock(name: "sw.bb");
1524 EmitBlockWithFallThrough(CaseDest, &S);
1525 EmitStmt(S: S.getSubStmt());
1526
1527 // If range is empty, do nothing.
1528 if (LHS.isSigned() ? RHS.slt(RHS: LHS) : RHS.ult(RHS: LHS))
1529 return;
1530
1531 Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1532 llvm::APInt Range = RHS - LHS;
1533 // FIXME: parameters such as this should not be hardcoded.
1534 if (Range.ult(RHS: llvm::APInt(Range.getBitWidth(), 64))) {
1535 // Range is small enough to add multiple switch instruction cases.
1536 uint64_t Total = getProfileCount(&S);
1537 unsigned NCases = Range.getZExtValue() + 1;
1538 // We only have one region counter for the entire set of cases here, so we
1539 // need to divide the weights evenly between the generated cases, ensuring
1540 // that the total weight is preserved. E.g., a weight of 5 over three cases
1541 // will be distributed as weights of 2, 2, and 1.
1542 uint64_t Weight = Total / NCases, Rem = Total % NCases;
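// Hand the remainder out one unit at a time to the first Rem cases so the
// emitted weights still sum to Total.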
1543 for (unsigned I = 0; I != NCases; ++I) {
1544 if (SwitchWeights)
1545 SwitchWeights->push_back(Elt: Weight + (Rem ? 1 : 0));
1546 else if (SwitchLikelihood)
1547 SwitchLikelihood->push_back(Elt: LH);
1548
1549 if (Rem)
1550 Rem--;
1551 SwitchInsn->addCase(OnVal: Builder.getInt(AI: LHS), Dest: CaseDest);
1552 ++LHS;
1553 }
1554 return;
1555 }
1556
1557 // The range is too big. Emit "if" condition into a new block,
1558 // making sure to save and restore the current insertion point.
1559 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1560
1561 // Push this test onto the chain of range checks (which terminates
1562 // in the default basic block). The switch's default will be changed
1563 // to the top of this chain after switch emission is complete.
1564 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1565 CaseRangeBlock = createBasicBlock(name: "sw.caserange");
1566
1567 CurFn->insert(Position: CurFn->end(), BB: CaseRangeBlock);
1568 Builder.SetInsertPoint(CaseRangeBlock);
1569
1570 // Emit range check.
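// The check relies on unsigned wrap-around: subtracting LHS first means any
// condition value below LHS becomes a huge unsigned number, so a single
// ULE compare against Range covers both ends of the case range (e.g. for
// case 10 ... 20, a condition of 5 yields 5 - 10, which wraps and fails).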
1571 llvm::Value *Diff =
1572 Builder.CreateSub(LHS: SwitchInsn->getCondition(), RHS: Builder.getInt(AI: LHS));
1573 llvm::Value *Cond =
1574 Builder.CreateICmpULE(LHS: Diff, RHS: Builder.getInt(AI: Range), Name: "inbounds");
1575
1576 llvm::MDNode *Weights = nullptr;
1577 if (SwitchWeights) {
1578 uint64_t ThisCount = getProfileCount(&S);
1579 uint64_t DefaultCount = (*SwitchWeights)[0];
1580 Weights = createProfileWeights(TrueCount: ThisCount, FalseCount: DefaultCount);
1581
1582 // Since we're chaining the switch default through each large case range, we
1583 // need to update the weight for the default, i.e., the first case, to include
1584 // this case.
1585 (*SwitchWeights)[0] += ThisCount;
1586 } else if (SwitchLikelihood)
1587 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1588
1589 Builder.CreateCondBr(Cond, True: CaseDest, False: FalseDest, BranchWeights: Weights);
1590
1591 // Restore the appropriate insertion point.
1592 if (RestoreBB)
1593 Builder.SetInsertPoint(RestoreBB);
1594 else
1595 Builder.ClearInsertionPoint();
1596}
1597
1598void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1599 ArrayRef<const Attr *> Attrs) {
1600 // If there is no enclosing switch instance that we're aware of, then this
1601 // case statement and its block can be elided. This situation only happens
1602 // when we've constant-folded the switch, are emitting the constant case,
1603 // and part of the constant case includes another case statement. For
1604 // instance: switch (4) { case 4: do { case 5: } while (1); }
1605 if (!SwitchInsn) {
1606 EmitStmt(S: S.getSubStmt());
1607 return;
1608 }
1609
1610 // Handle case ranges.
1611 if (S.getRHS()) {
1612 EmitCaseStmtRange(S, Attrs);
1613 return;
1614 }
1615
1616 llvm::ConstantInt *CaseVal =
1617 Builder.getInt(AI: S.getLHS()->EvaluateKnownConstInt(Ctx: getContext()));
1618
1619 // Emit debuginfo for the case value if it is an enum value.
1620 const ConstantExpr *CE;
1621 if (auto ICE = dyn_cast<ImplicitCastExpr>(Val: S.getLHS()))
1622 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1623 else
1624 CE = dyn_cast<ConstantExpr>(Val: S.getLHS());
1625 if (CE) {
1626 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1627 if (CGDebugInfo *Dbg = getDebugInfo())
1628 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1629 Dbg->EmitGlobalVariable(DE->getDecl(),
1630 APValue(llvm::APSInt(CaseVal->getValue())));
1631 }
1632
1633 if (SwitchLikelihood)
1634 SwitchLikelihood->push_back(Elt: Stmt::getLikelihood(Attrs));
1635
1636 // If the body of the case is just a 'break', try to not emit an empty block.
1637 // If we're profiling or we're not optimizing, leave the block in for better
1638 // debug and coverage analysis.
1639 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1640 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1641 isa<BreakStmt>(Val: S.getSubStmt())) {
1642 JumpDest Block = BreakContinueStack.back().BreakBlock;
1643
1644 // Only do this optimization if there are no cleanups that need emitting.
1645 if (isObviouslyBranchWithoutCleanups(Dest: Block)) {
1646 if (SwitchWeights)
1647 SwitchWeights->push_back(Elt: getProfileCount(&S));
1648 SwitchInsn->addCase(OnVal: CaseVal, Dest: Block.getBlock());
1649
1650 // If there was a fallthrough into this case, make sure to redirect it to
1651 // the end of the switch as well.
1652 if (Builder.GetInsertBlock()) {
1653 Builder.CreateBr(Dest: Block.getBlock());
1654 Builder.ClearInsertionPoint();
1655 }
1656 return;
1657 }
1658 }
1659
1660 llvm::BasicBlock *CaseDest = createBasicBlock(name: "sw.bb");
1661 EmitBlockWithFallThrough(CaseDest, &S);
1662 if (SwitchWeights)
1663 SwitchWeights->push_back(Elt: getProfileCount(&S));
1664 SwitchInsn->addCase(OnVal: CaseVal, Dest: CaseDest);
1665
1666 // Recursively emitting the statement is acceptable, but is not wonderful for
1667 // code where we have many case statements nested together, e.g.:
1668 // case 1:
1669 // case 2:
1670 // case 3: etc.
1671 // Handling this recursively will create a new block for each case statement
1672 // that falls through to the next case, which is IR intensive. It also causes
1673 // deep recursion which can run into stack depth limitations. Handle
1674 // sequential non-range case statements specially.
1675 //
1676 // TODO: When the next case has a likelihood attribute, the code returns to the
1677 // recursive algorithm. Maybe improve this case if it becomes common practice
1678 // to use a lot of attributes.
1679 const CaseStmt *CurCase = &S;
1680 const CaseStmt *NextCase = dyn_cast<CaseStmt>(Val: S.getSubStmt());
1681
1682 // Otherwise, iteratively add consecutive cases to this switch stmt.
1683 while (NextCase && NextCase->getRHS() == nullptr) {
1684 CurCase = NextCase;
1685 llvm::ConstantInt *CaseVal =
1686 Builder.getInt(AI: CurCase->getLHS()->EvaluateKnownConstInt(Ctx: getContext()));
1687
1688 if (SwitchWeights)
1689 SwitchWeights->push_back(Elt: getProfileCount(NextCase));
1690 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1691 CaseDest = createBasicBlock(name: "sw.bb");
1692 EmitBlockWithFallThrough(CaseDest, CurCase);
1693 }
1694 // Since this loop is only executed when the CaseStmt has no attributes
1695 // use a hard-coded value.
1696 if (SwitchLikelihood)
1697 SwitchLikelihood->push_back(Elt: Stmt::LH_None);
1698
1699 SwitchInsn->addCase(OnVal: CaseVal, Dest: CaseDest);
1700 NextCase = dyn_cast<CaseStmt>(Val: CurCase->getSubStmt());
1701 }
1702
1703 // Generate a stop point for debug info if the case statement is
1704 // followed by a default statement. A fallthrough case before a
1705 // default case gets its own branch target.
1706 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1707 EmitStopPoint(CurCase);
1708
1709 // Normal default recursion for non-cases.
1710 EmitStmt(S: CurCase->getSubStmt());
1711}
1712
1713void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1714 ArrayRef<const Attr *> Attrs) {
1715 // If there is no enclosing switch instance that we're aware of, then this
1716 // default statement can be elided. This situation only happens when we've
1717 // constant-folded the switch.
1718 if (!SwitchInsn) {
1719 EmitStmt(S: S.getSubStmt());
1720 return;
1721 }
1722
1723 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1724 assert(DefaultBlock->empty() &&
1725 "EmitDefaultStmt: Default block already defined?");
1726
1727 if (SwitchLikelihood)
1728 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1729
1730 EmitBlockWithFallThrough(BB: DefaultBlock, S: &S);
1731
1732 EmitStmt(S: S.getSubStmt());
1733}
1734
1735/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1736/// constant value that is being switched on, see if we can dead code eliminate
1737/// the body of the switch to a simple series of statements to emit. Basically,
1738/// on a switch (5) we want to find these statements:
1739/// case 5:
1740/// printf(...); <--
1741/// ++i; <--
1742/// break;
1743///
1744/// and add them to the ResultStmts vector. If it is unsafe to do this
1745/// transformation (for example, one of the elided statements contains a label
1746/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1747/// should include statements after it (e.g. the printf() line is a substmt of
1748/// the case) then return CSFC_FallThrough. If we handled it and found a break
1749/// statement, then return CSFC_Success.
1750///
1751/// If Case is non-null, then we are looking for the specified case, checking
1752/// that nothing we jump over contains labels. If Case is null, then we found
1753/// the case and are looking for the break.
1754///
1755/// If the recursive walk actually finds our Case, then we set FoundCase to
1756/// true.
1757///
1758enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1759static CSFC_Result CollectStatementsForCase(const Stmt *S,
1760 const SwitchCase *Case,
1761 bool &FoundCase,
1762 SmallVectorImpl<const Stmt*> &ResultStmts) {
1763 // If this is a null statement, just succeed.
1764 if (!S)
1765 return Case ? CSFC_Success : CSFC_FallThrough;
1766
1767 // If this is the switchcase (case 4: or default) that we're looking for, then
1768 // we're in business. Just add the substatement.
1769 if (const SwitchCase *SC = dyn_cast<SwitchCase>(Val: S)) {
1770 if (S == Case) {
1771 FoundCase = true;
1772 return CollectStatementsForCase(S: SC->getSubStmt(), Case: nullptr, FoundCase,
1773 ResultStmts);
1774 }
1775
1776 // Otherwise, this is some other case or default statement, just ignore it.
1777 return CollectStatementsForCase(S: SC->getSubStmt(), Case, FoundCase,
1778 ResultStmts);
1779 }
1780
1781 // If we are in the live part of the code and we found our break statement,
1782 // return a success!
1783 if (!Case && isa<BreakStmt>(Val: S))
1784 return CSFC_Success;
1785
1786 // If this is a compound statement, then it might contain the SwitchCase, the
1787 // break, or neither.
1788 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(Val: S)) {
1789 // Handle this as two cases: we might be looking for the SwitchCase (if so
1790 // the skipped statements must be skippable) or we might already have it.
1791 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1792 bool StartedInLiveCode = FoundCase;
1793 unsigned StartSize = ResultStmts.size();
1794
1795 // If we've not found the case yet, scan through looking for it.
1796 if (Case) {
1797 // Keep track of whether we see a skipped declaration. The code could be
1798 // using the declaration even if it is skipped, so we can't optimize out
1799 // the decl if the kept statements might refer to it.
1800 bool HadSkippedDecl = false;
1801
1802 // If we're looking for the case, just see if we can skip each of the
1803 // substatements.
1804 for (; Case && I != E; ++I) {
1805 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(S: *I);
1806
1807 switch (CollectStatementsForCase(S: *I, Case, FoundCase, ResultStmts)) {
1808 case CSFC_Failure: return CSFC_Failure;
1809 case CSFC_Success:
1810 // A successful result means that either 1) the statement doesn't
1811 // have the case and is skippable, or 2) it does contain the case value
1812 // and also contains the break to exit the switch. In the latter case,
1813 // we just verify that the rest of the statements are elidable.
1814 if (FoundCase) {
1815 // If we found the case and skipped declarations, we can't do the
1816 // optimization.
1817 if (HadSkippedDecl)
1818 return CSFC_Failure;
1819
1820 for (++I; I != E; ++I)
1821 if (CodeGenFunction::ContainsLabel(S: *I, IgnoreCaseStmts: true))
1822 return CSFC_Failure;
1823 return CSFC_Success;
1824 }
1825 break;
1826 case CSFC_FallThrough:
1827 // If we have a fallthrough condition, then we must have found the
1828 // case and started to include statements. Consider the rest of the
1829 // statements in the compound statement as candidates for inclusion.
1830 assert(FoundCase && "Didn't find case but returned fallthrough?");
1831 // We recursively found Case, so we're not looking for it anymore.
1832 Case = nullptr;
1833
1834 // If we found the case and skipped declarations, we can't do the
1835 // optimization.
1836 if (HadSkippedDecl)
1837 return CSFC_Failure;
1838 break;
1839 }
1840 }
1841
1842 if (!FoundCase)
1843 return CSFC_Success;
1844
1845 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1846 }
1847
1848 // If we have statements in our range, then we know that the statements are
1849 // live and need to be added to the set of statements we're tracking.
1850 bool AnyDecls = false;
1851 for (; I != E; ++I) {
1852 AnyDecls |= CodeGenFunction::mightAddDeclToScope(S: *I);
1853
1854 switch (CollectStatementsForCase(S: *I, Case: nullptr, FoundCase, ResultStmts)) {
1855 case CSFC_Failure: return CSFC_Failure;
1856 case CSFC_FallThrough:
1857 // A fallthrough result means that the statement was simple and just
1858 // included in ResultStmt, keep adding them afterwards.
1859 break;
1860 case CSFC_Success:
1861 // A successful result means that we found the break statement and
1862 // stopped statement inclusion. We just ensure that any leftover stmts
1863 // are skippable and return success ourselves.
1864 for (++I; I != E; ++I)
1865 if (CodeGenFunction::ContainsLabel(S: *I, IgnoreCaseStmts: true))
1866 return CSFC_Failure;
1867 return CSFC_Success;
1868 }
1869 }
1870
1871 // If we're about to fall out of a scope without hitting a 'break;', we
1872 // can't perform the optimization if there were any decls in that scope
1873 // (we'd lose their end-of-lifetime).
1874 if (AnyDecls) {
1875 // If the entire compound statement was live, there's one more thing we
1876 // can try before giving up: emit the whole thing as a single statement.
1877 // We can do that unless the statement contains a 'break;'.
1878 // FIXME: Such a break must be at the end of a construct within this one.
1879 // We could emit this by just ignoring the BreakStmts entirely.
1880 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1881 ResultStmts.resize(N: StartSize);
1882 ResultStmts.push_back(Elt: S);
1883 } else {
1884 return CSFC_Failure;
1885 }
1886 }
1887
1888 return CSFC_FallThrough;
1889 }
1890
1891 // Okay, this is some other statement that we don't handle explicitly, like a
1892 // for statement or increment etc. If we are skipping over this statement,
1893 // just verify it doesn't have labels, which would make it invalid to elide.
1894 if (Case) {
1895 if (CodeGenFunction::ContainsLabel(S, IgnoreCaseStmts: true))
1896 return CSFC_Failure;
1897 return CSFC_Success;
1898 }
1899
1900 // Otherwise, we want to include this statement. Everything is cool with that
1901 // so long as it doesn't contain a break out of the switch we're in.
1902 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1903
1904 // Otherwise, everything is great. Include the statement and tell the caller
1905 // that we fall through and include the next statement as well.
1906 ResultStmts.push_back(Elt: S);
1907 return CSFC_FallThrough;
1908}
1909
1910/// FindCaseStatementsForValue - Find the case statement being jumped to and
1911/// then invoke CollectStatementsForCase to find the list of statements to emit
1912/// for a switch on constant. See the comment above CollectStatementsForCase
1913/// for more details.
1914static bool FindCaseStatementsForValue(const SwitchStmt &S,
1915 const llvm::APSInt &ConstantCondValue,
1916 SmallVectorImpl<const Stmt*> &ResultStmts,
1917 ASTContext &C,
1918 const SwitchCase *&ResultCase) {
1919 // First step, find the switch case that is being branched to. We can do this
1920 // efficiently by scanning the SwitchCase list.
1921 const SwitchCase *Case = S.getSwitchCaseList();
1922 const DefaultStmt *DefaultCase = nullptr;
1923
1924 for (; Case; Case = Case->getNextSwitchCase()) {
1925 // It's either a default or case. Just remember the default statement in
1926 // case we're not jumping to any numbered cases.
1927 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Val: Case)) {
1928 DefaultCase = DS;
1929 continue;
1930 }
1931
1932 // Check to see if this case is the one we're looking for.
1933 const CaseStmt *CS = cast<CaseStmt>(Val: Case);
1934 // Don't handle case ranges yet.
1935 if (CS->getRHS()) return false;
1936
1937 // If we found our case, remember it as 'case'.
1938 if (CS->getLHS()->EvaluateKnownConstInt(Ctx: C) == ConstantCondValue)
1939 break;
1940 }
1941
1942 // If we didn't find a matching case, we use a default if it exists, or we
1943 // elide the whole switch body!
1944 if (!Case) {
1945 // It is safe to elide the body of the switch if it doesn't contain labels
1946 // etc. If it is safe, return successfully with an empty ResultStmts list.
1947 if (!DefaultCase)
1948 return !CodeGenFunction::ContainsLabel(&S);
1949 Case = DefaultCase;
1950 }
1951
1952 // Ok, we know which case is being jumped to, try to collect all the
1953 // statements that follow it. This can fail for a variety of reasons. Also,
1954 // check to see that the recursive walk actually found our case statement.
1955 // Insane cases like this can fail to find it in the recursive walk since we
1956 // don't handle every stmt kind:
1957 // switch (4) {
1958 // while (1) {
1959 // case 4: ...
1960 bool FoundCase = false;
1961 ResultCase = Case;
1962 return CollectStatementsForCase(S: S.getBody(), Case, FoundCase,
1963 ResultStmts) != CSFC_Failure &&
1964 FoundCase;
1965}
1966
1967static std::optional<SmallVector<uint64_t, 16>>
1968getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1969 // Are there enough branches to weight them?
1970 if (Likelihoods.size() <= 1)
1971 return std::nullopt;
1972
1973 uint64_t NumUnlikely = 0;
1974 uint64_t NumNone = 0;
1975 uint64_t NumLikely = 0;
1976 for (const auto LH : Likelihoods) {
1977 switch (LH) {
1978 case Stmt::LH_Unlikely:
1979 ++NumUnlikely;
1980 break;
1981 case Stmt::LH_None:
1982 ++NumNone;
1983 break;
1984 case Stmt::LH_Likely:
1985 ++NumLikely;
1986 break;
1987 }
1988 }
1989
1990 // Is there a likelihood attribute used?
1991 if (NumUnlikely == 0 && NumLikely == 0)
1992 return std::nullopt;
1993
1994 // When multiple cases share the same code they can be combined during
1995 // optimization. In that case the weights of the branch will be the sum of
1996 // the individual weights. Make sure the combined sum of all neutral cases
1997 // doesn't exceed the value of a single likely attribute.
1998 // The additions both avoid divisions by 0 and make sure the weights of None
1999 // don't exceed the weight of Likely.
2000 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2001 const uint64_t None = Likely / (NumNone + 1);
2002 const uint64_t Unlikely = 0;
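// For example, one likely case and three unattributed cases gives
// Likely = INT32_MAX / 3 = 715827882 and None = 715827882 / 4 = 178956970,
// so even the three None weights combined (536870910) stay below one Likely.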
2003
2004 SmallVector<uint64_t, 16> Result;
2005 Result.reserve(N: Likelihoods.size());
2006 for (const auto LH : Likelihoods) {
2007 switch (LH) {
2008 case Stmt::LH_Unlikely:
2009 Result.push_back(Elt: Unlikely);
2010 break;
2011 case Stmt::LH_None:
2012 Result.push_back(Elt: None);
2013 break;
2014 case Stmt::LH_Likely:
2015 Result.push_back(Elt: Likely);
2016 break;
2017 }
2018 }
2019
2020 return Result;
2021}
2022
2023void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2024 // Handle nested switch statements.
2025 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2026 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2027 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2028 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2029
2030 // See if we can constant fold the condition of the switch and therefore only
2031 // emit the live case statement (if any) of the switch.
2032 llvm::APSInt ConstantCondValue;
2033 if (ConstantFoldsToSimpleInteger(Cond: S.getCond(), Result&: ConstantCondValue)) {
2034 SmallVector<const Stmt*, 4> CaseStmts;
2035 const SwitchCase *Case = nullptr;
2036 if (FindCaseStatementsForValue(S, ConstantCondValue, ResultStmts&: CaseStmts,
2037 C&: getContext(), ResultCase&: Case)) {
2038 if (Case)
2039 incrementProfileCounter(S: Case);
2040 RunCleanupsScope ExecutedScope(*this);
2041
2042 if (S.getInit())
2043 EmitStmt(S: S.getInit());
2044
2045 // Emit the condition variable if needed inside the entire cleanup scope
2046 // used by this special case for constant folded switches.
2047 if (S.getConditionVariable())
2048 EmitDecl(*S.getConditionVariable());
2049
2050 // At this point, we are no longer "within" a switch instance, so
2051 // we temporarily clear SwitchInsn to ensure that any embedded case
2052 // statements are not emitted.
2053 SwitchInsn = nullptr;
2054
2055 // Okay, we can dead code eliminate everything except this case. Emit the
2056 // specified series of statements and we're good.
2057 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2058 EmitStmt(S: CaseStmts[i]);
2059 incrementProfileCounter(&S);
2060
2061 // Now we want to restore the saved switch instance so that nested
2062 // switches continue to function properly
2063 SwitchInsn = SavedSwitchInsn;
2064
2065 return;
2066 }
2067 }
2068
2069 JumpDest SwitchExit = getJumpDestInCurrentScope(Name: "sw.epilog");
2070
2071 RunCleanupsScope ConditionScope(*this);
2072
2073 if (S.getInit())
2074 EmitStmt(S: S.getInit());
2075
2076 if (S.getConditionVariable())
2077 EmitDecl(*S.getConditionVariable());
2078 llvm::Value *CondV = EmitScalarExpr(E: S.getCond());
2079
2080 // Create basic block to hold stuff that comes after switch
2081 // statement. We also need to create a default block now so that
2082 // explicit case range tests can have a place to jump to on
2083 // failure.
2084 llvm::BasicBlock *DefaultBlock = createBasicBlock(name: "sw.default");
2085 SwitchInsn = Builder.CreateSwitch(V: CondV, Dest: DefaultBlock);
2086 if (PGO.haveRegionCounts()) {
2087 // Walk the SwitchCase list to find how many there are.
2088 uint64_t DefaultCount = 0;
2089 unsigned NumCases = 0;
2090 for (const SwitchCase *Case = S.getSwitchCaseList();
2091 Case;
2092 Case = Case->getNextSwitchCase()) {
2093 if (isa<DefaultStmt>(Val: Case))
2094 DefaultCount = getProfileCount(S: Case);
2095 NumCases += 1;
2096 }
2097 SwitchWeights = new SmallVector<uint64_t, 16>();
2098 SwitchWeights->reserve(N: NumCases);
2099 // The default needs to be first. We store the edge count, so we already
2100 // know the right weight.
2101 SwitchWeights->push_back(Elt: DefaultCount);
2102 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2103 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2104 // Initialize the default case.
2105 SwitchLikelihood->push_back(Elt: Stmt::LH_None);
2106 }
2107
2108 CaseRangeBlock = DefaultBlock;
2109
2110 // Clear the insertion point to indicate we are in unreachable code.
2111 Builder.ClearInsertionPoint();
2112
2113 // All break statements jump to the switch exit block. If BreakContinueStack
2114 // is non-empty, reuse the enclosing construct's ContinueBlock for 'continue'.
2115 JumpDest OuterContinue;
2116 if (!BreakContinueStack.empty())
2117 OuterContinue = BreakContinueStack.back().ContinueBlock;
2118
2119 BreakContinueStack.push_back(Elt: BreakContinue(SwitchExit, OuterContinue));
2120
2121 // Emit switch body.
2122 EmitStmt(S: S.getBody());
2123
2124 BreakContinueStack.pop_back();
2125
2126 // Update the default block in case explicit case range tests have
2127 // been chained on top.
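// At this point CaseRangeBlock is either the original default block (no case
// range needed a compare chain) or the top of the chain of range checks built
// in EmitCaseStmtRange, which ultimately falls back to the default block.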
2128 SwitchInsn->setDefaultDest(CaseRangeBlock);
2129
2130 // If a default was never emitted:
2131 if (!DefaultBlock->getParent()) {
2132 // If we have cleanups, emit the default block so that there's a
2133 // place to jump through the cleanups from.
2134 if (ConditionScope.requiresCleanups()) {
2135 EmitBlock(BB: DefaultBlock);
2136
2137 // Otherwise, just forward the default block to the switch end.
2138 } else {
2139 DefaultBlock->replaceAllUsesWith(V: SwitchExit.getBlock());
2140 delete DefaultBlock;
2141 }
2142 }
2143
2144 ConditionScope.ForceCleanup();
2145
2146 // Emit continuation.
2147 EmitBlock(BB: SwitchExit.getBlock(), IsFinished: true);
2148 incrementProfileCounter(&S);
2149
2150 // If the switch has a condition wrapped by __builtin_unpredictable,
2151 // create metadata that specifies that the switch is unpredictable.
2152 // Don't bother if not optimizing because that metadata would not be used.
2153 auto *Call = dyn_cast<CallExpr>(Val: S.getCond());
2154 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2155 auto *FD = dyn_cast_or_null<FunctionDecl>(Val: Call->getCalleeDecl());
2156 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2157 llvm::MDBuilder MDHelper(getLLVMContext());
2158 SwitchInsn->setMetadata(KindID: llvm::LLVMContext::MD_unpredictable,
2159 Node: MDHelper.createUnpredictable());
2160 }
2161 }
2162
2163 if (SwitchWeights) {
2164 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2165 "switch weights do not match switch cases");
2166 // If there's only one jump destination there's no sense weighting it.
2167 if (SwitchWeights->size() > 1)
2168 SwitchInsn->setMetadata(KindID: llvm::LLVMContext::MD_prof,
2169 Node: createProfileWeights(Weights: *SwitchWeights));
2170 delete SwitchWeights;
2171 } else if (SwitchLikelihood) {
2172 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2173 "switch likelihoods do not match switch cases");
2174 std::optional<SmallVector<uint64_t, 16>> LHW =
2175 getLikelihoodWeights(Likelihoods: *SwitchLikelihood);
2176 if (LHW) {
2177 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2178 SwitchInsn->setMetadata(KindID: llvm::LLVMContext::MD_prof,
2179 Node: createProfileWeights(Weights: *LHW));
2180 }
2181 delete SwitchLikelihood;
2182 }
2183 SwitchInsn = SavedSwitchInsn;
2184 SwitchWeights = SavedSwitchWeights;
2185 SwitchLikelihood = SavedSwitchLikelihood;
2186 CaseRangeBlock = SavedCRBlock;
2187}
2188
2189static std::string
2190SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2191 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
2192 std::string Result;
2193
2194 while (*Constraint) {
2195 switch (*Constraint) {
2196 default:
2197 Result += Target.convertConstraint(Constraint);
2198 break;
2199 // Ignore these
2200 case '*':
2201 case '?':
2202 case '!':
2203 case '=': // Will see this and the following in multi-alternative constraints.
2204 case '+':
2205 break;
2206 case '#': // Ignore the rest of the constraint alternative.
2207 while (Constraint[1] && Constraint[1] != ',')
2208 Constraint++;
2209 break;
2210 case '&':
2211 case '%':
2212 Result += *Constraint;
2213 while (Constraint[1] && Constraint[1] == *Constraint)
2214 Constraint++;
2215 break;
2216 case ',':
2217 Result += "|";
2218 break;
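// 'g' is GCC's "general operand" constraint: any general register, memory
// operand, or immediate, hence the LLVM equivalent "imr".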
2219 case 'g':
2220 Result += "imr";
2221 break;
2222 case '[': {
2223 assert(OutCons &&
2224 "Must pass output names to constraints with a symbolic name");
2225 unsigned Index;
2226 bool result = Target.resolveSymbolicName(Name&: Constraint, OutputConstraints: *OutCons, Index);
2227 assert(result && "Could not resolve symbolic name"); (void)result;
2228 Result += llvm::utostr(X: Index);
2229 break;
2230 }
2231 }
2232
2233 Constraint++;
2234 }
2235
2236 return Result;
2237}
2238
2239 /// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
2240 /// as using a particular register, add that register as a constraint that will
2241 /// be used in this asm stmt.
2242static std::string
2243AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2244 const TargetInfo &Target, CodeGenModule &CGM,
2245 const AsmStmt &Stmt, const bool EarlyClobber,
2246 std::string *GCCReg = nullptr) {
2247 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(Val: &AsmExpr);
2248 if (!AsmDeclRef)
2249 return Constraint;
2250 const ValueDecl &Value = *AsmDeclRef->getDecl();
2251 const VarDecl *Variable = dyn_cast<VarDecl>(Val: &Value);
2252 if (!Variable)
2253 return Constraint;
2254 if (Variable->getStorageClass() != SC_Register)
2255 return Constraint;
2256 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2257 if (!Attr)
2258 return Constraint;
2259 StringRef Register = Attr->getLabel();
2260 assert(Target.isValidGCCRegisterName(Register));
2261 // We're using validateOutputConstraint here because we only care if
2262 // this is a register constraint.
2263 TargetInfo::ConstraintInfo Info(Constraint, "");
2264 if (Target.validateOutputConstraint(Info) &&
2265 !Info.allowsRegister()) {
2266 CGM.ErrorUnsupported(S: &Stmt, Type: "__asm__");
2267 return Constraint;
2268 }
2269 // Canonicalize the register here before returning it.
2270 Register = Target.getNormalizedGCCRegisterName(Name: Register);
2271 if (GCCReg != nullptr)
2272 *GCCReg = Register.str();
2273 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2274}
2275
2276std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2277 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2278 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2279 if (Info.allowsRegister() || !Info.allowsMemory()) {
2280 if (CodeGenFunction::hasScalarEvaluationKind(T: InputType))
2281 return {EmitLoadOfLValue(V: InputValue, Loc).getScalarVal(), nullptr};
2282
2283 llvm::Type *Ty = ConvertType(T: InputType);
2284 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2285 if ((Size <= 64 && llvm::isPowerOf2_64(Value: Size)) ||
2286 getTargetHooks().isScalarizableAsmOperand(CGF&: *this, Ty)) {
2287 Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: Size);
2288
2289 return {
2290 Builder.CreateLoad(Addr: InputValue.getAddress(CGF&: *this).withElementType(ElemTy: Ty)),
2291 nullptr};
2292 }
2293 }
2294
2295 Address Addr = InputValue.getAddress(CGF&: *this);
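// Otherwise pass the operand indirectly: the leading '*' marks an indirect
// (memory) constraint, and the element type is returned so the caller can
// attach the corresponding elementtype attribute.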
2296 ConstraintStr += '*';
2297 return {InputValue.getPointer(CGF&: *this), Addr.getElementType()};
2298}
2299
2300std::pair<llvm::Value *, llvm::Type *>
2301CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2302 const Expr *InputExpr,
2303 std::string &ConstraintStr) {
2304 // If this can't be a register or memory, i.e., has to be a constant
2305 // (immediate or symbolic), try to emit it as such.
2306 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2307 if (Info.requiresImmediateConstant()) {
2308 Expr::EvalResult EVResult;
2309 InputExpr->EvaluateAsRValue(Result&: EVResult, Ctx: getContext(), InConstantContext: true);
2310
2311 llvm::APSInt IntResult;
2312 if (EVResult.Val.toIntegralConstant(Result&: IntResult, SrcTy: InputExpr->getType(),
2313 Ctx: getContext()))
2314 return {llvm::ConstantInt::get(Context&: getLLVMContext(), V: IntResult), nullptr};
2315 }
2316
2317 Expr::EvalResult Result;
2318 if (InputExpr->EvaluateAsInt(Result, Ctx: getContext()))
2319 return {llvm::ConstantInt::get(Context&: getLLVMContext(), V: Result.Val.getInt()),
2320 nullptr};
2321 }
2322
2323 if (Info.allowsRegister() || !Info.allowsMemory())
2324 if (CodeGenFunction::hasScalarEvaluationKind(T: InputExpr->getType()))
2325 return {EmitScalarExpr(E: InputExpr), nullptr};
2326 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2327 return {EmitScalarExpr(E: InputExpr), nullptr};
2328 InputExpr = InputExpr->IgnoreParenNoopCasts(Ctx: getContext());
2329 LValue Dest = EmitLValue(E: InputExpr);
2330 return EmitAsmInputLValue(Info, InputValue: Dest, InputType: InputExpr->getType(), ConstraintStr,
2331 Loc: InputExpr->getExprLoc());
2332}
2333
2334/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2335/// asm call instruction. The !srcloc MDNode contains a list of constant
2336/// integers which are the source locations of the start of each line in the
2337/// asm.
2338static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2339 CodeGenFunction &CGF) {
2340 SmallVector<llvm::Metadata *, 8> Locs;
2341 // Add the location of the first line to the MDNode.
2342 Locs.push_back(Elt: llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
2343 Ty: CGF.Int64Ty, V: Str->getBeginLoc().getRawEncoding())));
2344 StringRef StrVal = Str->getString();
2345 if (!StrVal.empty()) {
2346 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2347 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2348 unsigned StartToken = 0;
2349 unsigned ByteOffset = 0;
2350
2351 // Add the location of the start of each subsequent line of the asm to the
2352 // MDNode.
2353 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2354 if (StrVal[i] != '\n') continue;
2355 SourceLocation LineLoc = Str->getLocationOfByte(
2356 ByteNo: i + 1, SM, Features: LangOpts, Target: CGF.getTarget(), StartToken: &StartToken, StartTokenByteOffset: &ByteOffset);
2357 Locs.push_back(Elt: llvm::ConstantAsMetadata::get(
2358 C: llvm::ConstantInt::get(Ty: CGF.Int64Ty, V: LineLoc.getRawEncoding())));
2359 }
2360 }
2361
2362 return llvm::MDNode::get(Context&: CGF.getLLVMContext(), MDs: Locs);
2363}
2364
2365static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2366 bool HasUnwindClobber, bool ReadOnly,
2367 bool ReadNone, bool NoMerge, const AsmStmt &S,
2368 const std::vector<llvm::Type *> &ResultRegTypes,
2369 const std::vector<llvm::Type *> &ArgElemTypes,
2370 CodeGenFunction &CGF,
2371 std::vector<llvm::Value *> &RegResults) {
2372 if (!HasUnwindClobber)
2373 Result.addFnAttr(llvm::Attribute::NoUnwind);
2374
2375 if (NoMerge)
2376 Result.addFnAttr(llvm::Attribute::NoMerge);
2377 // Attach readnone and readonly attributes.
2378 if (!HasSideEffect) {
2379 if (ReadNone)
2380 Result.setDoesNotAccessMemory();
2381 else if (ReadOnly)
2382 Result.setOnlyReadsMemory();
2383 }
2384
2385 // Add elementtype attribute for indirect constraints.
2386 for (auto Pair : llvm::enumerate(First: ArgElemTypes)) {
2387 if (Pair.value()) {
2388 auto Attr = llvm::Attribute::get(
2389 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2390 Result.addParamAttr(Pair.index(), Attr);
2391 }
2392 }
2393
2394 // Slap the source location of the inline asm into a !srcloc metadata on the
2395 // call.
2396 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(Val: &S))
2397 Result.setMetadata(Kind: "srcloc",
2398 Node: getAsmSrcLocInfo(Str: gccAsmStmt->getAsmString(), CGF));
2399 else {
2400 // At least put the line number on MS inline asm blobs.
2401 llvm::Constant *Loc =
2402 llvm::ConstantInt::get(Ty: CGF.Int64Ty, V: S.getAsmLoc().getRawEncoding());
2403 Result.setMetadata(Kind: "srcloc",
2404 Node: llvm::MDNode::get(Context&: CGF.getLLVMContext(),
2405 MDs: llvm::ConstantAsMetadata::get(C: Loc)));
2406 }
2407
2408 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2409 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2410 // convergent (meaning, they may call an intrinsically convergent op, such
2411 // as bar.sync, and so can't have certain optimizations applied around
2412 // them).
2413 Result.addFnAttr(llvm::Attribute::Convergent);
2414 // Extract all of the register value results from the asm.
2415 if (ResultRegTypes.size() == 1) {
2416 RegResults.push_back(x: &Result);
2417 } else {
2418 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2419 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(Agg: &Result, Idxs: i, Name: "asmresult");
2420 RegResults.push_back(x: Tmp);
2421 }
2422 }
2423}
2424
2425static void
2426EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2427 const llvm::ArrayRef<llvm::Value *> RegResults,
2428 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2429 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2430 const llvm::ArrayRef<LValue> ResultRegDests,
2431 const llvm::ArrayRef<QualType> ResultRegQualTys,
2432 const llvm::BitVector &ResultTypeRequiresCast,
2433 const llvm::BitVector &ResultRegIsFlagReg) {
2434 CGBuilderTy &Builder = CGF.Builder;
2435 CodeGenModule &CGM = CGF.CGM;
2436 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2437
2438 assert(RegResults.size() == ResultRegTypes.size());
2439 assert(RegResults.size() == ResultTruncRegTypes.size());
2440 assert(RegResults.size() == ResultRegDests.size());
2441 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2442 // in which case its size may grow.
2443 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2444 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2445
2446 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2447 llvm::Value *Tmp = RegResults[i];
2448 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2449
2450 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2451 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2452 // value.
2453 llvm::Constant *Two = llvm::ConstantInt::get(Ty: Tmp->getType(), V: 2);
2454 llvm::Value *IsBooleanValue =
2455 Builder.CreateCmp(Pred: llvm::CmpInst::ICMP_ULT, LHS: Tmp, RHS: Two);
2456 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2457 Builder.CreateCall(Callee: FnAssume, Args: IsBooleanValue);
2458 }
2459
2460 // If the result type of the LLVM IR asm doesn't match the result type of
2461 // the expression, do the conversion.
2462 if (ResultRegTypes[i] != TruncTy) {
2463
2464 // Truncate the integer result to the right size; note that TruncTy can be
2465 // a pointer.
2466 if (TruncTy->isFloatingPointTy())
2467 Tmp = Builder.CreateFPTrunc(V: Tmp, DestTy: TruncTy);
2468 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2469 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(Ty: TruncTy);
2470 Tmp = Builder.CreateTrunc(
2471 V: Tmp, DestTy: llvm::IntegerType::get(C&: CTX, NumBits: (unsigned)ResSize));
2472 Tmp = Builder.CreateIntToPtr(V: Tmp, DestTy: TruncTy);
2473 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2474 uint64_t TmpSize =
2475 CGM.getDataLayout().getTypeSizeInBits(Ty: Tmp->getType());
2476 Tmp = Builder.CreatePtrToInt(
2477 V: Tmp, DestTy: llvm::IntegerType::get(C&: CTX, NumBits: (unsigned)TmpSize));
2478 Tmp = Builder.CreateTrunc(V: Tmp, DestTy: TruncTy);
2479 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2480 Tmp = Builder.CreateZExtOrTrunc(V: Tmp, DestTy: TruncTy);
2481 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2482 Tmp = Builder.CreateBitCast(V: Tmp, DestTy: TruncTy);
2483 }
2484 }
2485
2486 LValue Dest = ResultRegDests[i];
2487 // ResultTypeRequiresCast elements correspond to the first
2488 // ResultTypeRequiresCast.size() elements of RegResults.
2489 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2490 unsigned Size = CGF.getContext().getTypeSize(T: ResultRegQualTys[i]);
2491 Address A = Dest.getAddress(CGF).withElementType(ElemTy: ResultRegTypes[i]);
2492 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, Ty: TruncTy)) {
2493 Builder.CreateStore(Val: Tmp, Addr: A);
2494 continue;
2495 }
2496
2497 QualType Ty =
2498 CGF.getContext().getIntTypeForBitwidth(DestWidth: Size, /*Signed=*/false);
2499 if (Ty.isNull()) {
2500 const Expr *OutExpr = S.getOutputExpr(i);
2501 CGM.getDiags().Report(OutExpr->getExprLoc(),
2502 diag::err_store_value_to_reg);
2503 return;
2504 }
2505 Dest = CGF.MakeAddrLValue(Addr: A, T: Ty);
2506 }
2507 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Tmp), Dst: Dest);
2508 }
2509}
2510
2511static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2512 const AsmStmt &S) {
2513 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2514
2515 StringRef Asm;
2516 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(Val: &S))
2517 Asm = GCCAsm->getAsmString()->getString();
2518
2519 auto &Ctx = CGF->CGM.getLLVMContext();
2520
2521 auto StrTy = llvm::ConstantDataArray::getString(Context&: Ctx, Initializer: Asm);
2522 auto FnTy = llvm::FunctionType::get(Result: llvm::Type::getVoidTy(C&: Ctx),
2523 Params: {StrTy->getType()}, isVarArg: false);
2524 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, T: FnTy);
2525
2526 CGF->Builder.CreateCall(Callee: UBF, Args: {StrTy});
2527}
2528
2529void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2530 // Pop all cleanup blocks at the end of the asm statement.
2531 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2532
2533 // Assemble the final asm string.
2534 std::string AsmString = S.generateAsmString(C: getContext());
2535
2536 // Get all the output and input constraints together.
2537 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2538 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2539
2540 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2541 bool IsValidTargetAsm = true;
2542 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2543 StringRef Name;
2544 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(Val: &S))
2545 Name = GAS->getOutputName(i);
2546 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2547 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2548 if (IsHipStdPar && !IsValid)
2549 IsValidTargetAsm = false;
2550 else
2551 assert(IsValid && "Failed to parse output constraint");
2552 OutputConstraintInfos.push_back(Elt: Info);
2553 }
2554
2555 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2556 StringRef Name;
2557 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(Val: &S))
2558 Name = GAS->getInputName(i);
2559 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2560 bool IsValid =
2561 getTarget().validateInputConstraint(OutputConstraints: OutputConstraintInfos, info&: Info);
2562 if (IsHipStdPar && !IsValid)
2563 IsValidTargetAsm = false;
2564 else
2565 assert(IsValid && "Failed to parse input constraint");
2566 InputConstraintInfos.push_back(Elt: Info);
2567 }
2568
2569 if (!IsValidTargetAsm)
2570 return EmitHipStdParUnsupportedAsm(CGF: this, S);
2571
2572 std::string Constraints;
2573
2574 std::vector<LValue> ResultRegDests;
2575 std::vector<QualType> ResultRegQualTys;
2576 std::vector<llvm::Type *> ResultRegTypes;
2577 std::vector<llvm::Type *> ResultTruncRegTypes;
2578 std::vector<llvm::Type *> ArgTypes;
2579 std::vector<llvm::Type *> ArgElemTypes;
2580 std::vector<llvm::Value*> Args;
2581 llvm::BitVector ResultTypeRequiresCast;
2582 llvm::BitVector ResultRegIsFlagReg;
2583
2584 // Keep track of inout constraints.
2585 std::string InOutConstraints;
2586 std::vector<llvm::Value*> InOutArgs;
2587 std::vector<llvm::Type*> InOutArgTypes;
2588 std::vector<llvm::Type*> InOutArgElemTypes;
2589
2590 // Keep track of out constraints for tied input operand.
2591 std::vector<std::string> OutputConstraints;
2592
2593 // Keep track of defined physregs.
2594 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2595
2596 // An inline asm can be marked readonly if it meets the following conditions:
2597 // - it doesn't have any side effects
2598 // - it doesn't clobber memory
2599 // - it doesn't return a value by-reference
2600 // It can be marked readnone if it doesn't have any input memory constraints
2601 // in addition to meeting the conditions listed above.
2602 bool ReadOnly = true, ReadNone = true;
2603
2604 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2605 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2606
2607 // Simplify the output constraint.
2608 std::string OutputConstraint(S.getOutputConstraint(i));
2609 OutputConstraint = SimplifyConstraint(Constraint: OutputConstraint.c_str() + 1,
2610 Target: getTarget(), OutCons: &OutputConstraintInfos);
2611
2612 const Expr *OutExpr = S.getOutputExpr(i);
2613 OutExpr = OutExpr->IgnoreParenNoopCasts(Ctx: getContext());
2614
2615 std::string GCCReg;
2616 OutputConstraint = AddVariableConstraints(Constraint: OutputConstraint, AsmExpr: *OutExpr,
2617 Target: getTarget(), CGM, Stmt: S,
2618 EarlyClobber: Info.earlyClobber(),
2619 GCCReg: &GCCReg);
2620 // Give an error on multiple outputs to the same physreg.
2621 if (!GCCReg.empty() && !PhysRegOutputs.insert(V: GCCReg).second)
2622 CGM.Error(loc: S.getAsmLoc(), error: "multiple outputs to hard register: " + GCCReg);
2623
2624 OutputConstraints.push_back(x: OutputConstraint);
2625 LValue Dest = EmitLValue(E: OutExpr);
2626 if (!Constraints.empty())
2627 Constraints += ',';
2628
2629 // If this is a register output, then make the inline asm return it
2630 // by-value. If this is a memory result, return the value by-reference.
2631 QualType QTy = OutExpr->getType();
2632 const bool IsScalarOrAggregate = hasScalarEvaluationKind(T: QTy) ||
2633 hasAggregateEvaluationKind(T: QTy);
2634 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2635
2636 Constraints += "=" + OutputConstraint;
2637 ResultRegQualTys.push_back(x: QTy);
2638 ResultRegDests.push_back(x: Dest);
2639
2640 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with(Prefix: "{@cc");
2641 ResultRegIsFlagReg.push_back(Val: IsFlagReg);
2642
2643 llvm::Type *Ty = ConvertTypeForMem(T: QTy);
2644 const bool RequiresCast = Info.allowsRegister() &&
2645 (getTargetHooks().isScalarizableAsmOperand(CGF&: *this, Ty) ||
2646 Ty->isAggregateType());
2647
2648 ResultTruncRegTypes.push_back(x: Ty);
2649 ResultTypeRequiresCast.push_back(Val: RequiresCast);
2650
2651 if (RequiresCast) {
2652 unsigned Size = getContext().getTypeSize(T: QTy);
2653 Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: Size);
2654 }
2655 ResultRegTypes.push_back(x: Ty);
2656 // If this output is tied to an input, and if the input is larger, then
2657 // we need to set the actual result type of the inline asm node to be the
2658 // same as the input type.
2659 if (Info.hasMatchingInput()) {
2660 unsigned InputNo;
2661 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2662 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2663 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2664 break;
2665 }
2666 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2667
2668 QualType InputTy = S.getInputExpr(i: InputNo)->getType();
2669 QualType OutputType = OutExpr->getType();
2670
2671 uint64_t InputSize = getContext().getTypeSize(T: InputTy);
2672 if (getContext().getTypeSize(T: OutputType) < InputSize) {
2673 // Form the asm to return the value as a larger integer or fp type.
2674 ResultRegTypes.back() = ConvertType(T: InputTy);
2675 }
2676 }
2677 if (llvm::Type* AdjTy =
2678 getTargetHooks().adjustInlineAsmType(CGF&: *this, Constraint: OutputConstraint,
2679 Ty: ResultRegTypes.back()))
2680 ResultRegTypes.back() = AdjTy;
2681 else {
2682 CGM.getDiags().Report(S.getAsmLoc(),
2683 diag::err_asm_invalid_type_in_input)
2684 << OutExpr->getType() << OutputConstraint;
2685 }
2686
2687 // Update largest vector width for any vector types.
2688 if (auto *VT = dyn_cast<llvm::VectorType>(Val: ResultRegTypes.back()))
2689 LargestVectorWidth =
2690 std::max(a: (uint64_t)LargestVectorWidth,
2691 b: VT->getPrimitiveSizeInBits().getKnownMinValue());
2692 } else {
2693 Address DestAddr = Dest.getAddress(CGF&: *this);
2694 // Matrix types in memory are represented by arrays, but accessed through
2695 // vector pointers, with the alignment specified on the access operation.
2696 // For inline assembly, update pointer arguments to use vector pointers.
2697 // Otherwise there will be a mismatch if the matrix is also an
2698 // input argument, which is represented as a vector.
2699 if (isa<MatrixType>(Val: OutExpr->getType().getCanonicalType()))
2700 DestAddr = DestAddr.withElementType(ElemTy: ConvertType(T: OutExpr->getType()));
2701
2702 ArgTypes.push_back(x: DestAddr.getType());
2703 ArgElemTypes.push_back(x: DestAddr.getElementType());
2704 Args.push_back(x: DestAddr.emitRawPointer(CGF&: *this));
2705 Constraints += "=*";
2706 Constraints += OutputConstraint;
2707 ReadOnly = ReadNone = false;
2708 }
2709
2710 if (Info.isReadWrite()) {
2711 InOutConstraints += ',';
2712
2713 const Expr *InputExpr = S.getOutputExpr(i);
2714 llvm::Value *Arg;
2715 llvm::Type *ArgElemType;
2716 std::tie(args&: Arg, args&: ArgElemType) = EmitAsmInputLValue(
2717 Info, InputValue: Dest, InputType: InputExpr->getType(), ConstraintStr&: InOutConstraints,
2718 Loc: InputExpr->getExprLoc());
2719
2720 if (llvm::Type* AdjTy =
2721 getTargetHooks().adjustInlineAsmType(CGF&: *this, Constraint: OutputConstraint,
2722 Ty: Arg->getType()))
2723 Arg = Builder.CreateBitCast(V: Arg, DestTy: AdjTy);
2724
2725 // Update largest vector width for any vector types.
2726 if (auto *VT = dyn_cast<llvm::VectorType>(Val: Arg->getType()))
2727 LargestVectorWidth =
2728 std::max(a: (uint64_t)LargestVectorWidth,
2729 b: VT->getPrimitiveSizeInBits().getKnownMinValue());
2730 // Only tie earlyclobber physregs.
2731 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2732 InOutConstraints += llvm::utostr(X: i);
2733 else
2734 InOutConstraints += OutputConstraint;
2735
2736 InOutArgTypes.push_back(x: Arg->getType());
2737 InOutArgElemTypes.push_back(x: ArgElemType);
2738 InOutArgs.push_back(x: Arg);
2739 }
2740 }
2741
2742 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2743 // to the return value slot. Only do this when returning in registers.
2744 if (isa<MSAsmStmt>(Val: &S)) {
2745 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2746 if (RetAI.isDirect() || RetAI.isExtend()) {
2747 // Make a fake lvalue for the return value slot.
2748 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2749 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2750 CGF&: *this, ReturnValue: ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2751 ResultRegDests, AsmString, NumOutputs: S.getNumOutputs());
2752 SawAsmBlock = true;
2753 }
2754 }
2755
2756 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2757 const Expr *InputExpr = S.getInputExpr(i);
2758
2759 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2760
2761 if (Info.allowsMemory())
2762 ReadNone = false;
2763
2764 if (!Constraints.empty())
2765 Constraints += ',';
2766
2767 // Simplify the input constraint.
2768 std::string InputConstraint(S.getInputConstraint(i));
2769 InputConstraint = SimplifyConstraint(Constraint: InputConstraint.c_str(), Target: getTarget(),
2770 OutCons: &OutputConstraintInfos);
2771
2772 InputConstraint = AddVariableConstraints(
2773 Constraint: InputConstraint, AsmExpr: *InputExpr->IgnoreParenNoopCasts(Ctx: getContext()),
2774 Target: getTarget(), CGM, Stmt: S, EarlyClobber: false /* No EarlyClobber */);
2775
2776 std::string ReplaceConstraint (InputConstraint);
2777 llvm::Value *Arg;
2778 llvm::Type *ArgElemType;
2779 std::tie(args&: Arg, args&: ArgElemType) = EmitAsmInput(Info, InputExpr, ConstraintStr&: Constraints);
2780
2781 // If this input argument is tied to a larger output result, extend the
2782 // input to be the same size as the output. The LLVM backend wants to see
2783 // the input and output of a matching constraint be the same size. Note
2784 // that GCC does not define what the top bits are here. We use zext because
2785 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2786 if (Info.hasTiedOperand()) {
2787 unsigned Output = Info.getTiedOperand();
2788 QualType OutputType = S.getOutputExpr(i: Output)->getType();
2789 QualType InputTy = InputExpr->getType();
2790
2791 if (getContext().getTypeSize(T: OutputType) >
2792 getContext().getTypeSize(T: InputTy)) {
2793 // Use ptrtoint as appropriate so that we can do our extension.
2794 if (isa<llvm::PointerType>(Val: Arg->getType()))
2795 Arg = Builder.CreatePtrToInt(V: Arg, DestTy: IntPtrTy);
2796 llvm::Type *OutputTy = ConvertType(T: OutputType);
2797 if (isa<llvm::IntegerType>(Val: OutputTy))
2798 Arg = Builder.CreateZExt(V: Arg, DestTy: OutputTy);
2799 else if (isa<llvm::PointerType>(Val: OutputTy))
2800 Arg = Builder.CreateZExt(V: Arg, DestTy: IntPtrTy);
2801 else if (OutputTy->isFloatingPointTy())
2802 Arg = Builder.CreateFPExt(V: Arg, DestTy: OutputTy);
2803 }
2804 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2805 ReplaceConstraint = OutputConstraints[Output];
2806 }
2807 if (llvm::Type* AdjTy =
2808 getTargetHooks().adjustInlineAsmType(CGF&: *this, Constraint: ReplaceConstraint,
2809 Ty: Arg->getType()))
2810 Arg = Builder.CreateBitCast(V: Arg, DestTy: AdjTy);
2811 else
2812 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2813 << InputExpr->getType() << InputConstraint;
2814
2815 // Update largest vector width for any vector types.
2816 if (auto *VT = dyn_cast<llvm::VectorType>(Val: Arg->getType()))
2817 LargestVectorWidth =
2818 std::max(a: (uint64_t)LargestVectorWidth,
2819 b: VT->getPrimitiveSizeInBits().getKnownMinValue());
2820
2821 ArgTypes.push_back(x: Arg->getType());
2822 ArgElemTypes.push_back(x: ArgElemType);
2823 Args.push_back(x: Arg);
2824 Constraints += InputConstraint;
2825 }
2826
2827 // Append the "input" part of inout constraints.
2828 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2829 ArgTypes.push_back(x: InOutArgTypes[i]);
2830 ArgElemTypes.push_back(x: InOutArgElemTypes[i]);
2831 Args.push_back(x: InOutArgs[i]);
2832 }
2833 Constraints += InOutConstraints;
2834
2835 // Labels
2836 SmallVector<llvm::BasicBlock *, 16> Transfer;
2837 llvm::BasicBlock *Fallthrough = nullptr;
2838 bool IsGCCAsmGoto = false;
2839 if (const auto *GS = dyn_cast<GCCAsmStmt>(Val: &S)) {
2840 IsGCCAsmGoto = GS->isAsmGoto();
2841 if (IsGCCAsmGoto) {
2842 for (const auto *E : GS->labels()) {
2843 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2844 Transfer.push_back(Dest.getBlock());
2845 if (!Constraints.empty())
2846 Constraints += ',';
2847 Constraints += "!i";
2848 }
2849 Fallthrough = createBasicBlock(name: "asm.fallthrough");
2850 }
2851 }

  bool HasUnwindClobber = false;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber == "memory")
      ReadOnly = ReadNone = false;
    else if (Clobber == "unwind") {
      HasUnwindClobber = true;
      continue;
    } else if (Clobber != "cc") {
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
      if (CGM.getCodeGenOpts().StackClashProtector &&
          getTarget().isSPRegName(Clobber)) {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

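    // MS-style asm only: if eax or edx is named as a clobber but already
    // appears in an output constraint ("={eax}", "={edx}", or the edx:eax
    // pair "=A"), mark that output early-clobber ("&") instead of adding a
    // second, conflicting reference to the same register.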
    if (isa<MSAsmStmt>(&S)) {
      if (Clobber == "eax" || Clobber == "edx") {
        if (Constraints.find("=&A") != std::string::npos)
          continue;
        std::string::size_type position1 =
            Constraints.find("={" + Clobber.str() + "}");
        if (position1 != std::string::npos) {
          Constraints.insert(position1 + 1, "&");
          continue;
        }
        std::string::size_type position2 = Constraints.find("=A");
        if (position2 != std::string::npos) {
          Constraints.insert(position2 + 1, "&");
          continue;
        }
      }
    }
    if (!Constraints.empty())
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
         "unwind clobber can't be used with asm goto");

  // Add machine specific clobbers
  std::string_view MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }
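
  // The constraint string is now complete. As a rough illustration (x86;
  // exact machine clobbers vary by target), a statement such as
  //   asm ("addl %1, %0" : "+r" (x) : "r" (y) : "cc");
  // would produce a constraint string along the lines of
  //   "=r,r,0,~{cc},~{dirflag},~{fpsr},~{flags}".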

  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;

  llvm::InlineAsm::AsmDialect GnuAsmDialect =
      CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
          ? llvm::InlineAsm::AD_ATT
          : llvm::InlineAsm::AD_Intel;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
      llvm::InlineAsm::AD_Intel : GnuAsmDialect;

  llvm::InlineAsm *IA = llvm::InlineAsm::get(
      FTy, AsmString, Constraints, HasSideEffect,
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
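  // The asm is materialized as a plain call, a call/invoke (with the "unwind"
  // clobber), or a callbr (asm goto). For instance, a simple
  // 'asm volatile ("nop" ::: "memory")' lowers to roughly
  //   call void asm sideeffect "nop", "~{memory},..."()
  // in the IR.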
  std::vector<llvm::Value *> RegResults;
  llvm::CallBrInst *CBR;
  llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
      CBRRegResults;
  if (IsGCCAsmGoto) {
    CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
    EmitBlock(Fallthrough);
    UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                      *this, RegResults);
    // Because we are emitting code top to bottom, we don't have enough
    // information at this point to know precisely whether we have a critical
    // edge. If we have outputs, split all indirect destinations.
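    // Each synthesized ".split" block has the callbr as its only predecessor,
    // so the stores of the asm results for that indirect edge can be placed
    // there without affecting other paths into the original destination.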
    if (!RegResults.empty()) {
      unsigned i = 0;
      for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
        llvm::Twine SynthName = Dest->getName() + ".split";
        llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
        llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
        Builder.SetInsertPoint(SynthBB);

        if (ResultRegTypes.size() == 1) {
          CBRRegResults[SynthBB].push_back(CBR);
        } else {
          for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
            llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
            CBRRegResults[SynthBB].push_back(Tmp);
          }
        }

        EmitBranch(Dest);
        EmitBlock(SynthBB);
        CBR->setIndirectDest(i++, SynthBB);
      }
    }
  } else if (HasUnwindClobber) {
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
    UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                      *this, RegResults);
  } else {
    llvm::CallInst *Result =
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
    UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                      *this, RegResults);
  }

  EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
                ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
                ResultRegIsFlagReg);

  // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
  // different insertion point; one for each indirect destination and with
  // CBRRegResults rather than RegResults.
  if (IsGCCAsmGoto && !CBRRegResults.empty()) {
    for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
      llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
      Builder.SetInsertPoint(Succ, --(Succ->end()));
      EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
                    ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
                    ResultTypeRequiresCast, ResultRegIsFlagReg);
    }
  }
}
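
// CapturedStmt is the outlining mechanism behind constructs such as OpenMP
// regions: the statement's body is emitted into a helper function that
// receives a pointer to the capture record built below, conceptually
//   void helper(<anon capture struct> *ctx) { /* captured body */ }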

LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV =
      MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                  E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *I);
    }
  }

  return SlotLV;
}

/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined
/// function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  LValue CapStruct = InitCapturedStruct(S);

  // Emit the CapturedDecl
  CodeGenFunction CGF(CGM, true);
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));

  return F;
}

Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  LValue CapStruct = InitCapturedStruct(S);
  return CapStruct.getAddress(*this);
}

/// Creates the outlined function for a CapturedStmt.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays.
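  // For a body that refers to a variable-length array such as 'int buf[n]',
  // the size expression's value is captured as a field of the record; reload
  // it here and seed VLASizeMap so that sizeof and indexing on the VLA keep
  // working inside the outlined body.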
  LValue Base = MakeNaturalAlignRawAddrLValue(
      CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}