//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
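///
/// For example (a sketch of what this enables at -O1 and above), a scoped
/// local such as:
///   { int x; use(&x); }
/// is bracketed with llvm.lifetime.start/llvm.lifetime.end intrinsics so the
/// backend can reuse the stack slot once the scope ends.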
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

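// Transfer the current FP settings onto the IRBuilder so that subsequent FP
// instructions pick them up as fast-math flags. For example (a sketch of the
// resulting IR, assuming -ffast-math), a multiply comes out as:
//   %mul = fmul fast float %a, %b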
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

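// CGFPOptionsRAII scopes a change of floating-point state (e.g. from a
// '#pragma float_control' or '#pragma STDC FENV_ACCESS' region) to a single
// expression. A typical use in the expression emitters looks like:
//   CGFPOptionsRAII FPOptsRAII(CGF, E); // prior FP state restored on exit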
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue = CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue(
      "unsafe-fp-math",
      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
          FPFeatures.allowFPContractAcrossStatement());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

static LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
                                         bool ForPointeeType,
                                         CodeGenFunction &CGF) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment =
      CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
  Address Addr = Address(V, CGF.ConvertTypeForMem(T), Alignment);
  return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this);
}

LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this);
}

LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,
                                                      QualType T) {
  return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this);
}

LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,
                                                             QualType T) {
  return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

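// Classify how values of a type are produced and manipulated during IR
// generation. For example, 'int' and 'int *' are TEK_Scalar, '_Complex
// double' is TEK_Complex, and a struct or array type is TEK_Aggregate;
// atomic types classify as their underlying value type.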
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

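// Emit, or fold away, the unified return block. For example, a function whose
// body is a single 'return x;' never branches to the return block, so the
// block can be deleted and the epilogue emitted in the current block instead.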
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt was created lazily, only when it was required; remove
  // it now, since it too was just a convenience for us.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // Add vscale_range attribute if appropriate.
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts());
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca =
        dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

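// Compute the type hash used by -fsanitize=function. The hash is taken over
// the mangled canonical type, so for example 'void (int)' and
// 'void (int) noexcept' hash identically: the exception specification is
// dropped first, allowing a noexcept function to be called through a
// non-noexcept pointer.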
llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
  if (!Ty->isFunctionNoProtoType())
    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
  return llvm::ConstantInt::get(
      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}

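// Attach kernel metadata for OpenCL/CUDA entry points. For example, an OpenCL
// kernel declared with __attribute__((reqd_work_group_size(8, 8, 1))) is
// annotated with:
//   !reqd_work_group_size !{i32 8, i32 8, i32 1}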
void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!getLangOpts().OpenCL)
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

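// Recognize the STL allocator interface by shape: a method named 'allocate'
// taking (size_t) or (size_t, const void *). For example, both
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);
// match, deliberately ignoring the enclosing namespace.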
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
  return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
         getTarget().getCXXABI().isMicrosoft() &&
         llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
           return isInAllocaArgument(CGM.getCXXABI(), P->getType());
         });
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    SanitizerMask no_sanitize_mask;
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      no_sanitize_mask |= Attr->getMask();
      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    // Apply the no_sanitize* attributes to SanOpts.
    SanOpts.Mask &= ~no_sanitize_mask;
    if (no_sanitize_mask & SanitizerKind::Address)
      SanOpts.set(SanitizerKind::KernelAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelAddress)
      SanOpts.set(SanitizerKind::Address, false);
    if (no_sanitize_mask & SanitizerKind::HWAddress)
      SanOpts.set(SanitizerKind::KernelHWAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
      SanOpts.set(SanitizerKind::HWAddress, false);

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);

    // Some passes need the non-negated no_sanitize attribute. Pass them on.
    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
      if (no_sanitize_mask & SanitizerKind::Thread)
        Fn->addFnAttr("no_sanitize_thread");
    }
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() &&
           II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now)
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

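  // Apply patchable-function-entry. A PatchableFunctionEntryAttr on the
  // declaration takes precedence over the -fpatchable-function-entry= driver
  // values. For example, __attribute__((patchable_function_entry(3, 1)))
  // yields "patchable-function-prefix"="1" (one NOP before the entry point)
  // and "patchable-function-entry"="2" (two NOPs after it).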
  unsigned Count, Offset;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
  }
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
  // backends as they don't need it -- instructions on these architectures are
  // always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

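  // When -fsanitize=function is enabled, the runtime compares the type hash
  // stored ahead of the function body against the hash of the type at an
  // indirect call site; e.g. calling a 'void (int)' function through a
  // 'void (*)(float)' pointer is then diagnosed at the call site.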
  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(
          llvm::LLVMContext::MD_func_sanitize,
          MDB.createRTTIPointerPrologue(
              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull &&
        !FnRetTy->isRecordType()) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //     Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //     kernels cannot include RTTI information, exception classes,
  //     recursive code, virtual functions or make use of C++ libraries that
  //     are not compiled for the device.
  if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
             getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
             (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute is set to the mcount function name, which is
  // architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = makeNaturalAddressForPointer(
        &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
        nullptr, nullptr, KnownNonNull);
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
      Builder.CreateStore(ReturnValue.emitRawPointer(*this),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr =
        Builder.CreateStructGEP(CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(),
                                     "agg.result");
    ReturnValue = Address(Addr, ConvertType(RetTy),
                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  // Handle emitting HLSL entry functions.
  if (D && D->hasAttr<HLSLShaderAttr>())
    CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
      MD && !MD->isStatic()) {
    bool IsInLambda =
        MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
    if (MD->isImplicitObjectMemberFunction())
      CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    if (IsInLambda) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address. Sign this pointer.
          CXXThisValue = ThisFieldLValue.getPointer(*this);
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else if (MD->isImplicitObjectMemberFunction()) {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no captures, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(
          isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size, but only if the function is not naked. Naked functions
  // have no prolog to run this evaluation.
  if (!FD || !FD->hasAttr<NakedAttr>()) {
    for (const VarDecl *VD : Args) {
      // Dig out the type as written from ParmVarDecls; it's unclear whether
      // the standard (C99 6.9.1p10) requires this, but we're following the
      // precedent set by gcc.
      QualType Ty;
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
        Ty = PVD->getOriginalType();
      else
        Ty = VD->getType();

      if (Ty->isVariablyModifiedType())
        EmitVariablyModifiedType(Ty);
    }
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  maybeCreateMCDCCondBitmap();
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  // Do not skip over the instrumentation when single byte coverage mode is
  // enabled.
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
      !llvm::EnableSingleByteCoverage) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

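// Assemble the formal parameter list for GD, adding implicit parameters as
// needed. For example, an implicit object member function gains a 'this'
// parameter, and every parameter annotated with
// __attribute__((pass_object_size(N))) is followed by an implicit size_t
// argument carrying the object size.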
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isImplicitObjectMemberFunction()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  assert(Fn && "generating code for null Function");
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, FD);

  if (FD->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // definition in case the function pointer is referenced somewhere.
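    // For example (a sketch of the common glibc fortify pattern), a header
    // definition like:
    //   extern inline __attribute__((always_inline, gnu_inline))
    //   void *memcpy(void *d, const void *s, size_t n) { ... }
    // gets its body emitted under the name 'memcpy.inline', while 'memcpy'
    // itself remains an external definition.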
1384 std::string FDInlineName = (Fn->getName() + ".inline").str();
1385 llvm::Module *M = Fn->getParent();
1386 llvm::Function *Clone = M->getFunction(Name: FDInlineName);
1387 if (!Clone) {
1388 Clone = llvm::Function::Create(Ty: Fn->getFunctionType(),
1389 Linkage: llvm::GlobalValue::InternalLinkage,
1390 AddrSpace: Fn->getAddressSpace(), N: FDInlineName, M);
1391 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1392 }
1393 Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1394 Fn = Clone;
1395 } else {
1396 // Detect the unusual situation where an inline version is shadowed by a
1397 // non-inline version. In that case we should pick the external one
1398 // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1399 // to detect that situation before we reach codegen, so do some late
1400 // replacement.
1401 for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1402 PD = PD->getPreviousDecl()) {
1403 if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1404 std::string FDInlineName = (Fn->getName() + ".inline").str();
1405 llvm::Module *M = Fn->getParent();
1406 if (llvm::Function *Clone = M->getFunction(Name: FDInlineName)) {
1407 Clone->replaceAllUsesWith(V: Fn);
1408 Clone->eraseFromParent();
1409 }
1410 break;
1411 }
1412 }
1413 }
1414
1415 // Check if we should generate debug info for this function.
1416 if (FD->hasAttr<NoDebugAttr>()) {
    // Clear non-distinct debug info that may have been attached to the
    // function due to an earlier declaration without the nodebug attribute.
    Fn->setSubprogram(nullptr);
    // Disable debug info for this function entirely.
    DebugInfo = nullptr;
1422 }
1423
1424 // The function might not have a body if we're generating thunks for a
1425 // function declaration.
1426 SourceRange BodyRange;
1427 if (Stmt *Body = FD->getBody())
1428 BodyRange = Body->getSourceRange();
1429 else
1430 BodyRange = FD->getLocation();
1431 CurEHLocation = BodyRange.getEnd();
1432
  // Use the location of the start of the function to determine where
  // the function definition is located. By default, use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by
  // codegen (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunks).
1438 SourceLocation Loc = FD->getLocation();
1439
1440 // If this is a function specialization then use the pattern body
1441 // as the location for the function.
1442 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1443 if (SpecDecl->hasBody(Definition&: SpecDecl))
1444 Loc = SpecDecl->getLocation();
1445
1446 Stmt *Body = FD->getBody();
1447
1448 if (Body) {
1449 // Coroutines always emit lifetime markers.
1450 if (isa<CoroutineBodyStmt>(Val: Body))
1451 ShouldEmitLifetimeMarkers = true;
1452
1453 // Initialize helper which will detect jumps which can cause invalid
1454 // lifetime markers.
1455 if (ShouldEmitLifetimeMarkers)
1456 Bypasses.Init(Body);
1457 }
1458
1459 // Emit the standard function prologue.
1460 StartFunction(GD, RetTy: ResTy, Fn, FnInfo, Args, Loc, StartLoc: BodyRange.getBegin());
1461
1462 // Save parameters for coroutine function.
  if (isa_and_nonnull<CoroutineBodyStmt>(Val: Body))
1464 llvm::append_range(C&: FnArgs, R: FD->parameters());
1465
1466 // Ensure that the function adheres to the forward progress guarantee, which
1467 // is required by certain optimizations.
1468 if (checkIfFunctionMustProgress())
1469 CurFn->addFnAttr(llvm::Attribute::MustProgress);
1470
1471 // Generate the body of the function.
1472 PGO.assignRegionCounters(GD, Fn: CurFn);
1473 if (isa<CXXDestructorDecl>(Val: FD))
1474 EmitDestructorBody(Args);
1475 else if (isa<CXXConstructorDecl>(Val: FD))
1476 EmitConstructorBody(Args);
1477 else if (getLangOpts().CUDA &&
1478 !getLangOpts().CUDAIsDevice &&
1479 FD->hasAttr<CUDAGlobalAttr>())
1480 CGM.getCUDARuntime().emitDeviceStub(CGF&: *this, Args);
1481 else if (isa<CXXMethodDecl>(Val: FD) &&
1482 cast<CXXMethodDecl>(Val: FD)->isLambdaStaticInvoker()) {
1483 // The lambda static invoker function is special, because it forwards or
1484 // clones the body of the function call operator (but is actually static).
1485 EmitLambdaStaticInvokeBody(MD: cast<CXXMethodDecl>(Val: FD));
1486 } else if (isa<CXXMethodDecl>(Val: FD) &&
1487 isLambdaCallOperator(MD: cast<CXXMethodDecl>(Val: FD)) &&
1488 !FnInfo.isDelegateCall() &&
1489 cast<CXXMethodDecl>(Val: FD)->getParent()->getLambdaStaticInvoker() &&
1490 hasInAllocaArg(MD: cast<CXXMethodDecl>(Val: FD))) {
    // If emitting a lambda with a static invoker on X86 Windows, change how
    // the call operator body is emitted.
    // Make sure that this is a call operator with an inalloca argument and
    // check for a delegate call to make sure this is the original call
    // operator and not the new forwarding function for the static invoker.
1496 EmitLambdaInAllocaCallOpBody(MD: cast<CXXMethodDecl>(Val: FD));
1497 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(Val: FD) &&
1498 (cast<CXXMethodDecl>(Val: FD)->isCopyAssignmentOperator() ||
1499 cast<CXXMethodDecl>(Val: FD)->isMoveAssignmentOperator())) {
1500 // Implicit copy-assignment gets the same special treatment as implicit
1501 // copy-constructors.
1502 emitImplicitAssignmentOperatorBody(Args);
1503 } else if (Body) {
1504 EmitFunctionBody(Body);
1505 } else
1506 llvm_unreachable("no definition for emitted function");
1507
1508 // C++11 [stmt.return]p2:
1509 // Flowing off the end of a function [...] results in undefined behavior in
1510 // a value-returning function.
1511 // C11 6.9.1p12:
1512 // If the '}' that terminates a function is reached, and the value of the
1513 // function call is used by the caller, the behavior is undefined.
1514 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1515 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1516 bool ShouldEmitUnreachable =
1517 CGM.getCodeGenOpts().StrictReturn ||
1518 !CGM.MayDropFunctionReturn(Context: FD->getASTContext(), ReturnType: FD->getReturnType());
1519 if (SanOpts.has(K: SanitizerKind::Return)) {
1520 SanitizerScope SanScope(this);
1521 llvm::Value *IsFalse = Builder.getFalse();
1522 EmitCheck(Checked: std::make_pair(x&: IsFalse, y: SanitizerKind::Return),
1523 Check: SanitizerHandler::MissingReturn,
1524 StaticArgs: EmitCheckSourceLocation(Loc: FD->getLocation()), DynamicArgs: std::nullopt);
1525 } else if (ShouldEmitUnreachable) {
1526 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1527 EmitTrapCall(llvm::Intrinsic::trap);
1528 }
1529 if (SanOpts.has(K: SanitizerKind::Return) || ShouldEmitUnreachable) {
1530 Builder.CreateUnreachable();
1531 Builder.ClearInsertionPoint();
1532 }
1533 }
1534
1535 // Emit the standard function epilogue.
1536 FinishFunction(EndLoc: BodyRange.getEnd());
1537
1538 // If we haven't marked the function nothrow through other means, do
1539 // a quick pass now to see if we can.
1540 if (!CurFn->doesNotThrow())
1541 TryMarkNoThrow(F: CurFn);
1542}
1543
/// ContainsLabel - Return true if the statement contains a label. If a
/// statement that is not executed normally contains no label, we can simply
/// remove its code.
1547bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1548 // Null statement, not a label!
1549 if (!S) return false;
1550
1551 // If this is a label, we have to emit the code, consider something like:
1552 // if (0) { ... foo: bar(); } goto foo;
1553 //
1554 // TODO: If anyone cared, we could track __label__'s, since we know that you
1555 // can't jump to one from outside their declared region.
1556 if (isa<LabelStmt>(Val: S))
1557 return true;
1558
1559 // If this is a case/default statement, and we haven't seen a switch, we have
1560 // to emit the code.
1561 if (isa<SwitchCase>(Val: S) && !IgnoreCaseStmts)
1562 return true;
1563
1564 // If this is a switch statement, we want to ignore cases below it.
1565 if (isa<SwitchStmt>(Val: S))
1566 IgnoreCaseStmts = true;
1567
1568 // Scan subexpressions for verboten labels.
1569 for (const Stmt *SubStmt : S->children())
1570 if (ContainsLabel(S: SubStmt, IgnoreCaseStmts))
1571 return true;
1572
1573 return false;
1574}
1575
1576/// containsBreak - Return true if the statement contains a break out of it.
1577/// If the statement (recursively) contains a switch or loop with a break
1578/// inside of it, this is fine.
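/// For example, "if (c) break;" contains a break, while
/// "while (c) { break; }" does not, because that break targets the nested
/// loop rather than breaking out of the statement itself.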
1579bool CodeGenFunction::containsBreak(const Stmt *S) {
  // A null statement cannot contain a break.
  if (!S) return false;
1582
1583 // If this is a switch or loop that defines its own break scope, then we can
1584 // include it and anything inside of it.
1585 if (isa<SwitchStmt>(Val: S) || isa<WhileStmt>(Val: S) || isa<DoStmt>(Val: S) ||
1586 isa<ForStmt>(Val: S))
1587 return false;
1588
1589 if (isa<BreakStmt>(Val: S))
1590 return true;
1591
1592 // Scan subexpressions for verboten breaks.
1593 for (const Stmt *SubStmt : S->children())
1594 if (containsBreak(S: SubStmt))
1595 return true;
1596
1597 return false;
1598}
1599
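/// mightAddDeclToScope - Return true if the given statement might introduce
/// a declaration into the enclosing scope, e.g. a bare "int x = 0;".
/// Statements that open their own scope (compound statements, loops, and so
/// on) never leak declarations into the current scope and return false.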
1600bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1601 if (!S) return false;
1602
1603 // Some statement kinds add a scope and thus never add a decl to the current
1604 // scope. Note, this list is longer than the list of statements that might
1605 // have an unscoped decl nested within them, but this way is conservatively
1606 // correct even if more statement kinds are added.
1607 if (isa<IfStmt>(Val: S) || isa<SwitchStmt>(Val: S) || isa<WhileStmt>(Val: S) ||
1608 isa<DoStmt>(Val: S) || isa<ForStmt>(Val: S) || isa<CompoundStmt>(Val: S) ||
1609 isa<CXXForRangeStmt>(Val: S) || isa<CXXTryStmt>(Val: S) ||
1610 isa<ObjCForCollectionStmt>(Val: S) || isa<ObjCAtTryStmt>(Val: S))
1611 return false;
1612
1613 if (isa<DeclStmt>(Val: S))
1614 return true;
1615
1616 for (const Stmt *SubStmt : S->children())
1617 if (mightAddDeclToScope(S: SubStmt))
1618 return true;
1619
1620 return false;
1621}
1622
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set ResultBool to the folded boolean
/// value.
1626bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1627 bool &ResultBool,
1628 bool AllowLabels) {
1629 // If MC/DC is enabled, disable folding so that we can instrument all
1630 // conditions to yield complete test vectors. We still keep track of
1631 // folded conditions during region mapping and visualization.
1632 if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1633 CGM.getCodeGenOpts().MCDCCoverage)
1634 return false;
1635
1636 llvm::APSInt ResultInt;
1637 if (!ConstantFoldsToSimpleInteger(Cond, Result&: ResultInt, AllowLabels))
1638 return false;
1639
1640 ResultBool = ResultInt.getBoolValue();
1641 return true;
1642}
1643
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set ResultInt to the folded value.
1647bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1648 llvm::APSInt &ResultInt,
1649 bool AllowLabels) {
1650 // FIXME: Rename and handle conversion of other evaluatable things
1651 // to bool.
1652 Expr::EvalResult Result;
1653 if (!Cond->EvaluateAsInt(Result, Ctx: getContext()))
1654 return false; // Not foldable, not integer or not fully evaluatable.
1655
1656 llvm::APSInt Int = Result.Val.getInt();
1657 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1658 return false; // Contains a label.
1659
1660 ResultInt = Int;
1661 return true;
1662}
1663
1664/// Strip parentheses and simplistic logical-NOT operators.
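/// For example, stripCond applied to "!!(x > 0)" yields the "x > 0"
/// subexpression.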
1665const Expr *CodeGenFunction::stripCond(const Expr *C) {
1666 while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(Val: C->IgnoreParens())) {
1667 if (Op->getOpcode() != UO_LNot)
1668 break;
1669 C = Op->getSubExpr();
1670 }
1671 return C->IgnoreParens();
1672}
1673
1674/// Determine whether the given condition is an instrumentable condition
1675/// (i.e. no "&&" or "||").
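/// For example, "x == 0" or "!x" is an instrumentable condition, whereas
/// "a && b" is not; its operands are instrumented individually instead.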
1676bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1677 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val: stripCond(C));
1678 return (!BOp || !BOp->isLogicalOp());
1679}
1680
1681/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1682/// increments a profile counter based on the semantics of the given logical
1683/// operator opcode. This is used to instrument branch condition coverage for
1684/// logical operators.
1685void CodeGenFunction::EmitBranchToCounterBlock(
1686 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1687 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1688 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1689 // If not instrumenting, just emit a branch.
1690 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1691 if (!InstrumentRegions || !isInstrumentedCondition(C: Cond))
1692 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1693
1694 llvm::BasicBlock *ThenBlock = nullptr;
1695 llvm::BasicBlock *ElseBlock = nullptr;
1696 llvm::BasicBlock *NextBlock = nullptr;
1697
1698 // Create the block we'll use to increment the appropriate counter.
1699 llvm::BasicBlock *CounterIncrBlock = createBasicBlock(name: "lop.rhscnt");
1700
1701 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1702 // means we need to evaluate the condition and increment the counter on TRUE:
1703 //
1704 // if (Cond)
1705 // goto CounterIncrBlock;
1706 // else
1707 // goto FalseBlock;
1708 //
1709 // CounterIncrBlock:
1710 // Counter++;
1711 // goto TrueBlock;
1712
1713 if (LOp == BO_LAnd) {
1714 ThenBlock = CounterIncrBlock;
1715 ElseBlock = FalseBlock;
1716 NextBlock = TrueBlock;
1717 }
1718
1719 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1720 // we need to evaluate the condition and increment the counter on FALSE:
1721 //
1722 // if (Cond)
1723 // goto TrueBlock;
1724 // else
1725 // goto CounterIncrBlock;
1726 //
1727 // CounterIncrBlock:
1728 // Counter++;
1729 // goto FalseBlock;
1730
1731 else if (LOp == BO_LOr) {
1732 ThenBlock = TrueBlock;
1733 ElseBlock = CounterIncrBlock;
1734 NextBlock = FalseBlock;
1735 } else {
    llvm_unreachable("expected a logical operator opcode (BO_LAnd or BO_LOr)");
1737 }
1738
1739 // Emit Branch based on condition.
1740 EmitBranchOnBoolExpr(Cond, TrueBlock: ThenBlock, FalseBlock: ElseBlock, TrueCount, LH);
1741
1742 // Emit the block containing the counter increment(s).
1743 EmitBlock(BB: CounterIncrBlock);
1744
1745 // Increment corresponding counter; if index not provided, use Cond as index.
1746 incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1747
1748 // Go to the next block.
1749 EmitBranch(Block: NextBlock);
1750}
1751
1752/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1753/// statement) to the specified blocks. Based on the condition, this might try
1754/// to simplify the codegen of the conditional based on the branch.
1755/// \param LH The value of the likelihood attribute on the True branch.
1756/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1757/// ConditionalOperator (ternary) through a recursive call for the operator's
1758/// LHS and RHS nodes.
1759void CodeGenFunction::EmitBranchOnBoolExpr(
1760 const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1761 uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp) {
1762 Cond = Cond->IgnoreParens();
1763
1764 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Val: Cond)) {
1765 // Handle X && Y in a condition.
1766 if (CondBOp->getOpcode() == BO_LAnd) {
1767 MCDCLogOpStack.push_back(Elt: CondBOp);
1768
1769 // If we have "1 && X", simplify the code. "0 && X" would have constant
1770 // folded if the case was simple enough.
1771 bool ConstantBool = false;
1772 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getLHS(), ResultBool&: ConstantBool) &&
1773 ConstantBool) {
1774 // br(1 && X) -> br(X).
1775 incrementProfileCounter(CondBOp);
1776 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LAnd, TrueBlock,
1777 FalseBlock, TrueCount, LH);
1778 MCDCLogOpStack.pop_back();
1779 return;
1780 }
1781
1782 // If we have "X && 1", simplify the code to use an uncond branch.
1783 // "X && 0" would have been constant folded to 0.
1784 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getRHS(), ResultBool&: ConstantBool) &&
1785 ConstantBool) {
1786 // br(X && 1) -> br(X).
1787 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1788 FalseBlock, TrueCount, LH, CondBOp);
1789 MCDCLogOpStack.pop_back();
1790 return;
1791 }
1792
1793 // Emit the LHS as a conditional. If the LHS conditional is false, we
1794 // want to jump to the FalseBlock.
1795 llvm::BasicBlock *LHSTrue = createBasicBlock(name: "land.lhs.true");
1796 // The counter tells us how often we evaluate RHS, and all of TrueCount
1797 // can be propagated to that branch.
1798 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1799
1800 ConditionalEvaluation eval(*this);
1801 {
1802 ApplyDebugLocation DL(*this, Cond);
1803 // Propagate the likelihood attribute like __builtin_expect
1804 // __builtin_expect(X && Y, 1) -> X and Y are likely
1805 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1806 EmitBranchOnBoolExpr(Cond: CondBOp->getLHS(), TrueBlock: LHSTrue, FalseBlock, TrueCount: RHSCount,
1807 LH: LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1808 EmitBlock(BB: LHSTrue);
1809 }
1810
1811 incrementProfileCounter(CondBOp);
1812 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1813
1814 // Any temporaries created here are conditional.
1815 eval.begin(CGF&: *this);
1816 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LAnd, TrueBlock,
1817 FalseBlock, TrueCount, LH);
1818 eval.end(CGF&: *this);
1819 MCDCLogOpStack.pop_back();
1820 return;
1821 }
1822
1823 if (CondBOp->getOpcode() == BO_LOr) {
1824 MCDCLogOpStack.push_back(Elt: CondBOp);
1825
1826 // If we have "0 || X", simplify the code. "1 || X" would have constant
1827 // folded if the case was simple enough.
1828 bool ConstantBool = false;
1829 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getLHS(), ResultBool&: ConstantBool) &&
1830 !ConstantBool) {
1831 // br(0 || X) -> br(X).
1832 incrementProfileCounter(CondBOp);
1833 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LOr, TrueBlock,
1834 FalseBlock, TrueCount, LH);
1835 MCDCLogOpStack.pop_back();
1836 return;
1837 }
1838
1839 // If we have "X || 0", simplify the code to use an uncond branch.
1840 // "X || 1" would have been constant folded to 1.
1841 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getRHS(), ResultBool&: ConstantBool) &&
1842 !ConstantBool) {
1843 // br(X || 0) -> br(X).
1844 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1845 FalseBlock, TrueCount, LH, CondBOp);
1846 MCDCLogOpStack.pop_back();
1847 return;
1848 }
1849 // Emit the LHS as a conditional. If the LHS conditional is true, we
1850 // want to jump to the TrueBlock.
1851 llvm::BasicBlock *LHSFalse = createBasicBlock(name: "lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the true count between the short
      // circuit and the RHS.
1855 uint64_t LHSCount =
1856 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1857 uint64_t RHSCount = TrueCount - LHSCount;
1858
1859 ConditionalEvaluation eval(*this);
1860 {
1861 // Propagate the likelihood attribute like __builtin_expect
1862 // __builtin_expect(X || Y, 1) -> only Y is likely
1863 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1864 ApplyDebugLocation DL(*this, Cond);
1865 EmitBranchOnBoolExpr(Cond: CondBOp->getLHS(), TrueBlock, FalseBlock: LHSFalse, TrueCount: LHSCount,
1866 LH: LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1867 EmitBlock(BB: LHSFalse);
1868 }
1869
1870 incrementProfileCounter(CondBOp);
1871 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1872
1873 // Any temporaries created here are conditional.
1874 eval.begin(CGF&: *this);
1875 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LOr, TrueBlock, FalseBlock,
1876 TrueCount: RHSCount, LH);
1877
1878 eval.end(CGF&: *this);
1879 MCDCLogOpStack.pop_back();
1880 return;
1881 }
1882 }
1883
1884 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Val: Cond)) {
1885 // br(!x, t, f) -> br(x, f, t)
1886 // Avoid doing this optimization when instrumenting a condition for MC/DC.
1887 // LNot is taken as part of the condition for simplicity, and changing its
1888 // sense negatively impacts test vector tracking.
1889 bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
1890 CGM.getCodeGenOpts().MCDCCoverage &&
1891 isInstrumentedCondition(C: Cond);
1892 if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
1893 // Negate the count.
1894 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1895 // The values of the enum are chosen to make this negation possible.
1896 LH = static_cast<Stmt::Likelihood>(-LH);
1897 // Negate the condition and swap the destination blocks.
1898 return EmitBranchOnBoolExpr(Cond: CondUOp->getSubExpr(), TrueBlock: FalseBlock, FalseBlock: TrueBlock,
1899 TrueCount: FalseCount, LH);
1900 }
1901 }
1902
1903 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Val: Cond)) {
1904 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1905 llvm::BasicBlock *LHSBlock = createBasicBlock(name: "cond.true");
1906 llvm::BasicBlock *RHSBlock = createBasicBlock(name: "cond.false");
1907
1908 // The ConditionalOperator itself has no likelihood information for its
1909 // true and false branches. This matches the behavior of __builtin_expect.
1910 ConditionalEvaluation cond(*this);
1911 EmitBranchOnBoolExpr(Cond: CondOp->getCond(), TrueBlock: LHSBlock, FalseBlock: RHSBlock,
1912 TrueCount: getProfileCount(CondOp), LH: Stmt::LH_None);
1913
1914 // When computing PGO branch weights, we only know the overall count for
1915 // the true block. This code is essentially doing tail duplication of the
1916 // naive code-gen, introducing new edges for which counts are not
1917 // available. Divide the counts proportionally between the LHS and RHS of
1918 // the conditional operator.
1919 uint64_t LHSScaledTrueCount = 0;
1920 if (TrueCount) {
1921 double LHSRatio =
1922 getProfileCount(CondOp) / (double)getCurrentProfileCount();
1923 LHSScaledTrueCount = TrueCount * LHSRatio;
1924 }
1925
1926 cond.begin(CGF&: *this);
1927 EmitBlock(BB: LHSBlock);
1928 incrementProfileCounter(CondOp);
1929 {
1930 ApplyDebugLocation DL(*this, Cond);
1931 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1932 LHSScaledTrueCount, LH, CondOp);
1933 }
1934 cond.end(CGF&: *this);
1935
1936 cond.begin(CGF&: *this);
1937 EmitBlock(BB: RHSBlock);
1938 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1939 TrueCount - LHSScaledTrueCount, LH, CondOp);
1940 cond.end(CGF&: *this);
1941
1942 return;
1943 }
1944
1945 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Val: Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
1951 EmitCXXThrowExpr(E: Throw, /*KeepInsertionPoint*/false);
1952 return;
1953 }
1954
1955 // Emit the code with the fully general case.
1956 llvm::Value *CondV;
1957 {
1958 ApplyDebugLocation DL(*this, Cond);
1959 CondV = EvaluateExprAsBool(E: Cond);
1960 }
1961
1962 // If not at the top of the logical operator nest, update MCDC temp with the
1963 // boolean result of the evaluated condition.
1964 if (!MCDCLogOpStack.empty()) {
1965 const Expr *MCDCBaseExpr = Cond;
1966 // When a nested ConditionalOperator (ternary) is encountered in a boolean
1967 // expression, MC/DC tracks the result of the ternary, and this is tied to
1968 // the ConditionalOperator expression and not the ternary's LHS or RHS. If
1969 // this is the case, the ConditionalOperator expression is passed through
1970 // the ConditionalOp parameter and then used as the MCDC base expression.
1971 if (ConditionalOp)
1972 MCDCBaseExpr = ConditionalOp;
1973
1974 maybeUpdateMCDCCondBitmap(E: MCDCBaseExpr, Val: CondV);
1975 }
1976
1977 llvm::MDNode *Weights = nullptr;
1978 llvm::MDNode *Unpredictable = nullptr;
1979
1980 // If the branch has a condition wrapped by __builtin_unpredictable,
1981 // create metadata that specifies that the branch is unpredictable.
1982 // Don't bother if not optimizing because that metadata would not be used.
1983 auto *Call = dyn_cast<CallExpr>(Val: Cond->IgnoreImpCasts());
1984 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1985 auto *FD = dyn_cast_or_null<FunctionDecl>(Val: Call->getCalleeDecl());
1986 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1987 llvm::MDBuilder MDHelper(getLLVMContext());
1988 Unpredictable = MDHelper.createUnpredictable();
1989 }
1990 }
1991
  // If there is likelihood knowledge for the condition, lower it.
  // Note that if not optimizing, this won't emit anything.
1994 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(Cond: CondV, LH);
1995 if (CondV != NewCondV)
1996 CondV = NewCondV;
1997 else {
1998 // Otherwise, lower profile counts. Note that we do this even at -O0.
1999 uint64_t CurrentCount = std::max(a: getCurrentProfileCount(), b: TrueCount);
2000 Weights = createProfileWeights(TrueCount, FalseCount: CurrentCount - TrueCount);
2001 }
2002
2003 Builder.CreateCondBr(Cond: CondV, True: TrueBlock, False: FalseBlock, BranchWeights: Weights, Unpredictable);
2004}
2005
2006/// ErrorUnsupported - Print out an error that codegen doesn't support the
2007/// specified stmt yet.
2008void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2009 CGM.ErrorUnsupported(S, Type);
2010}
2011
2012/// emitNonZeroVLAInit - Emit the "zero" initialization of a
2013/// variable-length array whose elements have a non-zero bit-pattern.
2014///
2015/// \param baseType the inner-most element type of the array
2016/// \param src - a char* pointing to the bit-pattern for a single
2017/// base element of the array
2018/// \param sizeInChars - the total size of the VLA, in chars
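///
/// For example, on the Itanium C++ ABI a null pointer to data member is
/// represented as -1, so zero-initializing "int S::*a[n]" must splat that
/// bit-pattern over the VLA rather than memset the storage to zero.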
2019static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
2020 Address dest, Address src,
2021 llvm::Value *sizeInChars) {
2022 CGBuilderTy &Builder = CGF.Builder;
2023
2024 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(T: baseType);
2025 llvm::Value *baseSizeInChars
2026 = llvm::ConstantInt::get(Ty: CGF.IntPtrTy, V: baseSize.getQuantity());
2027
2028 Address begin = dest.withElementType(ElemTy: CGF.Int8Ty);
2029 llvm::Value *end = Builder.CreateInBoundsGEP(Ty: begin.getElementType(),
2030 Ptr: begin.emitRawPointer(CGF),
2031 IdxList: sizeInChars, Name: "vla.end");
2032
2033 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2034 llvm::BasicBlock *loopBB = CGF.createBasicBlock(name: "vla-init.loop");
2035 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "vla-init.cont");
2036
2037 // Make a loop over the VLA. C99 guarantees that the VLA element
2038 // count must be nonzero.
2039 CGF.EmitBlock(BB: loopBB);
2040
2041 llvm::PHINode *cur = Builder.CreatePHI(Ty: begin.getType(), NumReservedValues: 2, Name: "vla.cur");
2042 cur->addIncoming(V: begin.emitRawPointer(CGF), BB: originBB);
2043
2044 CharUnits curAlign =
2045 dest.getAlignment().alignmentOfArrayElement(elementSize: baseSize);
2046
2047 // memcpy the individual element bit-pattern.
2048 Builder.CreateMemCpy(Dest: Address(cur, CGF.Int8Ty, curAlign), Src: src, Size: baseSizeInChars,
2049 /*volatile*/ IsVolatile: false);
2050
2051 // Go to the next element.
2052 llvm::Value *next =
2053 Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: cur, IdxList: baseSizeInChars, Name: "vla.next");
2054
2055 // Leave if that's the end of the VLA.
2056 llvm::Value *done = Builder.CreateICmpEQ(LHS: next, RHS: end, Name: "vla-init.isdone");
2057 Builder.CreateCondBr(Cond: done, True: contBB, False: loopBB);
2058 cur->addIncoming(V: next, BB: loopBB);
2059
2060 CGF.EmitBlock(BB: contBB);
2061}
2062
2063void
2064CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
2065 // Ignore empty classes in C++.
2066 if (getLangOpts().CPlusPlus) {
2067 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2068 if (cast<CXXRecordDecl>(Val: RT->getDecl())->isEmpty())
2069 return;
2070 }
2071 }
2072
2073 if (DestPtr.getElementType() != Int8Ty)
2074 DestPtr = DestPtr.withElementType(ElemTy: Int8Ty);
2075
2076 // Get size and alignment info for this aggregate.
2077 CharUnits size = getContext().getTypeSizeInChars(T: Ty);
2078
2079 llvm::Value *SizeVal;
2080 const VariableArrayType *vla;
2081
2082 // Don't bother emitting a zero-byte memset.
2083 if (size.isZero()) {
2084 // But note that getTypeInfo returns 0 for a VLA.
2085 if (const VariableArrayType *vlaType =
2086 dyn_cast_or_null<VariableArrayType>(
2087 Val: getContext().getAsArrayType(T: Ty))) {
2088 auto VlaSize = getVLASize(vla: vlaType);
2089 SizeVal = VlaSize.NumElts;
2090 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2091 if (!eltSize.isOne())
2092 SizeVal = Builder.CreateNUWMul(LHS: SizeVal, RHS: CGM.getSize(numChars: eltSize));
2093 vla = vlaType;
2094 } else {
2095 return;
2096 }
2097 } else {
2098 SizeVal = CGM.getSize(numChars: size);
2099 vla = nullptr;
2100 }
2101
2102 // If the type contains a pointer to data member we can't memset it to zero.
2103 // Instead, create a null constant and copy it to the destination.
2104 // TODO: there are other patterns besides zero that we can usefully memset,
2105 // like -1, which happens to be the pattern used by member-pointers.
2106 if (!CGM.getTypes().isZeroInitializable(T: Ty)) {
2107 // For a VLA, emit a single element, then splat that over the VLA.
2108 if (vla) Ty = getContext().getBaseElementType(vla);
2109
2110 llvm::Constant *NullConstant = CGM.EmitNullConstant(T: Ty);
2111
2112 llvm::GlobalVariable *NullVariable =
2113 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2114 /*isConstant=*/true,
2115 llvm::GlobalVariable::PrivateLinkage,
2116 NullConstant, Twine());
2117 CharUnits NullAlign = DestPtr.getAlignment();
2118 NullVariable->setAlignment(NullAlign.getAsAlign());
2119 Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2120
2121 if (vla) return emitNonZeroVLAInit(CGF&: *this, baseType: Ty, dest: DestPtr, src: SrcPtr, sizeInChars: SizeVal);
2122
2123 // Get and call the appropriate llvm.memcpy overload.
2124 Builder.CreateMemCpy(Dest: DestPtr, Src: SrcPtr, Size: SizeVal, IsVolatile: false);
2125 return;
2126 }
2127
2128 // Otherwise, just memset the whole thing to zero. This is legal
2129 // because in LLVM, all default initializers (other than the ones we just
2130 // handled above) are guaranteed to have a bit pattern of all zeros.
2131 Builder.CreateMemSet(Dest: DestPtr, Value: Builder.getInt8(C: 0), Size: SizeVal, IsVolatile: false);
2132}
2133
2134llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
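  // This supports the GNU "labels as values" extension, e.g.:
  //   void *p = &&done; goto *p; done:;
  // Every address-taken label becomes a destination of the single shared
  // indirectbr instruction created in GetIndirectGotoBlock.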
2135 // Make sure that there is a block for the indirect goto.
2136 if (!IndirectBranch)
2137 GetIndirectGotoBlock();
2138
2139 llvm::BasicBlock *BB = getJumpDestForLabel(S: L).getBlock();
2140
2141 // Make sure the indirect branch includes all of the address-taken blocks.
2142 IndirectBranch->addDestination(Dest: BB);
2143 return llvm::BlockAddress::get(F: CurFn, BB);
2144}
2145
2146llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2147 // If we already made the indirect branch for indirect goto, return its block.
2148 if (IndirectBranch) return IndirectBranch->getParent();
2149
2150 CGBuilderTy TmpBuilder(*this, createBasicBlock(name: "indirectgoto"));
2151
2152 // Create the PHI node that indirect gotos will add entries to.
2153 llvm::Value *DestVal = TmpBuilder.CreatePHI(Ty: Int8PtrTy, NumReservedValues: 0,
2154 Name: "indirect.goto.dest");
2155
2156 // Create the indirect branch instruction.
2157 IndirectBranch = TmpBuilder.CreateIndirectBr(Addr: DestVal);
2158 return IndirectBranch->getParent();
2159}
2160
2161/// Computes the length of an array in elements, as well as the base
2162/// element type and a properly-typed first element pointer.
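/// For example, for "int a[n][2][3]" this returns n*2*3 elements (n from the
/// VLA dimension, 2*3 from the constant dimensions), sets baseType to int,
/// and adjusts addr to point at the first int element.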
2163llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2164 QualType &baseType,
2165 Address &addr) {
2166 const ArrayType *arrayType = origArrayType;
2167
  // If it's a VLA, we have to load the stored size. Note that this is the
  // element count of the VLA's variable dimensions, not its size in bytes.
2170 llvm::Value *numVLAElements = nullptr;
2171 if (isa<VariableArrayType>(Val: arrayType)) {
2172 numVLAElements = getVLASize(vla: cast<VariableArrayType>(Val: arrayType)).NumElts;
2173
2174 // Walk into all VLAs. This doesn't require changes to addr,
2175 // which has type T* where T is the first non-VLA element type.
2176 do {
2177 QualType elementType = arrayType->getElementType();
2178 arrayType = getContext().getAsArrayType(T: elementType);
2179
2180 // If we only have VLA components, 'addr' requires no adjustment.
2181 if (!arrayType) {
2182 baseType = elementType;
2183 return numVLAElements;
2184 }
2185 } while (isa<VariableArrayType>(Val: arrayType));
2186
2187 // We get out here only if we find a constant array type
2188 // inside the VLA.
2189 }
2190
2191 // We have some number of constant-length arrays, so addr should
2192 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2193 // down to the first element of addr.
2194 SmallVector<llvm::Value*, 8> gepIndices;
2195
2196 // GEP down to the array type.
2197 llvm::ConstantInt *zero = Builder.getInt32(C: 0);
2198 gepIndices.push_back(Elt: zero);
2199
2200 uint64_t countFromCLAs = 1;
2201 QualType eltType;
2202
2203 llvm::ArrayType *llvmArrayType =
2204 dyn_cast<llvm::ArrayType>(Val: addr.getElementType());
2205 while (llvmArrayType) {
2206 assert(isa<ConstantArrayType>(arrayType));
2207 assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2208 llvmArrayType->getNumElements());
2209
2210 gepIndices.push_back(Elt: zero);
2211 countFromCLAs *= llvmArrayType->getNumElements();
2212 eltType = arrayType->getElementType();
2213
2214 llvmArrayType =
2215 dyn_cast<llvm::ArrayType>(Val: llvmArrayType->getElementType());
2216 arrayType = getContext().getAsArrayType(T: arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
2219 }
2220
2221 if (arrayType) {
2222 // From this point onwards, the Clang array type has been emitted
2223 // as some other type (probably a packed struct). Compute the array
2224 // size, and just emit the 'begin' expression as a bitcast.
2225 while (arrayType) {
2226 countFromCLAs *= cast<ConstantArrayType>(Val: arrayType)->getZExtSize();
2227 eltType = arrayType->getElementType();
2228 arrayType = getContext().getAsArrayType(T: eltType);
2229 }
2230
2231 llvm::Type *baseType = ConvertType(T: eltType);
2232 addr = addr.withElementType(ElemTy: baseType);
2233 } else {
2234 // Create the actual GEP.
2235 addr = Address(Builder.CreateInBoundsGEP(Ty: addr.getElementType(),
2236 Ptr: addr.emitRawPointer(CGF&: *this),
2237 IdxList: gepIndices, Name: "array.begin"),
2238 ConvertTypeForMem(T: eltType), addr.getAlignment());
2239 }
2240
2241 baseType = eltType;
2242
2243 llvm::Value *numElements
2244 = llvm::ConstantInt::get(Ty: SizeTy, V: countFromCLAs);
2245
2246 // If we had any VLA dimensions, factor them in.
2247 if (numVLAElements)
2248 numElements = Builder.CreateNUWMul(LHS: numVLAElements, RHS: numElements);
2249
2250 return numElements;
2251}
2252
2253CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2254 const VariableArrayType *vla = getContext().getAsVariableArrayType(T: type);
2255 assert(vla && "type was not a variable array type!");
2256 return getVLASize(vla);
2257}
2258
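/// Compute the total element count and innermost element type of a
/// (possibly multi-dimensional) VLA, e.g. {n*m, int} for "int[n][m]". The
/// multiplications are marked nuw because a wrapping element count is
/// already undefined behavior.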
2259CodeGenFunction::VlaSizePair
2260CodeGenFunction::getVLASize(const VariableArrayType *type) {
2261 // The number of elements so far; always size_t.
2262 llvm::Value *numElements = nullptr;
2263
2264 QualType elementType;
2265 do {
2266 elementType = type->getElementType();
2267 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2268 assert(vlaSize && "no size for VLA!");
2269 assert(vlaSize->getType() == SizeTy);
2270
2271 if (!numElements) {
2272 numElements = vlaSize;
2273 } else {
2274 // It's undefined behavior if this wraps around, so mark it that way.
2275 // FIXME: Teach -fsanitize=undefined to trap this.
2276 numElements = Builder.CreateNUWMul(LHS: numElements, RHS: vlaSize);
2277 }
2278 } while ((type = getContext().getAsVariableArrayType(T: elementType)));
2279
2280 return { numElements, elementType };
2281}
2282
2283CodeGenFunction::VlaSizePair
2284CodeGenFunction::getVLAElements1D(QualType type) {
2285 const VariableArrayType *vla = getContext().getAsVariableArrayType(T: type);
2286 assert(vla && "type was not a variable array type!");
2287 return getVLAElements1D(vla);
2288}
2289
2290CodeGenFunction::VlaSizePair
2291CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2292 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2293 assert(VlaSize && "no size for VLA!");
2294 assert(VlaSize->getType() == SizeTy);
2295 return { VlaSize, Vla->getElementType() };
2296}
2297
  assert(type->isVariablyModifiedType() &&
         "Must pass a variably modified type to EmitVariablyModifiedType!");
2301
2302 EnsureInsertPoint();
2303
2304 // We're going to walk down into the type and look for VLA
2305 // expressions.
2306 do {
2307 assert(type->isVariablyModifiedType());
2308
2309 const Type *ty = type.getTypePtr();
2310 switch (ty->getTypeClass()) {
2311
2312#define TYPE(Class, Base)
2313#define ABSTRACT_TYPE(Class, Base)
2314#define NON_CANONICAL_TYPE(Class, Base)
2315#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2316#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2317#include "clang/AST/TypeNodes.inc"
2318 llvm_unreachable("unexpected dependent type!");
2319
2320 // These types are never variably-modified.
2321 case Type::Builtin:
2322 case Type::Complex:
2323 case Type::Vector:
2324 case Type::ExtVector:
2325 case Type::ConstantMatrix:
2326 case Type::Record:
2327 case Type::Enum:
2328 case Type::Using:
2329 case Type::TemplateSpecialization:
2330 case Type::ObjCTypeParam:
2331 case Type::ObjCObject:
2332 case Type::ObjCInterface:
2333 case Type::ObjCObjectPointer:
2334 case Type::BitInt:
2335 llvm_unreachable("type class is never variably-modified!");
2336
2337 case Type::Elaborated:
2338 type = cast<ElaboratedType>(ty)->getNamedType();
2339 break;
2340
2341 case Type::Adjusted:
2342 type = cast<AdjustedType>(ty)->getAdjustedType();
2343 break;
2344
2345 case Type::Decayed:
2346 type = cast<DecayedType>(ty)->getPointeeType();
2347 break;
2348
2349 case Type::Pointer:
2350 type = cast<PointerType>(ty)->getPointeeType();
2351 break;
2352
2353 case Type::BlockPointer:
2354 type = cast<BlockPointerType>(ty)->getPointeeType();
2355 break;
2356
2357 case Type::LValueReference:
2358 case Type::RValueReference:
2359 type = cast<ReferenceType>(ty)->getPointeeType();
2360 break;
2361
2362 case Type::MemberPointer:
2363 type = cast<MemberPointerType>(ty)->getPointeeType();
2364 break;
2365
2366 case Type::ArrayParameter:
2367 case Type::ConstantArray:
2368 case Type::IncompleteArray:
2369 // Losing element qualification here is fine.
2370 type = cast<ArrayType>(ty)->getElementType();
2371 break;
2372
2373 case Type::VariableArray: {
2374 // Losing element qualification here is fine.
2375 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2376
2377 // Unknown size indication requires no size computation.
2378 // Otherwise, evaluate and record it.
2379 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2380 // It's possible that we might have emitted this already,
2381 // e.g. with a typedef and a pointer to it.
2382 llvm::Value *&entry = VLASizeMap[sizeExpr];
2383 if (!entry) {
2384 llvm::Value *size = EmitScalarExpr(E: sizeExpr);
2385
2386 // C11 6.7.6.2p5:
2387 // If the size is an expression that is not an integer constant
2388 // expression [...] each time it is evaluated it shall have a value
2389 // greater than zero.
2390 if (SanOpts.has(K: SanitizerKind::VLABound)) {
2391 SanitizerScope SanScope(this);
2392 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: size->getType());
2393 clang::QualType SEType = sizeExpr->getType();
2394 llvm::Value *CheckCondition =
2395 SEType->isSignedIntegerType()
2396 ? Builder.CreateICmpSGT(LHS: size, RHS: Zero)
2397 : Builder.CreateICmpUGT(LHS: size, RHS: Zero);
2398 llvm::Constant *StaticArgs[] = {
2399 EmitCheckSourceLocation(Loc: sizeExpr->getBeginLoc()),
2400 EmitCheckTypeDescriptor(T: SEType)};
2401 EmitCheck(Checked: std::make_pair(CheckCondition, SanitizerKind::VLABound),
2402 Check: SanitizerHandler::VLABoundNotPositive, StaticArgs, DynamicArgs: size);
2403 }
2404
2405 // Always zexting here would be wrong if it weren't
2406 // undefined behavior to have a negative bound.
2407 // FIXME: What about when size's type is larger than size_t?
2408 entry = Builder.CreateIntCast(V: size, DestTy: SizeTy, /*signed*/ isSigned: false);
2409 }
2410 }
2411 type = vat->getElementType();
2412 break;
2413 }
2414
2415 case Type::FunctionProto:
2416 case Type::FunctionNoProto:
2417 type = cast<FunctionType>(ty)->getReturnType();
2418 break;
2419
2420 case Type::Paren:
2421 case Type::TypeOf:
2422 case Type::UnaryTransform:
2423 case Type::Attributed:
2424 case Type::BTFTagAttributed:
2425 case Type::SubstTemplateTypeParm:
2426 case Type::MacroQualified:
2427 case Type::CountAttributed:
2428 // Keep walking after single level desugaring.
2429 type = type.getSingleStepDesugaredType(Context: getContext());
2430 break;
2431
2432 case Type::Typedef:
2433 case Type::Decltype:
2434 case Type::Auto:
2435 case Type::DeducedTemplateSpecialization:
2436 case Type::PackIndexing:
2437 // Stop walking: nothing to do.
2438 return;
2439
2440 case Type::TypeOfExpr:
2441 // Stop walking: emit typeof expression.
2442 EmitIgnoredExpr(E: cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2443 return;
2444
2445 case Type::Atomic:
2446 type = cast<AtomicType>(ty)->getValueType();
2447 break;
2448
2449 case Type::Pipe:
2450 type = cast<PipeType>(ty)->getElementType();
2451 break;
2452 }
2453 } while (type->isVariablyModifiedType());
2454}
2455
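// On targets where va_list is an array type (e.g. "__va_list_tag[1]" in the
// SysV x86-64 ABI), a va_list expression decays to a pointer, so it is
// emitted directly as a pointer; otherwise the lvalue itself is emitted.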
2456Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2457 if (getContext().getBuiltinVaListType()->isArrayType())
2458 return EmitPointerWithAlignment(Addr: E);
2459 return EmitLValue(E).getAddress(CGF&: *this);
2460}
2461
2462Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2463 return EmitLValue(E).getAddress(CGF&: *this);
2464}
2465
2466void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2467 const APValue &Init) {
2468 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2469 if (CGDebugInfo *Dbg = getDebugInfo())
2470 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2471 Dbg->EmitGlobalVariable(VD: E->getDecl(), Init);
2472}
2473
2474CodeGenFunction::PeepholeProtection
2475CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2476 // At the moment, the only aggressive peephole we do in IR gen
2477 // is trunc(zext) folding, but if we add more, we can easily
2478 // extend this protection.
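  //
  // For example, if the scalar is "zext i16 %x to i32", a later trunc of it
  // back to i16 could be folded to plain %x; the extra bitcast emitted below
  // hides the zext from that fold until the protection is removed.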
2479
2480 if (!rvalue.isScalar()) return PeepholeProtection();
2481 llvm::Value *value = rvalue.getScalarVal();
2482 if (!isa<llvm::ZExtInst>(Val: value)) return PeepholeProtection();
2483
2484 // Just make an extra bitcast.
2485 assert(HaveInsertPoint());
2486 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2487 Builder.GetInsertBlock());
2488
2489 PeepholeProtection protection;
2490 protection.Inst = inst;
2491 return protection;
2492}
2493
2494void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2495 if (!protection.Inst) return;
2496
  // In theory, we could try to duplicate the peepholes now, but it is not
  // worth the effort.
2498 protection.Inst->eraseFromParent();
2499}
2500
2501void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2502 QualType Ty, SourceLocation Loc,
2503 SourceLocation AssumptionLoc,
2504 llvm::Value *Alignment,
2505 llvm::Value *OffsetValue) {
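  // When the alignment sanitizer is enabled, the code below also verifies
  // ((ptr - offset) & (alignment - 1)) == 0, i.e. that the pointer really
  // has the alignment we are about to assume via llvm.assume.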
2506 if (Alignment->getType() != IntPtrTy)
2507 Alignment =
2508 Builder.CreateIntCast(V: Alignment, DestTy: IntPtrTy, isSigned: false, Name: "casted.align");
2509 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2510 OffsetValue =
2511 Builder.CreateIntCast(V: OffsetValue, DestTy: IntPtrTy, isSigned: true, Name: "casted.offset");
2512 llvm::Value *TheCheck = nullptr;
2513 if (SanOpts.has(K: SanitizerKind::Alignment)) {
2514 llvm::Value *PtrIntValue =
2515 Builder.CreatePtrToInt(V: PtrValue, DestTy: IntPtrTy, Name: "ptrint");
2516
2517 if (OffsetValue) {
2518 bool IsOffsetZero = false;
2519 if (const auto *CI = dyn_cast<llvm::ConstantInt>(Val: OffsetValue))
2520 IsOffsetZero = CI->isZero();
2521
2522 if (!IsOffsetZero)
2523 PtrIntValue = Builder.CreateSub(LHS: PtrIntValue, RHS: OffsetValue, Name: "offsetptr");
2524 }
2525
2526 llvm::Value *Zero = llvm::ConstantInt::get(Ty: IntPtrTy, V: 0);
2527 llvm::Value *Mask =
2528 Builder.CreateSub(LHS: Alignment, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, V: 1));
2529 llvm::Value *MaskedPtr = Builder.CreateAnd(LHS: PtrIntValue, RHS: Mask, Name: "maskedptr");
2530 TheCheck = Builder.CreateICmpEQ(LHS: MaskedPtr, RHS: Zero, Name: "maskcond");
2531 }
2532 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2533 DL: CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2534
2535 if (!SanOpts.has(K: SanitizerKind::Alignment))
2536 return;
2537 emitAlignmentAssumptionCheck(Ptr: PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2538 OffsetValue, TheCheck, Assumption);
2539}
2540
2541void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2542 const Expr *E,
2543 SourceLocation AssumptionLoc,
2544 llvm::Value *Alignment,
2545 llvm::Value *OffsetValue) {
2546 QualType Ty = E->getType();
2547 SourceLocation Loc = E->getExprLoc();
2548
2549 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2550 OffsetValue);
2551}
2552
2553llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2554 llvm::Value *AnnotatedVal,
2555 StringRef AnnotationStr,
2556 SourceLocation Location,
2557 const AnnotateAttr *Attr) {
2558 SmallVector<llvm::Value *, 5> Args = {
2559 AnnotatedVal,
2560 CGM.EmitAnnotationString(Str: AnnotationStr),
2561 CGM.EmitAnnotationUnit(Loc: Location),
2562 CGM.EmitAnnotationLineNo(L: Location),
2563 };
2564 if (Attr)
2565 Args.push_back(Elt: CGM.EmitAnnotationArgs(Attr));
2566 return Builder.CreateCall(Callee: AnnotationFn, Args);
2567}
2568
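// For a declaration such as:
//   int x __attribute__((annotate("mydata")));
// EmitVarAnnotations emits one call to llvm.var.annotation on the variable's
// address for each annotate attribute.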
2569void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2570 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2571 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2572 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2573 {V->getType(), CGM.ConstGlobalsPtrTy}),
2574 V, I->getAnnotation(), D->getLocation(), I);
2575}
2576
2577Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2578 Address Addr) {
2579 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2580 llvm::Value *V = Addr.emitRawPointer(CGF&: *this);
2581 llvm::Type *VTy = V->getType();
2582 auto *PTy = dyn_cast<llvm::PointerType>(Val: VTy);
2583 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2584 llvm::PointerType *IntrinTy =
2585 llvm::PointerType::get(C&: CGM.getLLVMContext(), AddressSpace: AS);
2586 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2587 {IntrinTy, CGM.ConstGlobalsPtrTy});
2588
2589 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME: Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
2593 if (VTy != IntrinTy)
2594 V = Builder.CreateBitCast(V, IntrinTy);
2595 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2596 V = Builder.CreateBitCast(V, VTy);
2597 }
2598
2599 return Address(V, Addr.getElementType(), Addr.getAlignment());
2600}
2601
2602CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2603
2604CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2605 : CGF(CGF) {
2606 assert(!CGF->IsSanitizerScope);
2607 CGF->IsSanitizerScope = true;
2608}
2609
2610CodeGenFunction::SanitizerScope::~SanitizerScope() {
2611 CGF->IsSanitizerScope = false;
2612}
2613
2614void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2615 const llvm::Twine &Name,
2616 llvm::BasicBlock *BB,
2617 llvm::BasicBlock::iterator InsertPt) const {
2618 LoopStack.InsertHelper(I);
2619 if (IsSanitizerScope)
2620 I->setNoSanitizeMetadata();
2621}
2622
2623void CGBuilderInserter::InsertHelper(
2624 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2625 llvm::BasicBlock::iterator InsertPt) const {
2626 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2627 if (CGF)
2628 CGF->InsertHelper(I, Name, BB, InsertPt);
2629}
2630
2631// Emits an error if we don't have a valid set of target features for the
2632// called function.
2633void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2634 const FunctionDecl *TargetDecl) {
  // SemaChecking cannot handle the x86 builtins below because their valid
  // immediate ranges depend on the target attributes of the caller.
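  // For example, __builtin_ia32_cmpps accepts predicates 0-7 in its SSE
  // encoding, while predicates 8-31 require the AVX (VEX-encoded) form, so
  // an out-of-range constant is diagnosed here against the caller's
  // feature set.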
2637 if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2638 unsigned BuiltinID = TargetDecl->getBuiltinID();
2639 if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2640 BuiltinID == X86::BI__builtin_ia32_cmpss ||
2641 BuiltinID == X86::BI__builtin_ia32_cmppd ||
2642 BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2643 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurCodeDecl);
      llvm::StringMap<bool> TargetFeatureMap;
      CGM.getContext().getFunctionFeatureMap(FeatureMap&: TargetFeatureMap, FD);
      llvm::APSInt Result =
          *(E->getArg(Arg: 2)->getIntegerConstantExpr(Ctx: CGM.getContext()));
      if (Result.getSExtValue() > 7 && !TargetFeatureMap.lookup("avx"))
2649 CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2650 << TargetDecl->getDeclName() << "avx";
2651 }
2652 }
2653 return checkTargetFeatures(Loc: E->getBeginLoc(), TargetDecl);
2654}
2655
2656// Emits an error if we don't have a valid set of target features for the
2657// called function.
2658void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2659 const FunctionDecl *TargetDecl) {
2660 // Early exit if this is an indirect call.
2661 if (!TargetDecl)
2662 return;
2663
  // Get the current enclosing function if it exists. If it doesn't,
  // we can't check the target features anyhow.
2666 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurCodeDecl);
2667 if (!FD)
2668 return;
2669
  // Grab the required features for the call. For a builtin, these are listed
  // in the .td file with the default CPU; for an always_inline function, they
  // are any listed CPU plus any listed features.
2673 unsigned BuiltinID = TargetDecl->getBuiltinID();
2674 std::string MissingFeature;
2675 llvm::StringMap<bool> CallerFeatureMap;
2676 CGM.getContext().getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target-specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, which emits an error only if an
  // unsupported target builtin is referenced by an accelerator executable
  // function.
2681 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2682 if (BuiltinID) {
2683 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(ID: BuiltinID));
2684 if (!Builtin::evaluateRequiredTargetFeatures(
2685 FeatureList, CallerFeatureMap) && !IsHipStdPar) {
2686 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2687 << TargetDecl->getDeclName()
2688 << FeatureList;
2689 }
2690 } else if (!TargetDecl->isMultiVersion() &&
2691 TargetDecl->hasAttr<TargetAttr>()) {
    // Get the required features for the callee.
2694 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2695 ParsedTargetAttr ParsedAttr =
2696 CGM.getContext().filterFunctionTargetAttrs(TD);
2697
2698 SmallVector<StringRef, 1> ReqFeatures;
2699 llvm::StringMap<bool> CalleeFeatureMap;
2700 CGM.getContext().getFunctionFeatureMap(FeatureMap&: CalleeFeatureMap, TargetDecl);
2701
2702 for (const auto &F : ParsedAttr.Features) {
2703 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2704 ReqFeatures.push_back(StringRef(F).substr(1));
2705 }
2706
2707 for (const auto &F : CalleeFeatureMap) {
2708 // Only positive features are "required".
2709 if (F.getValue())
2710 ReqFeatures.push_back(Elt: F.getKey());
2711 }
2712 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2713 if (!CallerFeatureMap.lookup(Feature)) {
2714 MissingFeature = Feature.str();
2715 return false;
2716 }
2717 return true;
2718 }) && !IsHipStdPar)
2719 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2720 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2721 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2722 llvm::StringMap<bool> CalleeFeatureMap;
2723 CGM.getContext().getFunctionFeatureMap(FeatureMap&: CalleeFeatureMap, TargetDecl);
2724
2725 for (const auto &F : CalleeFeatureMap) {
2726 if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
2727 !CallerFeatureMap.find(F.getKey())->getValue()) &&
2728 !IsHipStdPar)
2729 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2730 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2731 }
2732 }
2733}
2734
2735void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2736 if (!CGM.getCodeGenOpts().SanitizeStats)
2737 return;
2738
2739 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2740 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2741 CGM.getSanStats().create(B&: IRB, SK: SSK);
2742}
2743
2744void CodeGenFunction::EmitKCFIOperandBundle(
2745 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2746 const FunctionProtoType *FP =
2747 Callee.getAbstractInfo().getCalleeFunctionProtoType();
2748 if (FP)
2749 Bundles.emplace_back(Args: "kcfi", Args: CGM.CreateKCFITypeId(T: FP->desugar()));
2750}
2751
2752llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
2753 const MultiVersionResolverOption &RO) {
2754 llvm::SmallVector<StringRef, 8> CondFeatures;
2755 for (const StringRef &Feature : RO.Conditions.Features) {
    // Form a condition for features not already enabled in the target.
2757 if (!getContext().getTargetInfo().hasFeature(Feature))
2758 CondFeatures.push_back(Elt: Feature);
2759 }
2760 if (!CondFeatures.empty()) {
2761 return EmitAArch64CpuSupports(FeatureStrs: CondFeatures);
2762 }
2763 return nullptr;
2764}
2765
2766llvm::Value *CodeGenFunction::FormX86ResolverCondition(
2767 const MultiVersionResolverOption &RO) {
2768 llvm::Value *Condition = nullptr;
2769
2770 if (!RO.Conditions.Architecture.empty()) {
2771 StringRef Arch = RO.Conditions.Architecture;
2772 // If arch= specifies an x86-64 micro-architecture level, test the feature
2773 // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
2774 if (Arch.starts_with(Prefix: "x86-64"))
2775 Condition = EmitX86CpuSupports(FeatureStrs: {Arch});
2776 else
2777 Condition = EmitX86CpuIs(CPUStr: Arch);
2778 }
2779
2780 if (!RO.Conditions.Features.empty()) {
2781 llvm::Value *FeatureCond = EmitX86CpuSupports(FeatureStrs: RO.Conditions.Features);
2782 Condition =
2783 Condition ? Builder.CreateAnd(LHS: Condition, RHS: FeatureCond) : FeatureCond;
2784 }
2785 return Condition;
2786}
2787
2788static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2789 llvm::Function *Resolver,
2790 CGBuilderTy &Builder,
2791 llvm::Function *FuncToReturn,
2792 bool SupportsIFunc) {
2793 if (SupportsIFunc) {
2794 Builder.CreateRet(V: FuncToReturn);
2795 return;
2796 }
2797
2798 llvm::SmallVector<llvm::Value *, 10> Args(
2799 llvm::make_pointer_range(Range: Resolver->args()));
2800
2801 llvm::CallInst *Result = Builder.CreateCall(Callee: FuncToReturn, Args);
2802 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2803
2804 if (Resolver->getReturnType()->isVoidTy())
2805 Builder.CreateRetVoid();
2806 else
2807 Builder.CreateRet(V: Result);
2808}
2809
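// For a function declared with, e.g.,
//   __attribute__((target_clones("avx2", "default"))) void f(void);
// the resolvers emitted below pick the best available version, either once
// at load time via an ifunc or on every call via a musttail dispatch.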
2810void CodeGenFunction::EmitMultiVersionResolver(
2811 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2812
2813 llvm::Triple::ArchType ArchType =
2814 getContext().getTargetInfo().getTriple().getArch();
2815
2816 switch (ArchType) {
2817 case llvm::Triple::x86:
2818 case llvm::Triple::x86_64:
2819 EmitX86MultiVersionResolver(Resolver, Options);
2820 return;
2821 case llvm::Triple::aarch64:
2822 EmitAArch64MultiVersionResolver(Resolver, Options);
2823 return;
2824
  default:
    llvm_unreachable("Only implemented for x86 and AArch64 targets");
  }
2828}
2829
void CodeGenFunction::EmitAArch64MultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
  assert(!Options.empty() && "No multiversion resolver options found");
  assert(Options.back().Conditions.Features.empty() &&
         "Default case must be last");
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
  assert(SupportsIFunc &&
         "Multiversion resolver requires target IFUNC support");
  bool AArch64CpuInitialized = false;
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);

  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormAArch64ResolverCondition(RO);

    // The 'default' or 'all features enabled' case.
    if (!Condition) {
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    if (!AArch64CpuInitialized) {
      Builder.SetInsertPoint(CurBlock, CurBlock->begin());
      EmitAArch64CpuInit();
      AArch64CpuInitialized = true;
      Builder.SetInsertPoint(CurBlock);
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

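// Emit the body of an x86 multiversion resolver. Same block structure as the
// AArch64 resolver above, except that the CPU feature state is initialized
// unconditionally at the top of the entry block (EmitX86CpuInit), and targets
// without IFUNC support are also allowed, in which case each candidate
// forwards the call instead of returning a function pointer.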
void CodeGenFunction::EmitX86MultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // The resolver's entry basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormX86ResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

// Loc - where the diagnostic will point: the location in the source code at
// which the alignment assumption failed.
// SecondaryLoc - if present (it will be present when it is sufficiently
// different from Loc), the diagnostic additionally points a "note:" to this
// location. It should be the location where, e.g., the
// __attribute__((assume_aligned)) attribute was written.
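// For example (illustrative), with -fsanitize=alignment:
//   void *p = __builtin_assume_aligned(q, 32);
// emits a run-time check that 'q' really is 32-byte aligned, placed
// immediately before the llvm.assume that encodes the alignment fact.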
void CodeGenFunction::emitAlignmentAssumptionCheck(
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
             llvm::Intrinsic::getDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty->getPointeeType().isVolatileQualified())
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it; otherwise the check will be dropped by
  // optimizations.
  Assumption->removeFromParent();

  {
    SanitizerScope SanScope(this);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(false); // no offset.

    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
                                    EmitCheckTypeDescriptor(Ty)};
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
                                  EmitCheckValue(Alignment),
                                  EmitCheckValue(OffsetValue)};
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its parent.
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}

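// Wrap a boolean condition in llvm.expect so later passes can bias branch
// weights toward the [[likely]] / [[unlikely]] annotation. Roughly, for
// 'if (cond) [[likely]]' this emits:
//   %cond.expval = call i1 @llvm.expect.i1(i1 %cond, i1 true)
// with 'i1 false' as the expected value in the [[unlikely]] case.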
llvm::Value *
CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
                                                      Stmt::Likelihood LH) {
  switch (LH) {
  case Stmt::LH_None:
    return Cond;
  case Stmt::LH_Likely:
  case Stmt::LH_Unlikely:
    // Don't generate llvm.expect at -O0, as the backend won't use it for
    // anything.
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      return Cond;
    llvm::Type *CondTy = Cond->getType();
    assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
    llvm::Function *FnExpect =
        CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
    llvm::Value *ExpectedValueOfCond =
        llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
    return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
                              Cond->getName() + ".expval");
  }
  llvm_unreachable("Unknown Likelihood");
}

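// Resize a boolean vector by lane-copying through a shufflevector. Widening
// pads the new trailing lanes with poison; narrowing simply drops lanes.
// E.g. <4 x i1> -> <8 x i1> uses mask {0, 1, 2, 3, -1, -1, -1, -1}, where
// the -1 entries become poison lanes.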
llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
                                                    unsigned NumElementsDst,
                                                    const llvm::Twine &Name) {
  auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
  unsigned NumElementsSrc = SrcTy->getNumElements();
  if (NumElementsSrc == NumElementsDst)
    return SrcVec;

  std::vector<int> ShuffleMask(NumElementsDst, -1);
  for (unsigned MaskIdx = 0;
       MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
    ShuffleMask[MaskIdx] = MaskIdx;

  return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
}
