//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}
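// For illustration (a sketch, not code emitted by this function): when
// markers are enabled, local allocas are bracketed in the IR roughly as
//   %x = alloca i32, align 4
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
//   ...
//   call void @llvm.lifetime.end.p0(i64 4, ptr %x)
// which lets the optimizer reuse stack slots and lets ASan diagnose
// use-after-scope.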

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()),
      PGO(std::make_unique<CodeGenPGO>(cgm)),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "failed to deactivate a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc.) at some point. Doing it once function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, and thus modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}
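// For example, under -ffp-exception-behavior=strict this returns
// llvm::fp::ebStrict, which the IRBuilder threads into constrained
// intrinsics roughly like
//   call float @llvm.experimental.constrained.fadd.f32(
//       float %a, float %b,
//       metadata !"round.dynamic", metadata !"fpexcept.strict")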

void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}
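// E.g. with -ffast-math all of the flags above are set and subsequent FP
// instructions come out as "fadd fast float %a, %b", while -ffp-contract=fast
// alone only yields the 'contract' flag.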

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue = CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue(
      "unsafe-fp-math",
      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
          FPFeatures.allowFPContractAcrossStatement());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
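// Typical usage in expression emission (a sketch):
//   {
//     CGFPOptionsRAII FPOptsRAII(CGF, E); // apply E's pragma FP state
//     ... emit the FP operation for E ...
//   } // destructor restores the previous FP environment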

static LValue
makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType,
                           bool MightBeSigned, CodeGenFunction &CGF,
                           KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment =
      CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
  Address Addr =
      MightBeSigned
          ? CGF.makeNaturalAddressForPointer(V, T, Alignment,
                                             /*ForPointeeType=*/false, nullptr,
                                             nullptr, IsKnownNonNull)
          : Address(V, CGF.ConvertTypeForMem(T), Alignment, IsKnownNonNull);
  return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

LValue
CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
                                            KnownNonNull_t IsKnownNonNull) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ true, *this,
                                      IsKnownNonNull);
}

LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ true, *this);
}

LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,
                                                      QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ false, *this);
}

LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,
                                                             QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ false, *this);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

llvm::Type *CodeGenFunction::convertTypeForLoadStore(QualType ASTTy,
                                                     llvm::Type *LLVMTy) {
  return CGM.getTypes().convertTypeForLoadStore(ASTTy, LLVMTy);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
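// For example: 'int', pointers, and ext_vector_type vectors are TEK_Scalar;
// '_Complex double' is TEK_Complex; structs and arrays are TEK_Aggregate; and
// '_Atomic(int)' is classified by its underlying 'int', i.e. TEK_Scalar.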

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the
  // debug region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}
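// E.g. a function whose body falls through to a single 'return' keeps its
// code in the current block and the dedicated "return" block is deleted;
// only functions with multiple return paths end up with a distinct,
// branched-to return block.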

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");
  assert(LifetimeExtendedCleanupStack.empty() &&
         "mismatched push/pop of cleanups in EHStack!");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "mismatched activate/deactivate of cleanups!");

  if (CGM.shouldEmitConvergenceTokens()) {
    ConvergenceTokenStack.pop_back();
    assert(ConvergenceTokenStack.empty() &&
           "mismatched push/pop in convergence stack!");
  }

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 &&
                               NumSimpleReturnExprs == NumReturnExprs &&
                               ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyNoopCleanups =
      HasCleanups && EHStack.containsOnlyNoopCleanups(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyNoopCleanups;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  uint64_t RetKeyInstructionsAtomGroup = Loc ? Loc->getAtomGroup() : 0;
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc,
                     RetKeyInstructionsAtomGroup);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should
    // be no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getOrInsertDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt, if created, was lazily created when it was first
  // required; remove it now since it was just for our own convenience.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::PoisonValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width
  // from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca =
        dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
  if (!Ty->isFunctionNoProtoType())
    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(
      Ty, Out, /*NormalizeIntegers=*/false);
  return llvm::ConstantInt::get(
      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}
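// A sketch of how this hash is consumed: under -fsanitize=function, an
// indirect call site compares the hash attached to the callee's prologue
// (via the func_sanitize metadata set up in StartFunction below) with the
// hash of the call's static type, so e.g.
//   void (*fp)(int) = reinterpret_cast<void (*)(int)>(f_taking_double);
//   fp(42); // diagnosed at run time: function type mismatch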

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<DeviceKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!(getLangOpts().OpenCL ||
        (getLangOpts().CUDA &&
         getContext().getTargetInfo().getTriple().isSPIRV())))
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::PoisonValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    auto Eval = [&](Expr *E) {
      return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
    };
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    auto Eval = [&](Expr *E) {
      return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
    };
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}
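// For example, this matches both forms of the usual allocator entry point
// (regardless of the enclosing class or namespace):
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);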

bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
  return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
         getTarget().getCXXABI().isMicrosoft() &&
         llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
           return isInAllocaArgument(CGM.getCXXABI(), P->getType());
         });
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    SanitizerMask no_sanitize_mask;
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      no_sanitize_mask |= Attr->getMask();
      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    // Apply the no_sanitize* attributes to SanOpts.
    SanOpts.Mask &= ~no_sanitize_mask;
    if (no_sanitize_mask & SanitizerKind::Address)
      SanOpts.set(SanitizerKind::KernelAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelAddress)
      SanOpts.set(SanitizerKind::Address, false);
    if (no_sanitize_mask & SanitizerKind::HWAddress)
      SanOpts.set(SanitizerKind::KernelHWAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
      SanOpts.set(SanitizerKind::HWAddress, false);

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);

    // Some passes need the non-negated no_sanitize attribute. Pass them on.
    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
      if (no_sanitize_mask & SanitizerKind::Thread)
        Fn->addFnAttr("no_sanitize_thread");
    }
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address |
                         SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.has(SanitizerKind::Type))
      Fn->addFnAttr(llvm::Attribute::SanitizeType);
    if (SanOpts.has(SanitizerKind::NumericalStability))
      Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  if (SanOpts.has(SanitizerKind::Realtime))
    if (FD && FD->getASTContext().hasAnyFunctionEffects())
      for (const FunctionEffectWithCondition &Fe : FD->getFunctionEffects()) {
        if (Fe.Effect.kind() == FunctionEffect::Kind::NonBlocking)
          Fn->addFnAttr(llvm::Attribute::SanitizeRealtime);
        else if (Fe.Effect.kind() == FunctionEffect::Kind::Blocking)
          Fn->addFnAttr(llvm::Attribute::SanitizeRealtimeBlocking);
      }

  // Apply the fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() &&
           II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply XRay attributes to the function (as a string, for now).
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

  unsigned Count, Offset;
  StringRef Section;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
    Section = Attr->getSection();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Section.empty())
    Section = CGM.getCodeGenOpts().PatchableFunctionEntrySection;
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
    if (!Section.empty())
      Fn->addFnAttr("patchable-function-entry-section", Section);
  }
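  // For example, -fpatchable-function-entry=2,1 (Count=2, Offset=1) yields
  //   "patchable-function-prefix"="1"  (one NOP before the entry point)
  //   "patchable-function-entry"="1"   (Count - Offset NOPs after it)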
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
  // backends as they don't need it -- instructions on these architectures are
  // always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D && D->hasAttr<HybridPatchableAttr>())
    Fn->addFnAttr(llvm::Attribute::HybridPatchable);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().CUDA &&
              getContext().getTargetInfo().getTriple().isSPIRV()) ||
             ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
              getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  if (FD && FD->hasAttr<ClspvLibclcBuiltinAttr>()) {
    Fn->setMetadata("clspv_libclc_builtin",
                    llvm::MDNode::get(getLLVMContext(), {}));
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(
          llvm::LLVMContext::MD_func_sanitize,
          MDB.createRTTIPointerPrologue(
              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull &&
        !FnRetTy->isRecordType()) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //   Recursion is not supported.
  //
  // HLSL
  //   Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //   kernels cannot include RTTI information, exception classes,
  //   recursive code, virtual functions or make use of C++ libraries that
  //   are not compiled for the device.
  if (FD &&
      ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
       getLangOpts().HLSL || getLangOpts().SYCLIsDevice ||
       (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realignment to this alignment on
  // any main function, which will certainly need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  // Add the vscale_range attribute if appropriate.
  llvm::StringMap<bool> FeatureMap;
  bool IsArmStreaming = false;
  if (FD) {
    getContext().getFunctionFeatureMap(FeatureMap, FD);
    IsArmStreaming = IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true);
  }
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts(), IsArmStreaming,
                                                  &FeatureMap);
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Poison = llvm::PoisonValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Poison, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute is set to the mcount function name, which is
  // architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = makeNaturalAddressForPointer(
        &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(),
        /*ForPointeeType=*/false, nullptr, nullptr, KnownNonNull);
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
      Builder.CreateStore(ReturnValue.emitRawPointer(*this),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr =
        Builder.CreateStructGEP(CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, ConvertType(RetTy),
                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  if (FD && getLangOpts().HLSL) {
    // Handle emitting HLSL entry functions.
    if (FD->hasAttr<HLSLShaderAttr>()) {
      CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
    }
    CGM.getHLSLRuntime().setHLSLFunctionAttributes(FD, Fn);
  }

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
      MD && !MD->isStatic()) {
    bool IsInLambda =
        MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
    if (MD->isImplicitObjectMemberFunction())
      CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    if (IsInLambda) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either
        // by value or by reference - make sure CXXThisValue points to the
        // correct object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address. Sign this pointer.
          CXXThisValue = ThisFieldLValue.getPointer(*this);
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else if (MD->isImplicitObjectMemberFunction()) {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no captures, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(
          isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to emit
  // the type size, but only if the function is not naked. Naked functions
  // have no prolog to run this evaluation.
  if (!FD || !FD->hasAttr<NakedAttr>()) {
    for (const VarDecl *VD : Args) {
      // Dig out the type as written from ParmVarDecls; it's unclear whether
      // the standard (C99 6.9.1p10) requires this, but we're following the
      // precedent set by gcc.
      QualType Ty;
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
        Ty = PVD->getOriginalType();
      else
        Ty = VD->getType();

      if (Ty->isVariablyModifiedType())
        EmitVariablyModifiedType(Ty);
    }
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(getOrEmitConvergenceEntryToken(CurFn));
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  maybeCreateMCDCCondBitmap();
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  // Do not skip over the instrumentation when single byte coverage mode is
  // enabled.
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
      !llvm::EnableSingleByteCoverage) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}
1400
1401 /// Tries to mark the given function nounwind based on the absence of any
1402 /// throwing calls within it. We believe this is lightweight enough to do
1403 /// at -O0.
1404static void TryMarkNoThrow(llvm::Function *F) {
1405 // LLVM treats 'nounwind' on a function as part of the type, so we
1406 // can't do this on functions that can be replaced (interposed) at link time.
1407 if (F->isInterposable()) return;
1408
1409 for (llvm::BasicBlock &BB : *F)
1410 for (llvm::Instruction &I : BB)
1411 if (I.mayThrow())
1412 return;
1413
1414 F->setDoesNotThrow();
1415}
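// A minimal sketch of what the scan above accepts (illustrative, not from
// this file):
//
//   extern "C" int add(int a, int b) { return a + b; }
//
// Nothing in the emitted body mayThrow(), and plain external linkage is not
// interposable, so the function is retroactively marked nounwind even at
// -O0. A weak definition would be rejected by the isInterposable() test.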
1416
1417QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1418 FunctionArgList &Args) {
1419 const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl());
1420 QualType ResTy = FD->getReturnType();
1421
1422 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD);
1423 if (MD && MD->isImplicitObjectMemberFunction()) {
1424 if (CGM.getCXXABI().HasThisReturn(GD))
1425 ResTy = MD->getThisType();
1426 else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1427 ResTy = CGM.getContext().VoidPtrTy;
1428 CGM.getCXXABI().buildThisParam(CGF&: *this, Params&: Args);
1429 }
1430
1431 // The base version of an inheriting constructor whose constructed base is a
1432 // virtual base is not passed any arguments (because it doesn't actually call
1433 // the inherited constructor).
1434 bool PassedParams = true;
1435 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Val: FD))
1436 if (auto Inherited = CD->getInheritedConstructor())
1437 PassedParams =
1438 getTypes().inheritingCtorHasParams(Inherited, Type: GD.getCtorType());
1439
1440 if (PassedParams) {
1441 for (auto *Param : FD->parameters()) {
1442 Args.push_back(Param);
1443 if (!Param->hasAttr<PassObjectSizeAttr>())
1444 continue;
1445
1446 auto *Implicit = ImplicitParamDecl::Create(
1447 getContext(), Param->getDeclContext(), Param->getLocation(),
1448 /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
1449 SizeArguments[Param] = Implicit;
1450 Args.push_back(Elt: Implicit);
1451 }
1452 }
1453
1454 if (MD && (isa<CXXConstructorDecl>(Val: MD) || isa<CXXDestructorDecl>(Val: MD)))
1455 CGM.getCXXABI().addImplicitStructorParams(CGF&: *this, ResTy, Params&: Args);
1456
1457 return ResTy;
1458}
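// A hedged example of the pass_object_size handling above:
//
//   void fill(void *buf __attribute__((pass_object_size(0))), int v);
//
// Args receives 'buf', then an implicit size_t parameter (recorded in
// SizeArguments), then 'v'; call sites supply the object size in the
// implicit slot so __builtin_object_size-style checks can use it.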
1459
1460void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1461 const CGFunctionInfo &FnInfo) {
1462 assert(Fn && "generating code for null Function");
1463 const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl());
1464 CurGD = GD;
1465
1466 FunctionArgList Args;
1467 QualType ResTy = BuildFunctionArgList(GD, Args);
1468
1469 CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, Decl: FD);
1470
1471 if (FD->isInlineBuiltinDeclaration()) {
1472 // When generating code for a builtin with an inline declaration, use a
1473 // mangled name to hold the actual body, while keeping an external
1474 // definition in case the function pointer is referenced somewhere.
1475 std::string FDInlineName = (Fn->getName() + ".inline").str();
1476 llvm::Module *M = Fn->getParent();
1477 llvm::Function *Clone = M->getFunction(Name: FDInlineName);
1478 if (!Clone) {
1479 Clone = llvm::Function::Create(Ty: Fn->getFunctionType(),
1480 Linkage: llvm::GlobalValue::InternalLinkage,
1481 AddrSpace: Fn->getAddressSpace(), N: FDInlineName, M);
1482 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1483 }
1484 Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1485 Fn = Clone;
1486 } else {
1487 // Detect the unusual situation where an inline version is shadowed by a
1488 // non-inline version. In that case we should pick the external one
1489 // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1490 // to detect that situation before we reach codegen, so do some late
1491 // replacement.
1492 for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1493 PD = PD->getPreviousDecl()) {
1494 if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1495 std::string FDInlineName = (Fn->getName() + ".inline").str();
1496 llvm::Module *M = Fn->getParent();
1497 if (llvm::Function *Clone = M->getFunction(Name: FDInlineName)) {
1498 Clone->replaceAllUsesWith(V: Fn);
1499 Clone->eraseFromParent();
1500 }
1501 break;
1502 }
1503 }
1504 }
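// For reference, the kind of declaration that reaches the branches above
// (a sketch, assuming a glibc-style fortified header):
//
//   extern inline __attribute__((always_inline, gnu_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* checked body */ }
//
// The body lands in an internal "memcpy.inline" clone that call sites can
// inline, while plain "memcpy" stays external for address-taken uses.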
1505
1506 // Check if we should generate debug info for this function.
1507 if (FD->hasAttr<NoDebugAttr>()) {
1508 // Clear non-distinct debug info that was possibly attached to the function
1509 // due to an earlier declaration without the nodebug attribute.
1510 Fn->setSubprogram(nullptr);
1511 // Disable debug info for the remainder of this function's emission.
1512 DebugInfo = nullptr;
1513 }
1514 // Finalize function debug info on exit.
1515 auto Cleanup = llvm::make_scope_exit(F: [this] {
1516 if (CGDebugInfo *DI = getDebugInfo())
1517 DI->completeFunction();
1518 });
1519
1520 // The function might not have a body if we're generating thunks for a
1521 // function declaration.
1522 SourceRange BodyRange;
1523 if (Stmt *Body = FD->getBody())
1524 BodyRange = Body->getSourceRange();
1525 else
1526 BodyRange = FD->getLocation();
1527 CurEHLocation = BodyRange.getEnd();
1528
1529 // Use the location of the start of the function to determine where
1530 // the function definition is located. By default, use the location
1531 // of the declaration as the location for the subprogram. A function
1532 // may lack a declaration in the source code if it is created by
1533 // codegen (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1534 SourceLocation Loc = FD->getLocation();
1535
1536 // If this is a function specialization then use the pattern body
1537 // as the location for the function.
1538 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1539 if (SpecDecl->hasBody(Definition&: SpecDecl))
1540 Loc = SpecDecl->getLocation();
1541
1542 Stmt *Body = FD->getBody();
1543
1544 if (Body) {
1545 // Coroutines always emit lifetime markers.
1546 if (isa<CoroutineBodyStmt>(Val: Body))
1547 ShouldEmitLifetimeMarkers = true;
1548
1549 // Initialize the helper that detects jumps which can cause invalid
1550 // lifetime markers.
1551 if (ShouldEmitLifetimeMarkers)
1552 Bypasses.Init(CGM, Body);
1553 }
1554
1555 // Emit the standard function prologue.
1556 StartFunction(GD, RetTy: ResTy, Fn, FnInfo, Args, Loc, StartLoc: BodyRange.getBegin());
1557
1558 // Save parameters for coroutine function.
1559 if (Body && isa_and_nonnull<CoroutineBodyStmt>(Val: Body))
1560 llvm::append_range(C&: FnArgs, R: FD->parameters());
1561
1562 // Ensure that the function adheres to the forward progress guarantee, which
1563 // is required by certain optimizations.
1564 // In C++11 and up, the attribute will be removed if the body contains a
1565 // trivial empty loop.
1566 if (checkIfFunctionMustProgress())
1567 CurFn->addFnAttr(llvm::Attribute::MustProgress);
1568
1569 // Generate the body of the function.
1570 PGO->assignRegionCounters(GD, Fn: CurFn);
1571 if (isa<CXXDestructorDecl>(Val: FD))
1572 EmitDestructorBody(Args);
1573 else if (isa<CXXConstructorDecl>(Val: FD))
1574 EmitConstructorBody(Args);
1575 else if (getLangOpts().CUDA &&
1576 !getLangOpts().CUDAIsDevice &&
1577 FD->hasAttr<CUDAGlobalAttr>())
1578 CGM.getCUDARuntime().emitDeviceStub(CGF&: *this, Args);
1579 else if (isa<CXXMethodDecl>(Val: FD) &&
1580 cast<CXXMethodDecl>(Val: FD)->isLambdaStaticInvoker()) {
1581 // The lambda static invoker function is special, because it forwards or
1582 // clones the body of the function call operator (but is actually static).
1583 EmitLambdaStaticInvokeBody(MD: cast<CXXMethodDecl>(Val: FD));
1584 } else if (isa<CXXMethodDecl>(Val: FD) &&
1585 isLambdaCallOperator(MD: cast<CXXMethodDecl>(Val: FD)) &&
1586 !FnInfo.isDelegateCall() &&
1587 cast<CXXMethodDecl>(Val: FD)->getParent()->getLambdaStaticInvoker() &&
1588 hasInAllocaArg(MD: cast<CXXMethodDecl>(Val: FD))) {
1589 // If emitting a lambda with a static invoker on X86 Windows, change
1590 // the call operator body.
1591 // Make sure that this is a call operator with an inalloca arg, and check
1592 // for a delegate call to ensure this is the original call op and not the
1593 // new forwarding function for the static invoker.
1594 EmitLambdaInAllocaCallOpBody(MD: cast<CXXMethodDecl>(Val: FD));
1595 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(Val: FD) &&
1596 (cast<CXXMethodDecl>(Val: FD)->isCopyAssignmentOperator() ||
1597 cast<CXXMethodDecl>(Val: FD)->isMoveAssignmentOperator())) {
1598 // Implicit copy-assignment gets the same special treatment as implicit
1599 // copy-constructors.
1600 emitImplicitAssignmentOperatorBody(Args);
1601 } else if (DeviceKernelAttr::isOpenCLSpelling(
1602 FD->getAttr<DeviceKernelAttr>()) &&
1603 GD.getKernelReferenceKind() == KernelReferenceKind::Kernel) {
1604 CallArgList CallArgs;
1605 for (unsigned i = 0; i < Args.size(); ++i) {
1606 Address ArgAddr = GetAddrOfLocalVar(VD: Args[i]);
1607 QualType ArgQualType = Args[i]->getType();
1608 RValue ArgRValue = convertTempToRValue(addr: ArgAddr, type: ArgQualType, Loc);
1609 CallArgs.add(rvalue: ArgRValue, type: ArgQualType);
1610 }
1611 GlobalDecl GDStub = GlobalDecl(FD, KernelReferenceKind::Stub);
1612 const FunctionType *FT = cast<FunctionType>(FD->getType());
1613 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
1614 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
1615 Args: CallArgs, Ty: FT, /*ChainCall=*/false);
1616 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(Info: FnInfo);
1617 llvm::Constant *GDStubFunctionPointer =
1618 CGM.getRawFunctionPointer(GD: GDStub, Ty: FTy);
1619 CGCallee GDStubCallee = CGCallee::forDirect(functionPtr: GDStubFunctionPointer, abstractInfo: GDStub);
1620 EmitCall(CallInfo: FnInfo, Callee: GDStubCallee, ReturnValue: ReturnValueSlot(), Args: CallArgs, CallOrInvoke: nullptr, IsMustTail: false,
1621 Loc);
1622 } else if (Body) {
1623 EmitFunctionBody(Body);
1624 } else
1625 llvm_unreachable("no definition for emitted function");
1626
1627 // C++11 [stmt.return]p2:
1628 // Flowing off the end of a function [...] results in undefined behavior in
1629 // a value-returning function.
1630 // C11 6.9.1p12:
1631 // If the '}' that terminates a function is reached, and the value of the
1632 // function call is used by the caller, the behavior is undefined.
1633 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1634 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1635 bool ShouldEmitUnreachable =
1636 CGM.getCodeGenOpts().StrictReturn ||
1637 !CGM.MayDropFunctionReturn(Context: FD->getASTContext(), ReturnType: FD->getReturnType());
1638 if (SanOpts.has(K: SanitizerKind::Return)) {
1639 auto CheckOrdinal = SanitizerKind::SO_Return;
1640 auto CheckHandler = SanitizerHandler::MissingReturn;
1641 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
1642 llvm::Value *IsFalse = Builder.getFalse();
1643 EmitCheck(Checked: std::make_pair(x&: IsFalse, y&: CheckOrdinal), Check: CheckHandler,
1644 StaticArgs: EmitCheckSourceLocation(Loc: FD->getLocation()), DynamicArgs: {});
1645 } else if (ShouldEmitUnreachable) {
1646 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1647 EmitTrapCall(llvm::Intrinsic::trap);
1648 }
1649 if (SanOpts.has(K: SanitizerKind::Return) || ShouldEmitUnreachable) {
1650 Builder.CreateUnreachable();
1651 Builder.ClearInsertionPoint();
1652 }
1653 }
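// A hedged sketch of the case handled above:
//
//   int f(bool b) { if (b) return 1; }   // falls off the end when !b
//
// Control can reach the closing brace with an insert point still set, so
// this emits either the MissingReturn sanitizer check or (at -O0 with
// StrictReturn) llvm.trap, followed by 'unreachable' in both cases.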
1654
1655 // Emit the standard function epilogue.
1656 FinishFunction(EndLoc: BodyRange.getEnd());
1657
1658 PGO->verifyCounterMap();
1659
1660 // If we haven't marked the function nothrow through other means, do
1661 // a quick pass now to see if we can.
1662 if (!CurFn->doesNotThrow())
1663 TryMarkNoThrow(F: CurFn);
1664}
1665
1666 /// ContainsLabel - Return true if the statement contains a label in it. If
1667 /// this statement is not executed normally, then the fact that it contains
1668 /// no label means we can simply remove its code.
1669bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1670 // Null statement, not a label!
1671 if (!S) return false;
1672
1673 // If this is a label, we have to emit the code, consider something like:
1674 // if (0) { ... foo: bar(); } goto foo;
1675 //
1676 // TODO: If anyone cared, we could track __label__'s, since we know that you
1677 // can't jump to one from outside their declared region.
1678 if (isa<LabelStmt>(Val: S))
1679 return true;
1680
1681 // If this is a case/default statement, and we haven't seen a switch, we have
1682 // to emit the code.
1683 if (isa<SwitchCase>(Val: S) && !IgnoreCaseStmts)
1684 return true;
1685
1686 // If this is a switch statement, we want to ignore cases below it.
1687 if (isa<SwitchStmt>(Val: S))
1688 IgnoreCaseStmts = true;
1689
1690 // Scan subexpressions for verboten labels.
1691 for (const Stmt *SubStmt : S->children())
1692 if (ContainsLabel(S: SubStmt, IgnoreCaseStmts))
1693 return true;
1694
1695 return false;
1696}
1697
1698/// containsBreak - Return true if the statement contains a break out of it.
1699/// If the statement (recursively) contains a switch or loop with a break
1700/// inside of it, this is fine.
1701bool CodeGenFunction::containsBreak(const Stmt *S) {
1702 // Null statement, not a break!
1703 if (!S) return false;
1704
1705 // If this is a switch or loop that defines its own break scope, then we can
1706 // include it and anything inside of it.
1707 if (isa<SwitchStmt>(Val: S) || isa<WhileStmt>(Val: S) || isa<DoStmt>(Val: S) ||
1708 isa<ForStmt>(Val: S))
1709 return false;
1710
1711 if (isa<BreakStmt>(Val: S))
1712 return true;
1713
1714 // Scan subexpressions for verboten breaks.
1715 for (const Stmt *SubStmt : S->children())
1716 if (containsBreak(S: SubStmt))
1717 return true;
1718
1719 return false;
1720}
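// For instance (illustrative statements, e.g. when testing a candidate
// loop body):
//
//   while (c) { if (p) break; }   // false: the while owns its breaks
//   { if (p) break; }             // true: the break escapes this statement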
1721
1722bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1723 if (!S) return false;
1724
1725 // Some statement kinds add a scope and thus never add a decl to the current
1726 // scope. Note, this list is longer than the list of statements that might
1727 // have an unscoped decl nested within them, but this way is conservatively
1728 // correct even if more statement kinds are added.
1729 if (isa<IfStmt>(Val: S) || isa<SwitchStmt>(Val: S) || isa<WhileStmt>(Val: S) ||
1730 isa<DoStmt>(Val: S) || isa<ForStmt>(Val: S) || isa<CompoundStmt>(Val: S) ||
1731 isa<CXXForRangeStmt>(Val: S) || isa<CXXTryStmt>(Val: S) ||
1732 isa<ObjCForCollectionStmt>(Val: S) || isa<ObjCAtTryStmt>(Val: S))
1733 return false;
1734
1735 if (isa<DeclStmt>(Val: S))
1736 return true;
1737
1738 for (const Stmt *SubStmt : S->children())
1739 if (mightAddDeclToScope(S: SubStmt))
1740 return true;
1741
1742 return false;
1743}
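// Quick examples (a sketch):
//
//   for (;;) int i = 0;   // false: the for introduces its own scope
//   L: int j = 0;         // true: a label does not open a scope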
1744
1745 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1746 /// to a constant, or if it does but contains a label, return false. If it
1747 /// constant folds, return true and set the boolean result in ResultBool.
1748bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1749 bool &ResultBool,
1750 bool AllowLabels) {
1751 // If MC/DC is enabled, disable folding so that we can instrument all
1752 // conditions to yield complete test vectors. We still keep track of
1753 // folded conditions during region mapping and visualization.
1754 if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1755 CGM.getCodeGenOpts().MCDCCoverage)
1756 return false;
1757
1758 llvm::APSInt ResultInt;
1759 if (!ConstantFoldsToSimpleInteger(Cond, Result&: ResultInt, AllowLabels))
1760 return false;
1761
1762 ResultBool = ResultInt.getBoolValue();
1763 return true;
1764}
1765
1766 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1767 /// to a constant, or if it does but contains a label, return false. If it
1768 /// constant folds, return true and store the folded value in ResultInt.
1769bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1770 llvm::APSInt &ResultInt,
1771 bool AllowLabels) {
1772 // FIXME: Rename and handle conversion of other evaluatable things
1773 // to bool.
1774 Expr::EvalResult Result;
1775 if (!Cond->EvaluateAsInt(Result, Ctx: getContext()))
1776 return false; // Not foldable, not integer or not fully evaluatable.
1777
1778 llvm::APSInt Int = Result.Val.getInt();
1779 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1780 return false; // Contains a label.
1781
1782 PGO->markStmtMaybeUsed(Cond);
1783 ResultInt = Int;
1784 return true;
1785}
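// Examples (a sketch): "sizeof(long) >= 4" folds to 1 via EvaluateAsInt;
// "n > 0" for a runtime 'n' does not fold; and a condition such as the GNU
// statement expression "({ L: 1; })" may fold but contains a label, so it
// is rejected unless AllowLabels and its code is still emitted.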
1786
1787/// Strip parentheses and simplistic logical-NOT operators.
1788const Expr *CodeGenFunction::stripCond(const Expr *C) {
1789 while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(Val: C->IgnoreParens())) {
1790 if (Op->getOpcode() != UO_LNot)
1791 break;
1792 C = Op->getSubExpr();
1793 }
1794 return C->IgnoreParens();
1795}
1796
1797/// Determine whether the given condition is an instrumentable condition
1798/// (i.e. no "&&" or "||").
1799bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1800 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val: stripCond(C));
1801 return (!BOp || !BOp->isLogicalOp());
1802}
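// For example (sketch): "a && b" is not an instrumentable leaf, while
// "!(a > b)" is, since stripCond() peels parentheses and logical-NOTs
// before the logical-operator test.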
1803
1804/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1805/// increments a profile counter based on the semantics of the given logical
1806/// operator opcode. This is used to instrument branch condition coverage for
1807/// logical operators.
1808void CodeGenFunction::EmitBranchToCounterBlock(
1809 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1810 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1811 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1812 // If not instrumenting, just emit a branch.
1813 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1814 if (!InstrumentRegions || !isInstrumentedCondition(C: Cond))
1815 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1816
1817 const Stmt *CntrStmt = (CntrIdx ? CntrIdx : Cond);
1818
1819 llvm::BasicBlock *ThenBlock = nullptr;
1820 llvm::BasicBlock *ElseBlock = nullptr;
1821 llvm::BasicBlock *NextBlock = nullptr;
1822
1823 // Create the block we'll use to increment the appropriate counter.
1824 llvm::BasicBlock *CounterIncrBlock = createBasicBlock(name: "lop.rhscnt");
1825
1826 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1827 // means we need to evaluate the condition and increment the counter on TRUE:
1828 //
1829 // if (Cond)
1830 // goto CounterIncrBlock;
1831 // else
1832 // goto FalseBlock;
1833 //
1834 // CounterIncrBlock:
1835 // Counter++;
1836 // goto TrueBlock;
1837
1838 if (LOp == BO_LAnd) {
1839 ThenBlock = CounterIncrBlock;
1840 ElseBlock = FalseBlock;
1841 NextBlock = TrueBlock;
1842 }
1843
1844 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1845 // we need to evaluate the condition and increment the counter on FALSE:
1846 //
1847 // if (Cond)
1848 // goto TrueBlock;
1849 // else
1850 // goto CounterIncrBlock;
1851 //
1852 // CounterIncrBlock:
1853 // Counter++;
1854 // goto FalseBlock;
1855
1856 else if (LOp == BO_LOr) {
1857 ThenBlock = TrueBlock;
1858 ElseBlock = CounterIncrBlock;
1859 NextBlock = FalseBlock;
1860 } else {
1861 llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1862 }
1863
1864 // Emit Branch based on condition.
1865 EmitBranchOnBoolExpr(Cond, TrueBlock: ThenBlock, FalseBlock: ElseBlock, TrueCount, LH);
1866
1867 // Emit the block containing the counter increment(s).
1868 EmitBlock(BB: CounterIncrBlock);
1869
1870 // Increment corresponding counter; if index not provided, use Cond as index.
1871 incrementProfileCounter(S: CntrStmt);
1872
1873 // Go to the next block.
1874 EmitBranch(Block: NextBlock);
1875}
1876
1877/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1878/// statement) to the specified blocks. Based on the condition, this might try
1879/// to simplify the codegen of the conditional based on the branch.
1880/// \param LH The value of the likelihood attribute on the True branch.
1881/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1882/// ConditionalOperator (ternary) through a recursive call for the operator's
1883/// LHS and RHS nodes.
1884void CodeGenFunction::EmitBranchOnBoolExpr(
1885 const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1886 uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp,
1887 const VarDecl *ConditionalDecl) {
1888 Cond = Cond->IgnoreParens();
1889
1890 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Val: Cond)) {
1891 // Handle X && Y in a condition.
1892 if (CondBOp->getOpcode() == BO_LAnd) {
1893 MCDCLogOpStack.push_back(Elt: CondBOp);
1894
1895 // If we have "1 && X", simplify the code. "0 && X" would have constant
1896 // folded if the case was simple enough.
1897 bool ConstantBool = false;
1898 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getLHS(), ResultBool&: ConstantBool) &&
1899 ConstantBool) {
1900 // br(1 && X) -> br(X).
1901 incrementProfileCounter(CondBOp);
1902 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LAnd, TrueBlock,
1903 FalseBlock, TrueCount, LH);
1904 MCDCLogOpStack.pop_back();
1905 return;
1906 }
1907
1908 // If we have "X && 1", simplify the code to use an uncond branch.
1909 // "X && 0" would have been constant folded to 0.
1910 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getRHS(), ResultBool&: ConstantBool) &&
1911 ConstantBool) {
1912 // br(X && 1) -> br(X).
1913 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1914 FalseBlock, TrueCount, LH, CondBOp);
1915 MCDCLogOpStack.pop_back();
1916 return;
1917 }
1918
1919 // Emit the LHS as a conditional. If the LHS conditional is false, we
1920 // want to jump to the FalseBlock.
1921 llvm::BasicBlock *LHSTrue = createBasicBlock(name: "land.lhs.true");
1922 // The counter tells us how often we evaluate RHS, and all of TrueCount
1923 // can be propagated to that branch.
1924 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1925
1926 ConditionalEvaluation eval(*this);
1927 {
1928 ApplyDebugLocation DL(*this, Cond);
1929 // Propagate the likelihood attribute like __builtin_expect
1930 // __builtin_expect(X && Y, 1) -> X and Y are likely
1931 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1932 EmitBranchOnBoolExpr(Cond: CondBOp->getLHS(), TrueBlock: LHSTrue, FalseBlock, TrueCount: RHSCount,
1933 LH: LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1934 EmitBlock(BB: LHSTrue);
1935 }
1936
1937 incrementProfileCounter(CondBOp);
1938 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1939
1940 // Any temporaries created here are conditional.
1941 eval.begin(CGF&: *this);
1942 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LAnd, TrueBlock,
1943 FalseBlock, TrueCount, LH);
1944 eval.end(CGF&: *this);
1945 MCDCLogOpStack.pop_back();
1946 return;
1947 }
1948
1949 if (CondBOp->getOpcode() == BO_LOr) {
1950 MCDCLogOpStack.push_back(Elt: CondBOp);
1951
1952 // If we have "0 || X", simplify the code. "1 || X" would have constant
1953 // folded if the case was simple enough.
1954 bool ConstantBool = false;
1955 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getLHS(), ResultBool&: ConstantBool) &&
1956 !ConstantBool) {
1957 // br(0 || X) -> br(X).
1958 incrementProfileCounter(CondBOp);
1959 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LOr, TrueBlock,
1960 FalseBlock, TrueCount, LH);
1961 MCDCLogOpStack.pop_back();
1962 return;
1963 }
1964
1965 // If we have "X || 0", simplify the code to use an uncond branch.
1966 // "X || 1" would have been constant folded to 1.
1967 if (ConstantFoldsToSimpleInteger(Cond: CondBOp->getRHS(), ResultBool&: ConstantBool) &&
1968 !ConstantBool) {
1969 // br(X || 0) -> br(X).
1970 EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1971 FalseBlock, TrueCount, LH, CondBOp);
1972 MCDCLogOpStack.pop_back();
1973 return;
1974 }
1975 // Emit the LHS as a conditional. If the LHS conditional is true, we
1976 // want to jump to the TrueBlock.
1977 llvm::BasicBlock *LHSFalse = createBasicBlock(name: "lor.lhs.false");
1978 // We have the count for entry to the RHS and for the whole expression
1979 // being true, so we can divvy up the True count between the short circuit
1980 // and the RHS.
1981 uint64_t LHSCount =
1982 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1983 uint64_t RHSCount = TrueCount - LHSCount;
1984
1985 ConditionalEvaluation eval(*this);
1986 {
1987 // Propagate the likelihood attribute like __builtin_expect
1988 // __builtin_expect(X || Y, 1) -> only Y is likely
1989 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1990 ApplyDebugLocation DL(*this, Cond);
1991 EmitBranchOnBoolExpr(Cond: CondBOp->getLHS(), TrueBlock, FalseBlock: LHSFalse, TrueCount: LHSCount,
1992 LH: LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1993 EmitBlock(BB: LHSFalse);
1994 }
1995
1996 incrementProfileCounter(CondBOp);
1997 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1998
1999 // Any temporaries created here are conditional.
2000 eval.begin(CGF&: *this);
2001 EmitBranchToCounterBlock(Cond: CondBOp->getRHS(), LOp: BO_LOr, TrueBlock, FalseBlock,
2002 TrueCount: RHSCount, LH);
2003
2004 eval.end(CGF&: *this);
2005 MCDCLogOpStack.pop_back();
2006 return;
2007 }
2008 }
2009
2010 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Val: Cond)) {
2011 // br(!x, t, f) -> br(x, f, t)
2012 // Avoid doing this optimization when instrumenting a condition for MC/DC.
2013 // LNot is taken as part of the condition for simplicity, and changing its
2014 // sense negatively impacts test vector tracking.
2015 bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
2016 CGM.getCodeGenOpts().MCDCCoverage &&
2017 isInstrumentedCondition(C: Cond);
2018 if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
2019 // Negate the count.
2020 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
2021 // The values of the enum are chosen to make this negation possible.
2022 LH = static_cast<Stmt::Likelihood>(-LH);
2023 // Negate the condition and swap the destination blocks.
2024 return EmitBranchOnBoolExpr(Cond: CondUOp->getSubExpr(), TrueBlock: FalseBlock, FalseBlock: TrueBlock,
2025 TrueCount: FalseCount, LH);
2026 }
2027 }
2028
2029 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Val: Cond)) {
2030 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
2031 llvm::BasicBlock *LHSBlock = createBasicBlock(name: "cond.true");
2032 llvm::BasicBlock *RHSBlock = createBasicBlock(name: "cond.false");
2033
2034 // The ConditionalOperator itself has no likelihood information for its
2035 // true and false branches. This matches the behavior of __builtin_expect.
2036 ConditionalEvaluation cond(*this);
2037 EmitBranchOnBoolExpr(Cond: CondOp->getCond(), TrueBlock: LHSBlock, FalseBlock: RHSBlock,
2038 TrueCount: getProfileCount(CondOp), LH: Stmt::LH_None);
2039
2040 // When computing PGO branch weights, we only know the overall count for
2041 // the true block. This code is essentially doing tail duplication of the
2042 // naive code-gen, introducing new edges for which counts are not
2043 // available. Divide the counts proportionally between the LHS and RHS of
2044 // the conditional operator.
2045 uint64_t LHSScaledTrueCount = 0;
2046 if (TrueCount) {
2047 double LHSRatio =
2048 getProfileCount(CondOp) / (double)getCurrentProfileCount();
2049 LHSScaledTrueCount = TrueCount * LHSRatio;
2050 }
2051
2052 cond.begin(CGF&: *this);
2053 EmitBlock(BB: LHSBlock);
2054 incrementProfileCounter(CondOp);
2055 {
2056 ApplyDebugLocation DL(*this, Cond);
2057 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
2058 LHSScaledTrueCount, LH, CondOp);
2059 }
2060 cond.end(CGF&: *this);
2061
2062 cond.begin(CGF&: *this);
2063 EmitBlock(BB: RHSBlock);
2064 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
2065 TrueCount - LHSScaledTrueCount, LH, CondOp);
2066 cond.end(CGF&: *this);
2067
2068 return;
2069 }
2070
2071 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Val: Cond)) {
2072 // Conditional operator handling can give us a throw expression as a
2073 // condition for a case like:
2074 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
2075 // Fold this to:
2076 // br(c, throw x, br(y, t, f))
2077 EmitCXXThrowExpr(E: Throw, /*KeepInsertionPoint*/false);
2078 return;
2079 }
2080
2081 // Emit the code with the fully general case.
2082 llvm::Value *CondV;
2083 {
2084 ApplyDebugLocation DL(*this, Cond);
2085 CondV = EvaluateExprAsBool(E: Cond);
2086 }
2087
2088 MaybeEmitDeferredVarDeclInit(var: ConditionalDecl);
2089
2090 // If not at the top of the logical operator nest, update MCDC temp with the
2091 // boolean result of the evaluated condition.
2092 if (!MCDCLogOpStack.empty()) {
2093 const Expr *MCDCBaseExpr = Cond;
2094 // When a nested ConditionalOperator (ternary) is encountered in a boolean
2095 // expression, MC/DC tracks the result of the ternary, and this is tied to
2096 // the ConditionalOperator expression and not the ternary's LHS or RHS. If
2097 // this is the case, the ConditionalOperator expression is passed through
2098 // the ConditionalOp parameter and then used as the MCDC base expression.
2099 if (ConditionalOp)
2100 MCDCBaseExpr = ConditionalOp;
2101
2102 maybeUpdateMCDCCondBitmap(E: MCDCBaseExpr, Val: CondV);
2103 }
2104
2105 llvm::MDNode *Weights = nullptr;
2106 llvm::MDNode *Unpredictable = nullptr;
2107
2108 // If the branch has a condition wrapped by __builtin_unpredictable,
2109 // create metadata that specifies that the branch is unpredictable.
2110 // Don't bother if not optimizing because that metadata would not be used.
2111 auto *Call = dyn_cast<CallExpr>(Val: Cond->IgnoreImpCasts());
2112 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2113 auto *FD = dyn_cast_or_null<FunctionDecl>(Val: Call->getCalleeDecl());
2114 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2115 llvm::MDBuilder MDHelper(getLLVMContext());
2116 Unpredictable = MDHelper.createUnpredictable();
2117 }
2118 }
2119
2120 // If there is likelihood knowledge for the condition, lower it.
2121 // Note that if not optimizing, this won't emit anything.
2122 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(Cond: CondV, LH);
2123 if (CondV != NewCondV)
2124 CondV = NewCondV;
2125 else {
2126 // Otherwise, lower profile counts. Note that we do this even at -O0.
2127 uint64_t CurrentCount = std::max(a: getCurrentProfileCount(), b: TrueCount);
2128 Weights = createProfileWeights(TrueCount, FalseCount: CurrentCount - TrueCount);
2129 }
2130
2131 llvm::Instruction *BrInst = Builder.CreateCondBr(Cond: CondV, True: TrueBlock, False: FalseBlock,
2132 BranchWeights: Weights, Unpredictable);
2133 addInstToNewSourceAtom(KeyInstruction: BrInst, Backup: CondV);
2134
2135 switch (HLSLControlFlowAttr) {
2136 case HLSLControlFlowHintAttr::Microsoft_branch:
2137 case HLSLControlFlowHintAttr::Microsoft_flatten: {
2138 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2139
2140 llvm::ConstantInt *BranchHintConstant =
2141 HLSLControlFlowAttr ==
2142 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2143 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2144 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2145
2146 SmallVector<llvm::Metadata *, 2> Vals(
2147 {MDHelper.createString(Str: "hlsl.controlflow.hint"),
2148 MDHelper.createConstant(C: BranchHintConstant)});
2149 BrInst->setMetadata(Kind: "hlsl.controlflow.hint",
2150 Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Vals));
2151 break;
2152 }
2153 // This case is required to avoid warnings about an unhandled enumerator.
2154 case HLSLControlFlowHintAttr::SpellingNotCalculated:
2155 break;
2156 }
2157}
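// Putting the cases above together, "if (a && b)" lowers to roughly the
// following CFG (block names hypothetical except land.lhs.true, weights
// attached per the TrueCount bookkeeping):
//
//   entry:
//     br i1 %a, label %land.lhs.true, label %if.else
//   land.lhs.true:
//     br i1 %b, label %if.then, label %if.else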
2158
2159/// ErrorUnsupported - Print out an error that codegen doesn't support the
2160/// specified stmt yet.
2161void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2162 CGM.ErrorUnsupported(S, Type);
2163}
2164
2165/// emitNonZeroVLAInit - Emit the "zero" initialization of a
2166/// variable-length array whose elements have a non-zero bit-pattern.
2167///
2168 /// \param baseType the innermost element type of the array
2169 /// \param src a char* pointing to the bit-pattern for a single
2170 /// base element of the array
2171 /// \param sizeInChars the total size of the VLA, in chars
2172static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
2173 Address dest, Address src,
2174 llvm::Value *sizeInChars) {
2175 CGBuilderTy &Builder = CGF.Builder;
2176
2177 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(T: baseType);
2178 llvm::Value *baseSizeInChars
2179 = llvm::ConstantInt::get(Ty: CGF.IntPtrTy, V: baseSize.getQuantity());
2180
2181 Address begin = dest.withElementType(ElemTy: CGF.Int8Ty);
2182 llvm::Value *end = Builder.CreateInBoundsGEP(Ty: begin.getElementType(),
2183 Ptr: begin.emitRawPointer(CGF),
2184 IdxList: sizeInChars, Name: "vla.end");
2185
2186 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2187 llvm::BasicBlock *loopBB = CGF.createBasicBlock(name: "vla-init.loop");
2188 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "vla-init.cont");
2189
2190 // Make a loop over the VLA. C99 guarantees that the VLA element
2191 // count must be nonzero.
2192 CGF.EmitBlock(BB: loopBB);
2193
2194 llvm::PHINode *cur = Builder.CreatePHI(Ty: begin.getType(), NumReservedValues: 2, Name: "vla.cur");
2195 cur->addIncoming(V: begin.emitRawPointer(CGF), BB: originBB);
2196
2197 CharUnits curAlign =
2198 dest.getAlignment().alignmentOfArrayElement(elementSize: baseSize);
2199
2200 // memcpy the individual element bit-pattern.
2201 Builder.CreateMemCpy(Dest: Address(cur, CGF.Int8Ty, curAlign), Src: src, Size: baseSizeInChars,
2202 /*volatile*/ IsVolatile: false);
2203
2204 // Go to the next element.
2205 llvm::Value *next =
2206 Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: cur, IdxList: baseSizeInChars, Name: "vla.next");
2207
2208 // Leave if that's the end of the VLA.
2209 llvm::Value *done = Builder.CreateICmpEQ(LHS: next, RHS: end, Name: "vla-init.isdone");
2210 Builder.CreateCondBr(Cond: done, True: contBB, False: loopBB);
2211 cur->addIncoming(V: next, BB: loopBB);
2212
2213 CGF.EmitBlock(BB: contBB);
2214}
2215
2216void
2217CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
2218 // Ignore empty classes in C++.
2219 if (getLangOpts().CPlusPlus) {
2220 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2221 if (cast<CXXRecordDecl>(Val: RT->getDecl())->isEmpty())
2222 return;
2223 }
2224 }
2225
2226 if (DestPtr.getElementType() != Int8Ty)
2227 DestPtr = DestPtr.withElementType(ElemTy: Int8Ty);
2228
2229 // Get size and alignment info for this aggregate.
2230 CharUnits size = getContext().getTypeSizeInChars(T: Ty);
2231
2232 llvm::Value *SizeVal;
2233 const VariableArrayType *vla;
2234
2235 // Don't bother emitting a zero-byte memset.
2236 if (size.isZero()) {
2237 // But note that getTypeInfo returns 0 for a VLA.
2238 if (const VariableArrayType *vlaType =
2239 dyn_cast_or_null<VariableArrayType>(
2240 Val: getContext().getAsArrayType(T: Ty))) {
2241 auto VlaSize = getVLASize(vla: vlaType);
2242 SizeVal = VlaSize.NumElts;
2243 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2244 if (!eltSize.isOne())
2245 SizeVal = Builder.CreateNUWMul(LHS: SizeVal, RHS: CGM.getSize(numChars: eltSize));
2246 vla = vlaType;
2247 } else {
2248 return;
2249 }
2250 } else {
2251 SizeVal = CGM.getSize(numChars: size);
2252 vla = nullptr;
2253 }
2254
2255 // If the type contains a pointer to data member we can't memset it to zero.
2256 // Instead, create a null constant and copy it to the destination.
2257 // TODO: there are other patterns besides zero that we can usefully memset,
2258 // like -1, which happens to be the pattern used by member-pointers.
2259 if (!CGM.getTypes().isZeroInitializable(T: Ty)) {
2260 // For a VLA, emit a single element, then splat that over the VLA.
2261 if (vla) Ty = getContext().getBaseElementType(vla);
2262
2263 llvm::Constant *NullConstant = CGM.EmitNullConstant(T: Ty);
2264
2265 llvm::GlobalVariable *NullVariable =
2266 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2267 /*isConstant=*/true,
2268 llvm::GlobalVariable::PrivateLinkage,
2269 NullConstant, Twine());
2270 CharUnits NullAlign = DestPtr.getAlignment();
2271 NullVariable->setAlignment(NullAlign.getAsAlign());
2272 Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2273
2274 if (vla) return emitNonZeroVLAInit(CGF&: *this, baseType: Ty, dest: DestPtr, src: SrcPtr, sizeInChars: SizeVal);
2275
2276 // Get and call the appropriate llvm.memcpy overload.
2277 Builder.CreateMemCpy(Dest: DestPtr, Src: SrcPtr, Size: SizeVal, IsVolatile: false);
2278 return;
2279 }
2280
2281 // Otherwise, just memset the whole thing to zero. This is legal
2282 // because in LLVM, all default initializers (other than the ones we just
2283 // handled above) are guaranteed to have a bit pattern of all zeros.
2284 Builder.CreateMemSet(Dest: DestPtr, Value: Builder.getInt8(C: 0), Size: SizeVal, IsVolatile: false);
2285}
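// A hedged example of the non-zero-initializable path above:
//
//   struct S;
//   int S::*mp;        // Itanium ABI: the null data member pointer is -1
//   int S::*arr[n];    // VLA: one element's pattern is splatted by
//                      // emitNonZeroVLAInit
//
// isZeroInitializable() is false for these, so the code copies from a
// private global holding the null constant instead of issuing a memset.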
2286
2287llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2288 // Make sure that there is a block for the indirect goto.
2289 if (!IndirectBranch)
2290 GetIndirectGotoBlock();
2291
2292 llvm::BasicBlock *BB = getJumpDestForLabel(S: L).getBlock();
2293
2294 // Make sure the indirect branch includes all of the address-taken blocks.
2295 IndirectBranch->addDestination(Dest: BB);
2296 return llvm::BlockAddress::get(Ty: CurFn->getType(), BB);
2297}
2298
2299llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2300 // If we already made the indirect branch for indirect goto, return its block.
2301 if (IndirectBranch) return IndirectBranch->getParent();
2302
2303 CGBuilderTy TmpBuilder(*this, createBasicBlock(name: "indirectgoto"));
2304
2305 // Create the PHI node that indirect gotos will add entries to.
2306 llvm::Value *DestVal = TmpBuilder.CreatePHI(Ty: Int8PtrTy, NumReservedValues: 0,
2307 Name: "indirect.goto.dest");
2308
2309 // Create the indirect branch instruction.
2310 IndirectBranch = TmpBuilder.CreateIndirectBr(Addr: DestVal);
2311 return IndirectBranch->getParent();
2312}
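// For reference, the GNU extension these two functions support (sketch):
//
//   void *dest = &&out;   // GetAddrOfLabel: registers 'out' as a target
//   goto *dest;           // lowered through the shared indirectbr block
//   out:;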
2313
2314/// Computes the length of an array in elements, as well as the base
2315/// element type and a properly-typed first element pointer.
2316llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2317 QualType &baseType,
2318 Address &addr) {
2319 const ArrayType *arrayType = origArrayType;
2320
2321 // If it's a VLA, we have to load the stored size. Note that
2322 // this is the number of elements in the VLA, not its size in bytes.
2323 llvm::Value *numVLAElements = nullptr;
2324 if (isa<VariableArrayType>(Val: arrayType)) {
2325 numVLAElements = getVLASize(vla: cast<VariableArrayType>(Val: arrayType)).NumElts;
2326
2327 // Walk into all VLAs. This doesn't require changes to addr,
2328 // which has type T* where T is the first non-VLA element type.
2329 do {
2330 QualType elementType = arrayType->getElementType();
2331 arrayType = getContext().getAsArrayType(T: elementType);
2332
2333 // If we only have VLA components, 'addr' requires no adjustment.
2334 if (!arrayType) {
2335 baseType = elementType;
2336 return numVLAElements;
2337 }
2338 } while (isa<VariableArrayType>(Val: arrayType));
2339
2340 // We get out here only if we find a constant array type
2341 // inside the VLA.
2342 }
2343
2344 // We have some number of constant-length arrays, so addr should
2345 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2346 // down to the first element of addr.
2347 SmallVector<llvm::Value*, 8> gepIndices;
2348
2349 // GEP down to the array type.
2350 llvm::ConstantInt *zero = Builder.getInt32(C: 0);
2351 gepIndices.push_back(Elt: zero);
2352
2353 uint64_t countFromCLAs = 1;
2354 QualType eltType;
2355
2356 llvm::ArrayType *llvmArrayType =
2357 dyn_cast<llvm::ArrayType>(Val: addr.getElementType());
2358 while (llvmArrayType) {
2359 assert(isa<ConstantArrayType>(arrayType));
2360 assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2361 llvmArrayType->getNumElements());
2362
2363 gepIndices.push_back(Elt: zero);
2364 countFromCLAs *= llvmArrayType->getNumElements();
2365 eltType = arrayType->getElementType();
2366
2367 llvmArrayType =
2368 dyn_cast<llvm::ArrayType>(Val: llvmArrayType->getElementType());
2369 arrayType = getContext().getAsArrayType(T: arrayType->getElementType());
2370 assert((!llvmArrayType || arrayType) &&
2371 "LLVM and Clang types are out of sync");
2372 }
2373
2374 if (arrayType) {
2375 // From this point onwards, the Clang array type has been emitted
2376 // as some other type (probably a packed struct). Compute the array
2377 // size, and just emit the 'begin' expression as a bitcast.
2378 while (arrayType) {
2379 countFromCLAs *= cast<ConstantArrayType>(Val: arrayType)->getZExtSize();
2380 eltType = arrayType->getElementType();
2381 arrayType = getContext().getAsArrayType(T: eltType);
2382 }
2383
2384 llvm::Type *baseType = ConvertType(T: eltType);
2385 addr = addr.withElementType(ElemTy: baseType);
2386 } else {
2387 // Create the actual GEP.
2388 addr = Address(Builder.CreateInBoundsGEP(Ty: addr.getElementType(),
2389 Ptr: addr.emitRawPointer(CGF&: *this),
2390 IdxList: gepIndices, Name: "array.begin"),
2391 ConvertTypeForMem(T: eltType), addr.getAlignment());
2392 }
2393
2394 baseType = eltType;
2395
2396 llvm::Value *numElements
2397 = llvm::ConstantInt::get(Ty: SizeTy, V: countFromCLAs);
2398
2399 // If we had any VLA dimensions, factor them in.
2400 if (numVLAElements)
2401 numElements = Builder.CreateNUWMul(LHS: numVLAElements, RHS: numElements);
2402
2403 return numElements;
2404}
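// A worked example (hypothetical declaration): for 'int a[n][4][2]',
// numVLAElements is 'n' from getVLASize, the two constant dimensions give
// countFromCLAs == 8 while the GEP walks down to the first 'int', and the
// result is the NUW product n * 8 with baseType == int.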
2405
2406CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2407 const VariableArrayType *vla = getContext().getAsVariableArrayType(T: type);
2408 assert(vla && "type was not a variable array type!");
2409 return getVLASize(vla);
2410}
2411
2412CodeGenFunction::VlaSizePair
2413CodeGenFunction::getVLASize(const VariableArrayType *type) {
2414 // The number of elements so far; always size_t.
2415 llvm::Value *numElements = nullptr;
2416
2417 QualType elementType;
2418 do {
2419 elementType = type->getElementType();
2420 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2421 assert(vlaSize && "no size for VLA!");
2422 assert(vlaSize->getType() == SizeTy);
2423
2424 if (!numElements) {
2425 numElements = vlaSize;
2426 } else {
2427 // It's undefined behavior if this wraps around, so mark it that way.
2428 // FIXME: Teach -fsanitize=undefined to trap this.
2429 numElements = Builder.CreateNUWMul(LHS: numElements, RHS: vlaSize);
2430 }
2431 } while ((type = getContext().getAsVariableArrayType(T: elementType)));
2432
2433 return { numElements, elementType };
2434}
2435
2436CodeGenFunction::VlaSizePair
2437CodeGenFunction::getVLAElements1D(QualType type) {
2438 const VariableArrayType *vla = getContext().getAsVariableArrayType(T: type);
2439 assert(vla && "type was not a variable array type!");
2440 return getVLAElements1D(vla);
2441}
2442
2443CodeGenFunction::VlaSizePair
2444CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2445 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2446 assert(VlaSize && "no size for VLA!");
2447 assert(VlaSize->getType() == SizeTy);
2448 return { VlaSize, Vla->getElementType() };
2449}
2450
2451void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2452 assert(type->isVariablyModifiedType() &&
2453 "Must pass variably modified type to EmitVLASizes!");
2454
2455 EnsureInsertPoint();
2456
2457 // We're going to walk down into the type and look for VLA
2458 // expressions.
2459 do {
2460 assert(type->isVariablyModifiedType());
2461
2462 const Type *ty = type.getTypePtr();
2463 switch (ty->getTypeClass()) {
2464
2465#define TYPE(Class, Base)
2466#define ABSTRACT_TYPE(Class, Base)
2467#define NON_CANONICAL_TYPE(Class, Base)
2468#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2469#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2470#include "clang/AST/TypeNodes.inc"
2471 llvm_unreachable("unexpected dependent type!");
2472
2473 // These types are never variably-modified.
2474 case Type::Builtin:
2475 case Type::Complex:
2476 case Type::Vector:
2477 case Type::ExtVector:
2478 case Type::ConstantMatrix:
2479 case Type::Record:
2480 case Type::Enum:
2481 case Type::Using:
2482 case Type::TemplateSpecialization:
2483 case Type::ObjCTypeParam:
2484 case Type::ObjCObject:
2485 case Type::ObjCInterface:
2486 case Type::ObjCObjectPointer:
2487 case Type::BitInt:
2488 case Type::HLSLInlineSpirv:
2489 llvm_unreachable("type class is never variably-modified!");
2490
2491 case Type::Elaborated:
2492 type = cast<ElaboratedType>(ty)->getNamedType();
2493 break;
2494
2495 case Type::Adjusted:
2496 type = cast<AdjustedType>(ty)->getAdjustedType();
2497 break;
2498
2499 case Type::Decayed:
2500 type = cast<DecayedType>(ty)->getPointeeType();
2501 break;
2502
2503 case Type::Pointer:
2504 type = cast<PointerType>(ty)->getPointeeType();
2505 break;
2506
2507 case Type::BlockPointer:
2508 type = cast<BlockPointerType>(ty)->getPointeeType();
2509 break;
2510
2511 case Type::LValueReference:
2512 case Type::RValueReference:
2513 type = cast<ReferenceType>(ty)->getPointeeType();
2514 break;
2515
2516 case Type::MemberPointer:
2517 type = cast<MemberPointerType>(ty)->getPointeeType();
2518 break;
2519
2520 case Type::ArrayParameter:
2521 case Type::ConstantArray:
2522 case Type::IncompleteArray:
2523 // Losing element qualification here is fine.
2524 type = cast<ArrayType>(ty)->getElementType();
2525 break;
2526
2527 case Type::VariableArray: {
2528 // Losing element qualification here is fine.
2529 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2530
2531 // Unknown size indication requires no size computation.
2532 // Otherwise, evaluate and record it.
2533 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2534 // It's possible that we might have emitted this already,
2535 // e.g. with a typedef and a pointer to it.
2536 llvm::Value *&entry = VLASizeMap[sizeExpr];
2537 if (!entry) {
2538 llvm::Value *size = EmitScalarExpr(E: sizeExpr);
2539
2540 // C11 6.7.6.2p5:
2541 // If the size is an expression that is not an integer constant
2542 // expression [...] each time it is evaluated it shall have a value
2543 // greater than zero.
2544 if (SanOpts.has(K: SanitizerKind::VLABound)) {
2545 auto CheckOrdinal = SanitizerKind::SO_VLABound;
2546 auto CheckHandler = SanitizerHandler::VLABoundNotPositive;
2547 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2548 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: size->getType());
2549 clang::QualType SEType = sizeExpr->getType();
2550 llvm::Value *CheckCondition =
2551 SEType->isSignedIntegerType()
2552 ? Builder.CreateICmpSGT(LHS: size, RHS: Zero)
2553 : Builder.CreateICmpUGT(LHS: size, RHS: Zero);
2554 llvm::Constant *StaticArgs[] = {
2555 EmitCheckSourceLocation(Loc: sizeExpr->getBeginLoc()),
2556 EmitCheckTypeDescriptor(T: SEType)};
2557 EmitCheck(Checked: std::make_pair(CheckCondition, CheckOrdinal),
2558 Check: CheckHandler, StaticArgs, DynamicArgs: size);
2559 }
2560
2561 // Always zexting here would be wrong if it weren't
2562 // undefined behavior to have a negative bound.
2563 // FIXME: What about when size's type is larger than size_t?
2564 entry = Builder.CreateIntCast(V: size, DestTy: SizeTy, /*signed*/ isSigned: false);
2565 }
2566 }
2567 type = vat->getElementType();
2568 break;
2569 }
2570
2571 case Type::FunctionProto:
2572 case Type::FunctionNoProto:
2573 type = cast<FunctionType>(ty)->getReturnType();
2574 break;
2575
2576 case Type::Paren:
2577 case Type::TypeOf:
2578 case Type::UnaryTransform:
2579 case Type::Attributed:
2580 case Type::BTFTagAttributed:
2581 case Type::HLSLAttributedResource:
2582 case Type::SubstTemplateTypeParm:
2583 case Type::MacroQualified:
2584 case Type::CountAttributed:
2585 // Keep walking after single level desugaring.
2586 type = type.getSingleStepDesugaredType(Context: getContext());
2587 break;
2588
2589 case Type::Typedef:
2590 case Type::Decltype:
2591 case Type::Auto:
2592 case Type::DeducedTemplateSpecialization:
2593 case Type::PackIndexing:
2594 // Stop walking: nothing to do.
2595 return;
2596
2597 case Type::TypeOfExpr:
2598 // Stop walking: emit typeof expression.
2599 EmitIgnoredExpr(E: cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2600 return;
2601
2602 case Type::Atomic:
2603 type = cast<AtomicType>(ty)->getValueType();
2604 break;
2605
2606 case Type::Pipe:
2607 type = cast<PipeType>(ty)->getElementType();
2608 break;
2609 }
2610 } while (type->isVariablyModifiedType());
2611}
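// A worked example of the walk above: in the prologue of
//
//   void g(int n, int m, int a[n][m])
//
// the parameter's original type int[n][m] takes the VariableArray case
// twice, evaluating 'n' and then 'm' once each into VLASizeMap (cast to
// size_t), with the optional -fsanitize=vla-bound positivity check.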
2612
2613Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2614 if (getContext().getBuiltinVaListType()->isArrayType())
2615 return EmitPointerWithAlignment(Addr: E);
2616 return EmitLValue(E).getAddress();
2617}
2618
2619Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2620 return EmitLValue(E).getAddress();
2621}
2622
2623void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2624 const APValue &Init) {
2625 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2626 if (CGDebugInfo *Dbg = getDebugInfo())
2627 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2628 Dbg->EmitGlobalVariable(VD: E->getDecl(), Init);
2629}
2630
2631CodeGenFunction::PeepholeProtection
2632CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2633 // At the moment, the only aggressive peephole we do in IR gen
2634 // is trunc(zext) folding, but if we add more, we can easily
2635 // extend this protection.
2636
2637 if (!rvalue.isScalar()) return PeepholeProtection();
2638 llvm::Value *value = rvalue.getScalarVal();
2639 if (!isa<llvm::ZExtInst>(Val: value)) return PeepholeProtection();
2640
2641 // Just make an extra bitcast.
2642 assert(HaveInsertPoint());
2643 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2644 Builder.GetInsertBlock());
2645
2646 PeepholeProtection protection;
2647 protection.Inst = inst;
2648 return protection;
2649}
2650
2651void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2652 if (!protection.Inst) return;
2653
2654 // In theory, we could try to duplicate the peepholes now, but whatever.
2655 protection.Inst->eraseFromParent();
2656}
2657
2658void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2659 QualType Ty, SourceLocation Loc,
2660 SourceLocation AssumptionLoc,
2661 llvm::Value *Alignment,
2662 llvm::Value *OffsetValue) {
2663 if (Alignment->getType() != IntPtrTy)
2664 Alignment =
2665 Builder.CreateIntCast(V: Alignment, DestTy: IntPtrTy, isSigned: false, Name: "casted.align");
2666 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2667 OffsetValue =
2668 Builder.CreateIntCast(V: OffsetValue, DestTy: IntPtrTy, isSigned: true, Name: "casted.offset");
2669 llvm::Value *TheCheck = nullptr;
2670 if (SanOpts.has(K: SanitizerKind::Alignment)) {
2671 llvm::Value *PtrIntValue =
2672 Builder.CreatePtrToInt(V: PtrValue, DestTy: IntPtrTy, Name: "ptrint");
2673
2674 if (OffsetValue) {
2675 bool IsOffsetZero = false;
2676 if (const auto *CI = dyn_cast<llvm::ConstantInt>(Val: OffsetValue))
2677 IsOffsetZero = CI->isZero();
2678
2679 if (!IsOffsetZero)
2680 PtrIntValue = Builder.CreateSub(LHS: PtrIntValue, RHS: OffsetValue, Name: "offsetptr");
2681 }
2682
2683 llvm::Value *Zero = llvm::ConstantInt::get(Ty: IntPtrTy, V: 0);
2684 llvm::Value *Mask =
2685 Builder.CreateSub(LHS: Alignment, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, V: 1));
2686 llvm::Value *MaskedPtr = Builder.CreateAnd(LHS: PtrIntValue, RHS: Mask, Name: "maskedptr");
2687 TheCheck = Builder.CreateICmpEQ(LHS: MaskedPtr, RHS: Zero, Name: "maskcond");
2688 }
2689 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2690 DL: CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2691
2692 if (!SanOpts.has(K: SanitizerKind::Alignment))
2693 return;
2694 emitAlignmentAssumptionCheck(Ptr: PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2695 OffsetValue, TheCheck, Assumption);
2696}
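// Worked arithmetic for the sanitizer path above: with Alignment == 16 and
// OffsetValue == 4, the emitted check computes
//
//   maskedptr = (ptrtoint(PtrValue) - 4) & (16 - 1)
//   maskcond  = (maskedptr == 0)
//
// while the llvm.assume alignment assumption is emitted either way.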
2697
2698void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2699 const Expr *E,
2700 SourceLocation AssumptionLoc,
2701 llvm::Value *Alignment,
2702 llvm::Value *OffsetValue) {
2703 QualType Ty = E->getType();
2704 SourceLocation Loc = E->getExprLoc();
2705
2706 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2707 OffsetValue);
2708}
2709
2710llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2711 llvm::Value *AnnotatedVal,
2712 StringRef AnnotationStr,
2713 SourceLocation Location,
2714 const AnnotateAttr *Attr) {
2715 SmallVector<llvm::Value *, 5> Args = {
2716 AnnotatedVal,
2717 CGM.EmitAnnotationString(Str: AnnotationStr),
2718 CGM.EmitAnnotationUnit(Loc: Location),
2719 CGM.EmitAnnotationLineNo(L: Location),
2720 };
2721 if (Attr)
2722 Args.push_back(Elt: CGM.EmitAnnotationArgs(Attr));
2723 return Builder.CreateCall(Callee: AnnotationFn, Args);
2724}
2725
2726void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2727 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2728 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2729 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2730 {V->getType(), CGM.ConstGlobalsPtrTy}),
2731 V, I->getAnnotation(), D->getLocation(), I);
2732}
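// For reference, the source form that reaches this function (sketch):
//
//   void f() { int x __attribute__((annotate("mytag"))) = 0; }
//
// which emits one llvm.var.annotation call per annotate attribute on 'x'.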
2733
2734Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2735 Address Addr) {
2736 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2737 llvm::Value *V = Addr.emitRawPointer(CGF&: *this);
2738 llvm::Type *VTy = V->getType();
2739 auto *PTy = dyn_cast<llvm::PointerType>(Val: VTy);
2740 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2741 llvm::PointerType *IntrinTy =
2742 llvm::PointerType::get(C&: CGM.getLLVMContext(), AddressSpace: AS);
2743 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2744 {IntrinTy, CGM.ConstGlobalsPtrTy});
2745
2746 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2747 // FIXME: Always emit the cast inst so we can differentiate between
2748 // annotation on the first field of a struct and annotation on the struct
2749 // itself.
2750 if (VTy != IntrinTy)
2751 V = Builder.CreateBitCast(V, IntrinTy);
2752 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2753 V = Builder.CreateBitCast(V, VTy);
2754 }
2755
2756 return Address(V, Addr.getElementType(), Addr.getAlignment());
2757}
2758
2759CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2760
2761CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2762 : CGF(CGF) {
2763 assert(!CGF->IsSanitizerScope);
2764 CGF->IsSanitizerScope = true;
2765}
2766
2767CodeGenFunction::SanitizerScope::~SanitizerScope() {
2768 CGF->IsSanitizerScope = false;
2769}
2770
2771void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2772 const llvm::Twine &Name,
2773 llvm::BasicBlock::iterator InsertPt) const {
2774 LoopStack.InsertHelper(I);
2775 if (IsSanitizerScope)
2776 I->setNoSanitizeMetadata();
2777}
2778
2779void CGBuilderInserter::InsertHelper(
2780 llvm::Instruction *I, const llvm::Twine &Name,
2781 llvm::BasicBlock::iterator InsertPt) const {
2782 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, InsertPt);
2783 if (CGF)
2784 CGF->InsertHelper(I, Name, InsertPt);
2785}
2786
2787// Emits an error if we don't have a valid set of target features for the
2788// called function.
2789void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2790 const FunctionDecl *TargetDecl) {
2791 // SemaChecking cannot handle the x86 builtins below because their valid
2792 // parameter ranges depend on the TargetAttribute of the caller.
2793 if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2794 unsigned BuiltinID = TargetDecl->getBuiltinID();
2795 if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2796 BuiltinID == X86::BI__builtin_ia32_cmpss ||
2797 BuiltinID == X86::BI__builtin_ia32_cmppd ||
2798 BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2799 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurCodeDecl);
2800 llvm::StringMap<bool> TargetFeatureMap;
2801 CGM.getContext().getFunctionFeatureMap(TargetFeatureMap, FD);
2802 llvm::APSInt Result =
2803 *(E->getArg(Arg: 2)->getIntegerConstantExpr(Ctx: CGM.getContext()));
2804 if (Result.getSExtValue() > 7 && !TargetFeatureMap.lookup("avx"))
2805 CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2806 << TargetDecl->getDeclName() << "avx";
2807 }
2808 }
2809 return checkTargetFeatures(Loc: E->getBeginLoc(), TargetDecl);
2810}
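// Illustration of the special case above (hypothetical snippet):
//
//   __m128 r = __builtin_ia32_cmpps(a, b, /*predicate=*/8);
//
// in a caller whose feature map lacks "avx" reports
// err_builtin_needs_feature, since predicates above 7 need the VEX encoding.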

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't, we can't
  // check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu; for an always_inline function this is
  // any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, which emits an error only if an
  // unsupported target builtin is referenced by an accelerator executable
  // function.
  bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
  if (BuiltinID) {
    StringRef FeatureList(
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
    if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
                                                 CallerFeatureMap) &&
        !IsHipStdPar) {
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName() << FeatureList;
    }
  } else if (!TargetDecl->isMultiVersion() &&
             TargetDecl->hasAttr<TargetAttr>()) {
    // Get the required features for the callee.
    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    ParsedTargetAttr ParsedAttr =
        CGM.getContext().filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
          if (!CallerFeatureMap.lookup(Feature)) {
            MissingFeature = Feature.str();
            return false;
          }
          return true;
        }) &&
        !IsHipStdPar)
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : CalleeFeatureMap) {
      if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
                           !CallerFeatureMap.find(F.getKey())->getValue()) &&
          !IsHipStdPar)
        CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
            << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
    }
  }
}
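
// A hypothetical always_inline scenario this catches: a callee such as
//   __attribute__((always_inline, target("avx2"))) inline void fast(void);
// called from a function compiled without avx2 cannot be inlined soundly,
// so we report err_function_needs_feature naming the caller, the callee,
// and the first missing feature found above.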

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

void CodeGenFunction::EmitKCFIOperandBundle(
    const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
  const FunctionProtoType *FP =
      Callee.getAbstractInfo().getCalleeFunctionProtoType();
  if (FP)
    Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
}

llvm::Value *
CodeGenFunction::FormAArch64ResolverCondition(const FMVResolverOption &RO) {
  return RO.Features.empty() ? nullptr : EmitAArch64CpuSupports(RO.Features);
}

llvm::Value *
CodeGenFunction::FormX86ResolverCondition(const FMVResolverOption &RO) {
  llvm::Value *Condition = nullptr;

  if (RO.Architecture) {
    StringRef Arch = *RO.Architecture;
    // If arch= specifies an x86-64 micro-architecture level, test the feature
    // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
    if (Arch.starts_with("x86-64"))
      Condition = EmitX86CpuSupports({Arch});
    else
      Condition = EmitX86CpuIs(Arch);
  }

  if (!RO.Features.empty()) {
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Features);
    Condition =
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
  }
  return Condition;
}
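
// For instance (illustrative mapping, not an exhaustive list): an option
// with arch="x86-64-v3" is lowered through EmitX86CpuSupports, since the
// micro-architecture levels are defined as feature sets, whereas
// arch="skylake" names a concrete CPU and is lowered through EmitX86CpuIs,
// mirroring __builtin_cpu_supports vs. __builtin_cpu_is in user code.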

static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
                                             llvm::Function *Resolver,
                                             CGBuilderTy &Builder,
                                             llvm::Function *FuncToReturn,
                                             bool SupportsIFunc) {
  if (SupportsIFunc) {
    Builder.CreateRet(FuncToReturn);
    return;
  }

  llvm::SmallVector<llvm::Value *, 10> Args(
      llvm::make_pointer_range(Resolver->args()));

  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Resolver->getReturnType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Result);
}
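
// The two shapes produced above, sketched as IR (function names and
// signatures are illustrative only):
//   ; SupportsIFunc: the resolver returns the chosen implementation
//   ret ptr @foo.avx2
//   ; no ifunc support: the resolver body forwards its own arguments
//   %r = musttail call i32 @foo.avx2(i32 %0)
//   ret i32 %r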

void CodeGenFunction::EmitMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {

  llvm::Triple::ArchType ArchType =
      getContext().getTargetInfo().getTriple().getArch();

  switch (ArchType) {
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    EmitX86MultiVersionResolver(Resolver, Options);
    return;
  case llvm::Triple::aarch64:
    EmitAArch64MultiVersionResolver(Resolver, Options);
    return;
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    EmitRISCVMultiVersionResolver(Resolver, Options);
    return;

  default:
    assert(false && "Only implemented for x86, AArch64 and RISC-V targets");
  }
}

void CodeGenFunction::EmitRISCVMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {

  if (getContext().getTargetInfo().getTriple().getOS() !=
      llvm::Triple::OSType::Linux) {
    CGM.getDiags().Report(diag::err_os_unsupport_riscv_fmv);
    return;
  }

  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitRISCVCpuInit();

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
  bool HasDefault = false;
  unsigned DefaultIndex = 0;

  // Check each candidate function.
  for (unsigned Index = 0; Index < Options.size(); Index++) {

    if (Options[Index].Features.empty()) {
      HasDefault = true;
      DefaultIndex = Index;
      continue;
    }

    Builder.SetInsertPoint(CurBlock);

    // FeaturesCondition: the bitmask of the required extensions has been
    // enabled by the runtime object.
    //   (__riscv_feature_bits.features[i] & REQUIRED_BITMASK) ==
    //       REQUIRED_BITMASK
    //
    // When the condition is met, return this version of the function.
    // Otherwise, try the next version.
    //
    //   if (FeaturesConditionVersion1)
    //     return Version1;
    //   else if (FeaturesConditionVersion2)
    //     return Version2;
    //   else if (FeaturesConditionVersion3)
    //     return Version3;
    //   ...
    //   else
    //     return DefaultVersion;

    // TODO: Add a condition to check the length before accessing elements.
    // Without checking the length first, we may access an incorrect memory
    // address when using different versions.
    llvm::SmallVector<StringRef, 8> CurrTargetAttrFeats;
    llvm::SmallVector<std::string, 8> TargetAttrFeats;

    for (StringRef Feat : Options[Index].Features) {
      std::vector<std::string> FeatStr =
          getContext().getTargetInfo().parseTargetAttr(Feat).Features;

      assert(FeatStr.size() == 1 && "Feature string not delimited");

      std::string &CurrFeat = FeatStr.front();
      if (CurrFeat[0] == '+')
        TargetAttrFeats.push_back(CurrFeat.substr(1));
    }

    if (TargetAttrFeats.empty())
      continue;

    for (std::string &Feat : TargetAttrFeats)
      CurrTargetAttrFeats.push_back(Feat);

    Builder.SetInsertPoint(CurBlock);
    llvm::Value *FeatsCondition = EmitRISCVCpuSupports(CurrTargetAttrFeats);

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder,
                                     Options[Index].Function, SupportsIFunc);
    llvm::BasicBlock *ElseBlock = createBasicBlock("resolver_else", Resolver);

    Builder.SetInsertPoint(CurBlock);
    Builder.CreateCondBr(FeatsCondition, RetBlock, ElseBlock);

    CurBlock = ElseBlock;
  }

  // Finally, emit the default one.
  if (HasDefault) {
    Builder.SetInsertPoint(CurBlock);
    CreateMultiVersionResolverReturn(
        CGM, Resolver, Builder, Options[DefaultIndex].Function, SupportsIFunc);
    return;
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}
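
// Sketch of one iteration's output for a version requiring, say, +zba and
// +zbb (mask and bit positions illustrative): EmitRISCVCpuSupports
// materializes something like
//   %bits = load i64, ptr getelementptr (@__riscv_feature_bits, ...)
//   %and  = and i64 %bits, MASK
//   %cond = icmp eq i64 %and, MASK
// and the conditional branch above then selects resolver_return or
// resolver_else.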

void CodeGenFunction::EmitAArch64MultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
  assert(!Options.empty() && "No multiversion resolver options found");
  assert(Options.back().Features.size() == 0 && "Default case must be last");
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
  assert(SupportsIFunc &&
         "Multiversion resolver requires target IFUNC support");
  bool AArch64CpuInitialized = false;
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);

  for (const FMVResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormAArch64ResolverCondition(RO);

    // The 'default' or 'all features enabled' case.
    if (!Condition) {
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    if (!AArch64CpuInitialized) {
      Builder.SetInsertPoint(CurBlock, CurBlock->begin());
      EmitAArch64CpuInit();
      AArch64CpuInitialized = true;
      Builder.SetInsertPoint(CurBlock);
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}
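
// Note: EmitAArch64CpuInit is hoisted to the front of the entry block the
// first time a guarded version is seen, so the runtime feature state is
// initialized exactly once, before any condition reads it; if the only
// option is the default, no init call is emitted at all.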

void CodeGenFunction::EmitX86MultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const FMVResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormX86ResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

// Loc - where the diagnostic will point, i.e. where in the source code this
// alignment check has failed.
// SecondaryLoc - if present (it will be when sufficiently different from
// Loc), the diagnostic will additionally point a "Note:" to this location;
// it should be the location where, e.g., the __attribute__((assume_aligned))
// was written.
void CodeGenFunction::emitAlignmentAssumptionCheck(
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
             llvm::Intrinsic::getOrInsertDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty->getPointeeType().isVolatileQualified())
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it; otherwise the check will be dropped by
  // optimizations.
  Assumption->removeFromParent();

  {
    auto CheckOrdinal = SanitizerKind::SO_Alignment;
    auto CheckHandler = SanitizerHandler::AlignmentAssumption;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(false); // no offset.

    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
                                    EmitCheckTypeDescriptor(Ty)};
    llvm::Value *DynamicData[] = {Ptr, Alignment, OffsetValue};
    EmitCheck({std::make_pair(TheCheck, CheckOrdinal)}, CheckHandler,
              StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its Parent.
}
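
// Illustrative shape of the result for p = __builtin_assume_aligned(q, 32):
// the current block now ends with a conditional branch on TheCheck to the
// UBSan handler or to a new "cont" block, and the re-inserted assumption,
// e.g.
//   call void @llvm.assume(i1 true) [ "align"(ptr %q, i64 32) ]
// sits in "cont", after the check instead of before it.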

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}

llvm::Value *
CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
                                                      Stmt::Likelihood LH) {
  switch (LH) {
  case Stmt::LH_None:
    return Cond;
  case Stmt::LH_Likely:
  case Stmt::LH_Unlikely:
    // Don't generate llvm.expect on -O0 as the backend won't use it for
    // anything.
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      return Cond;
    llvm::Type *CondTy = Cond->getType();
    assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
    llvm::Function *FnExpect =
        CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
    llvm::Value *ExpectedValueOfCond =
        llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
    return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
                              Cond->getName() + ".expval");
  }
  llvm_unreachable("Unknown Likelihood");
}
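
// For example, at -O1 and above a branch condition marked [[likely]] becomes
// (illustrative):
//   %cond.expval = call i1 @llvm.expect.i1(i1 %cond, i1 true)
// and the branch then consumes %cond.expval instead of %cond.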

llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
                                                    unsigned NumElementsDst,
                                                    const llvm::Twine &Name) {
  auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
  unsigned NumElementsSrc = SrcTy->getNumElements();
  if (NumElementsSrc == NumElementsDst)
    return SrcVec;

  std::vector<int> ShuffleMask(NumElementsDst, -1);
  for (unsigned MaskIdx = 0;
       MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
    ShuffleMask[MaskIdx] = MaskIdx;

  return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
}
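
// E.g. widening a <4 x i1> bool vector to <8 x i1> emits (illustrative):
//   shufflevector <4 x i1> %src, <4 x i1> poison,
//                 <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison,
//                            i32 poison, i32 poison, i32 poison>
// where the -1 entries in ShuffleMask become poison lanes; narrowing simply
// drops the tail lanes.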

void CodeGenFunction::EmitPointerAuthOperandBundle(
    const CGPointerAuthInfo &PointerAuth,
    SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
  if (!PointerAuth.isSigned())
    return;

  auto *Key = Builder.getInt32(PointerAuth.getKey());

  llvm::Value *Discriminator = PointerAuth.getDiscriminator();
  if (!Discriminator)
    Discriminator = Builder.getSize(0);

  llvm::Value *Args[] = {Key, Discriminator};
  Bundles.emplace_back("ptrauth", Args);
}

static llvm::Value *EmitPointerAuthCommon(CodeGenFunction &CGF,
                                          const CGPointerAuthInfo &PointerAuth,
                                          llvm::Value *Pointer,
                                          unsigned IntrinsicID) {
  if (!PointerAuth)
    return Pointer;

  auto Key = CGF.Builder.getInt32(PointerAuth.getKey());

  llvm::Value *Discriminator = PointerAuth.getDiscriminator();
  if (!Discriminator) {
    Discriminator = CGF.Builder.getSize(0);
  }

  // Convert the pointer to intptr_t before signing it.
  auto OrigType = Pointer->getType();
  Pointer = CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy);

  // call i64 @llvm.ptrauth.sign.i64(i64 %pointer, i32 %key, i64 %discriminator)
  auto Intrinsic = CGF.CGM.getIntrinsic(IntrinsicID);
  Pointer = CGF.EmitRuntimeCall(Intrinsic, {Pointer, Key, Discriminator});

  // Convert back to the original type.
  Pointer = CGF.Builder.CreateIntToPtr(Pointer, OrigType);
  return Pointer;
}

llvm::Value *
CodeGenFunction::EmitPointerAuthSign(const CGPointerAuthInfo &PointerAuth,
                                     llvm::Value *Pointer) {
  if (!PointerAuth.shouldSign())
    return Pointer;
  return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
                               llvm::Intrinsic::ptrauth_sign);
}

static llvm::Value *EmitStrip(CodeGenFunction &CGF,
                              const CGPointerAuthInfo &PointerAuth,
                              llvm::Value *Pointer) {
  auto StripIntrinsic = CGF.CGM.getIntrinsic(llvm::Intrinsic::ptrauth_strip);

  auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
  // Convert the pointer to intptr_t before stripping it.
  auto OrigType = Pointer->getType();
  Pointer = CGF.EmitRuntimeCall(
      StripIntrinsic, {CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy), Key});
  return CGF.Builder.CreateIntToPtr(Pointer, OrigType);
}

llvm::Value *
CodeGenFunction::EmitPointerAuthAuth(const CGPointerAuthInfo &PointerAuth,
                                     llvm::Value *Pointer) {
  if (PointerAuth.shouldStrip()) {
    return EmitStrip(*this, PointerAuth, Pointer);
  }
  if (!PointerAuth.shouldAuth()) {
    return Pointer;
  }

  return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
                               llvm::Intrinsic::ptrauth_auth);
}
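
// Round-trip sketch (illustrative; i64 stands in for the target intptr_t):
//   %signed = call i64 @llvm.ptrauth.sign(i64 %ptr, i32 %key, i64 %disc)
//   %authed = call i64 @llvm.ptrauth.auth(i64 %signed, i32 %key, i64 %disc)
// while the shouldStrip() path instead emits
//   %raw = call i64 @llvm.ptrauth.strip(i64 %ptr, i32 %key)
// which removes the signature bits without verifying them.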

void CodeGenFunction::addInstToCurrentSourceAtom(
    llvm::Instruction *KeyInstruction, llvm::Value *Backup) {
  if (CGDebugInfo *DI = getDebugInfo())
    DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
}

void CodeGenFunction::addInstToSpecificSourceAtom(
    llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom) {
  if (CGDebugInfo *DI = getDebugInfo())
    DI->addInstToSpecificSourceAtom(KeyInstruction, Backup, Atom);
}

void CodeGenFunction::addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
                                             llvm::Value *Backup) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    ApplyAtomGroup Grp(getDebugInfo());
    DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
  }
}
