//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::TopLevelStmt:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
  case Decl::HLSLBuffer:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum: // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid: // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::ImplicitConceptSpecialization:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
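///
/// For example (illustrative only, not from the surrounding code), both
/// locals below reach this function, but only 'b' takes the static path:
/// \code
///   void f() { int a = 0; static int b = 0; }
/// \endcode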
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still need
  // to be emitted like static variables, e.g. a function-scope variable in
  // the constant address space in OpenCL.
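  //
  // An illustrative (hypothetical) OpenCL case: a function-scope
  //   __constant int Mask = 0xff;
  // has static storage duration and is emitted as a module-level global.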
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
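  // For a static local 'x' in a C function 'foo', for example, the result
  // below is "foo.x".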
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
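  // For example, the GNU extension "static int N asm("the_counter");" would
  // use the (hypothetical) label "the_counter" verbatim.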
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        llvm::PointerType::get(getLLVMContext(),
                               getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/ true);
    }
    return GV;
  }

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getValueType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        OldGV->getType()->getPointerAddressSpace());
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global.
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global.
    OldGV->replaceAllUsesWith(GV);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  bool NeedsDtor =
      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  GV->setConstant(
      D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (NeedsDtor && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/ false);
  }

  return GV;
}

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
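  // For example (hypothetical): "static int (*p)[n] = nullptr;" requires
  // evaluating 'n' here even though 'p' itself has a constant size.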
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such a variable may have when it gets here is
  // a no-op and should not be emitted.
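  // For instance (illustrative), a kernel-local "__shared__ int smem[256];"
  // must stay undef in the emitted IR, so any implicit zero-initializer
  // attached to it is skipped below.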
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
        : addr(addr), type(type), destroyer(destroyer),
          useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
          flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

    llvm::Value *NRVOFlag;
    Address Loc;
    QualType Ty;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
            CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    bool isRedundantBeforeReturn() override { return true; }
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      CGF.Builder.CreateStackRestore(V);
    }
  };

  struct KmpcAllocFree final : EHScopeStack::Cleanup {
    std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
    KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
        : AddrSizePair(AddrSizePair) {}
    void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
      auto &RT = CGF.CGM.getOpenMPRuntime();
      RT.getKmpcFreeShared(CGF, AddrSizePair);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
        : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different
      // from the type of the pointer. An example of this is
      //   void f(void* arg);
      //   __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
          CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. for a missing decl or the condition of an
    // if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress(CGF);
      if (needsCast) {
        srcAddr =
            srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability();
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // Check if the right hand side of the assignment is nonnull, if the left
  // hand side must be nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty =
        cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(*this), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be released immediately.
    [[fallthrough]];
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(*this), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
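///
/// For example (illustrative), the initializer {0, 0, 7, 0} needs only one
/// scalar store once the memory has been zeroed.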
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder,
                                        bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder, IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
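///
/// For instance (illustrative), "int a[100] = {[50] = 3};" is better emitted
/// as a bzero of the whole array plus one scalar store.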
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // use bzero plus stores if that will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided
/// not to use bzero.
/// FIXME: We could be more clever, as we are for bzero above, and generate
/// memset followed by stores. It's unclear that's worth the effort.
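///
/// For example (illustrative), a large buffer whose initializer is the byte
/// 0x2a repeated, as llvm::isBytewiseValue detects, becomes a single memset.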
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
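///
/// For instance (illustrative), "struct { int a, b; } s = {1, 2};" becomes
/// two scalar i32 stores instead of a memcpy when optimizing.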
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
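///
/// For a hypothetical "struct { char c; int i; }", for example, the padding
/// bytes after 'c' are materialized as an explicit i8 array filler.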
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, CacheEntry->getValueType(), Align);
}

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  return SrcPtr.withElementType(CGM.Int8Ty);
}

static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant, bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Loc.withElementType(Ty);
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
                                  IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small, use a handful of stores.
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      const llvm::StructLayout *Layout =
          CGM.getDataLayout().getStructLayout(STy);
      for (unsigned i = 0; i != constant->getNumOperands(); i++) {
        CharUnits CurOff = CharUnits::fromQuantity(Layout->getElementOffset(i));
        Address EltPtr = Builder.CreateConstInBoundsByteGEP(
            Loc.withElementType(CGM.Int8Ty), CurOff);
        emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                              constant->getAggregateElement(i), IsAutoInit);
      }
      return;
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      for (unsigned i = 0; i != ATy->getNumElements(); i++) {
        Address EltPtr = Builder.CreateConstGEP(
            Loc.withElementType(ATy->getElementType()), i);
        emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                              constant->getAggregateElement(i), IsAutoInit);
      }
      return;
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
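///
/// The marker emitted is roughly (assuming a fixed-size alloca):
/// \code
///   call void @llvm.lifetime.start.p0(i64 <size>, ptr <addr>)
/// \endcode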
llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::Value *SizeV = llvm::ConstantInt::get(
      Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}

void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension, store its QualType and the corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
  SmallVector<IdentifierInfo *, 4> VLAExprNames;

  // Break down the array into individual dimensions.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Generate a locally unique name for the size expression.
      Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
      SmallString<12> Buffer;
      StringRef NameRef = Name.toStringRef(Buffer);
      auto &Ident = getContext().Idents.getOwn(NameRef);
      VLAExprNames.push_back(&Ident);
      auto SizeExprAddr =
          CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  unsigned NameIdx = 0;
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
      auto QT = getContext().getIntTypeForBitwidth(
          SizeTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}
1427
1428/// EmitAutoVarAlloca - Emit the alloca and debug information for a
1429/// local variable. Does not emit initialization or destruction.
1430CodeGenFunction::AutoVarEmission
1431CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1432 QualType Ty = D.getType();
1433 assert(
1434 Ty.getAddressSpace() == LangAS::Default ||
1435 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1436
1437 AutoVarEmission emission(D);
1438
1439 bool isEscapingByRef = D.isEscapingByref();
1440 emission.IsEscapingByRef = isEscapingByRef;
1441
1442 CharUnits alignment = getContext().getDeclAlign(&D);
1443
1444 // If the type is variably-modified, emit all the VLA sizes for it.
1445 if (Ty->isVariablyModifiedType())
1446 EmitVariablyModifiedType(Ty);
1447
1448 auto *DI = getDebugInfo();
1449 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1450
1451 Address address = Address::invalid();
1452 Address AllocaAddr = Address::invalid();
1453 Address OpenMPLocalAddr = Address::invalid();
1454 if (CGM.getLangOpts().OpenMPIRBuilder)
1455 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(CGF&: *this, VD: &D);
1456 else
1457 OpenMPLocalAddr =
1458 getLangOpts().OpenMP
1459 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(CGF&: *this, VD: &D)
1460 : Address::invalid();
1461
1462 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1463
1464 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1465 address = OpenMPLocalAddr;
1466 AllocaAddr = OpenMPLocalAddr;
1467 } else if (Ty->isConstantSizeType()) {
1468 // If this value is an array or struct with a statically determinable
1469 // constant initializer, there are optimizations we can do.
1470 //
1471 // TODO: We should constant-evaluate the initializer of any variable,
1472 // as long as it is initialized by a constant expression. Currently,
1473 // isConstantInitializer produces wrong answers for structs with
1474 // reference or bitfield members, and a few other cases, and checking
1475 // for POD-ness protects us from some of these.
1476 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1477 (D.isConstexpr() ||
1478 ((Ty.isPODType(Context: getContext()) ||
1479 getContext().getBaseElementType(QT: Ty)->isObjCObjectPointerType()) &&
1480 D.getInit()->isConstantInitializer(Ctx&: getContext(), ForRef: false)))) {
1481
1482 // If the variable's a const type, and it's neither an NRVO
1483 // candidate nor a __block variable and has no mutable members,
1484 // emit it as a global instead.
1485      // The exception is a variable located in a non-constant address space
1486      // in OpenCL.
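      // For example (illustrative): 'const int lut[4] = {1, 2, 3, 4};' in a
      // function body can be emitted once as an internal global instead of
      // being re-materialized on the stack at every call.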
1487 bool NeedsDtor =
1488 D.needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor;
1489 if ((!getLangOpts().OpenCL ||
1490 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1491 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1492 !isEscapingByRef &&
1493 Ty.isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: !NeedsDtor))) {
1494 EmitStaticVarDecl(D, Linkage: llvm::GlobalValue::InternalLinkage);
1495
1496 // Signal this condition to later callbacks.
1497 emission.Addr = Address::invalid();
1498 assert(emission.wasEmittedAsGlobal());
1499 return emission;
1500 }
1501
1502 // Otherwise, tell the initialization code that we're in this case.
1503 emission.IsConstantAggregate = true;
1504 }
1505
1506 // A normal fixed sized variable becomes an alloca in the entry block,
1507 // unless:
1508 // - it's an NRVO variable.
1509 // - we are compiling OpenMP and it's an OpenMP local variable.
1510 if (NRVO) {
1511 // The named return value optimization: allocate this variable in the
1512 // return slot, so that we can elide the copy when returning this
1513 // variable (C++0x [class.copy]p34).
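      // For example (illustrative): in 'Widget make() { Widget w; ...;
      // return w; }', 'w' is constructed directly in the caller-provided
      // return slot and the copy on return is elided.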
1514 address = ReturnValue;
1515 AllocaAddr = ReturnValue;
1516
1517 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1518 const auto *RD = RecordTy->getDecl();
1519 const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1520 if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1521 RD->isNonTrivialToPrimitiveDestroy()) {
1522 // Create a flag that is used to indicate when the NRVO was applied
1523 // to this variable. Set it to zero to indicate that NRVO was not
1524 // applied.
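        // For example (illustrative): in
        //   Widget f(bool b) { Widget w; if (b) return w; return Widget(); }
        // the flag is set only on the 'return w' path, telling the cleanup to
        // skip destroying 'w' there because ownership passed to the caller.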
1525 llvm::Value *Zero = Builder.getFalse();
1526 Address NRVOFlag =
1527 CreateTempAlloca(Ty: Zero->getType(), align: CharUnits::One(), Name: "nrvo");
1528 EnsureInsertPoint();
1529 Builder.CreateStore(Val: Zero, Addr: NRVOFlag);
1530
1531 // Record the NRVO flag for this variable.
1532 NRVOFlags[&D] = NRVOFlag.getPointer();
1533 emission.NRVOFlag = NRVOFlag.getPointer();
1534 }
1535 }
1536 } else {
1537 CharUnits allocaAlignment;
1538 llvm::Type *allocaTy;
1539 if (isEscapingByRef) {
1540 auto &byrefInfo = getBlockByrefInfo(var: &D);
1541 allocaTy = byrefInfo.Type;
1542 allocaAlignment = byrefInfo.ByrefAlignment;
1543 } else {
1544 allocaTy = ConvertTypeForMem(T: Ty);
1545 allocaAlignment = alignment;
1546 }
1547
1548 // Create the alloca. Note that we set the name separately from
1549 // building the instruction so that it's there even in no-asserts
1550 // builds.
1551 address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1552 /*ArraySize=*/nullptr, &AllocaAddr);
1553
1554 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1555 // the catch parameter starts in the catchpad instruction, and we can't
1556 // insert code in those basic blocks.
1557 bool IsMSCatchParam =
1558 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1559
1560    // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1561    // if we don't have a valid insertion point.
1562 if (HaveInsertPoint() && !IsMSCatchParam) {
1563 // If there's a jump into the lifetime of this variable, its lifetime
1564 // gets broken up into several regions in IR, which requires more work
1565 // to handle correctly. For now, just omit the intrinsics; this is a
1566 // rare case, and it's better to just be conservatively correct.
1567 // PR28267.
1568 //
1569 // We have to do this in all language modes if there's a jump past the
1570 // declaration. We also have to do it in C if there's a jump to an
1571 // earlier point in the current block because non-VLA lifetimes begin as
1572 // soon as the containing block is entered, not when its variables
1573 // actually come into scope; suppressing the lifetime annotations
1574 // completely in this case is unnecessarily pessimistic, but again, this
1575 // is rare.
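      // For example (illustrative), the C bypass case looks like:
      //   goto skip; { int x = 0; skip: use(&x); }
      // where the jump enters x's block without executing its declaration.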
1576 if (!Bypasses.IsBypassed(D: &D) &&
1577 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1578 llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(Ty: allocaTy);
1579 emission.SizeForLifetimeMarkers =
1580 EmitLifetimeStart(Size, Addr: AllocaAddr.getPointer());
1581 }
1582 } else {
1583 assert(!emission.useLifetimeMarkers());
1584 }
1585 }
1586 } else {
1587 EnsureInsertPoint();
1588
1589 // Delayed globalization for variable length declarations. This ensures that
1590 // the expression representing the length has been emitted and can be used
1591 // by the definition of the VLA. Since this is an escaped declaration, in
1592 // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
1593 // deallocation call to __kmpc_free_shared() is emitted later.
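    // Illustrative IR shape (assuming the usual device runtime entry points):
    //   %shared = call ptr @__kmpc_alloc_shared(i64 %size)
    //   ...
    //   call void @__kmpc_free_shared(ptr %shared, i64 %size)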
1594 bool VarAllocated = false;
1595 if (getLangOpts().OpenMPIsTargetDevice) {
1596 auto &RT = CGM.getOpenMPRuntime();
1597 if (RT.isDelayedVariableLengthDecl(CGF&: *this, VD: &D)) {
1598 // Emit call to __kmpc_alloc_shared() instead of the alloca.
1599 std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
1600 RT.getKmpcAllocShared(CGF&: *this, VD: &D);
1601
1602 // Save the address of the allocation:
1603 LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
1604 CGM.getContext().getDeclAlign(&D),
1605 AlignmentSource::Decl);
1606 address = Base.getAddress(CGF&: *this);
1607
1608 // Push a cleanup block to emit the call to __kmpc_free_shared in the
1609 // appropriate location at the end of the scope of the
1610 // __kmpc_alloc_shared functions:
1611 pushKmpcAllocFree(Kind: NormalCleanup, AddrSizePair);
1612
1613 // Mark variable as allocated:
1614 VarAllocated = true;
1615 }
1616 }
1617
1618 if (!VarAllocated) {
1619 if (!DidCallStackSave) {
1620 // Save the stack.
1621 Address Stack =
1622 CreateDefaultAlignTempAlloca(Ty: AllocaInt8PtrTy, Name: "saved_stack");
1623
1624 llvm::Value *V = Builder.CreateStackSave();
1625 assert(V->getType() == AllocaInt8PtrTy);
1626 Builder.CreateStore(Val: V, Addr: Stack);
1627
1628 DidCallStackSave = true;
1629
1630 // Push a cleanup block and restore the stack there.
1631 // FIXME: in general circumstances, this should be an EH cleanup.
1632 pushStackRestore(kind: NormalCleanup, SPMem: Stack);
1633 }
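      // Illustrative IR shape for the save/restore pair (roughly):
      //   %saved_stack = call ptr @llvm.stacksave()
      //   ...VLA allocas...
      //   call void @llvm.stackrestore(ptr %saved_stack)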
1634
1635 auto VlaSize = getVLASize(vla: Ty);
1636 llvm::Type *llvmTy = ConvertTypeForMem(T: VlaSize.Type);
1637
1638 // Allocate memory for the array.
1639 address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1640 &AllocaAddr);
1641 }
1642
1643 // If we have debug info enabled, properly describe the VLA dimensions for
1644 // this type by registering the vla size expression for each of the
1645 // dimensions.
1646 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1647 }
1648
1649 setAddrOfLocalVar(VD: &D, Addr: address);
1650 emission.Addr = address;
1651 emission.AllocaAddr = AllocaAddr;
1652
1653 // Emit debug info for local var declaration.
1654 if (EmitDebugInfo && HaveInsertPoint()) {
1655 Address DebugAddr = address;
1656 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1657 DI->setLocation(D.getLocation());
1658
1659 // If NRVO, use a pointer to the return address.
1660 if (UsePointerValue) {
1661 DebugAddr = ReturnValuePointer;
1662 AllocaAddr = ReturnValuePointer;
1663 }
1664 (void)DI->EmitDeclareOfAutoVariable(Decl: &D, AI: AllocaAddr.getPointer(), Builder,
1665 UsePointerValue);
1666 }
1667
1668 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1669 EmitVarAnnotations(D: &D, V: address.getPointer());
1670
1671 // Make sure we call @llvm.lifetime.end.
1672 if (emission.useLifetimeMarkers())
1673 EHStack.pushCleanup<CallLifetimeEnd>(Kind: NormalEHLifetimeMarker,
1674 A: emission.getOriginalAllocatedAddress(),
1675 A: emission.getSizeForLifetimeMarkers());
1676
1677 return emission;
1678}
1679
1680static bool isCapturedBy(const VarDecl &, const Expr *);
1681
1682/// Determines whether the given __block variable is potentially
1683/// captured by the given statement.
1684static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1685 if (const Expr *E = dyn_cast<Expr>(Val: S))
1686 return isCapturedBy(Var, E);
1687 for (const Stmt *SubStmt : S->children())
1688 if (isCapturedBy(Var, S: SubStmt))
1689 return true;
1690 return false;
1691}
1692
1693/// Determines whether the given __block variable is potentially
1694/// captured by the given expression.
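/// For example (illustrative): for '__block int x;' whose initializer
/// contains '^{ return x; }', the block captures 'x', so the initializer is
/// emitted first and the result is then copied into the byref slot.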
1695static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1696 // Skip the most common kinds of expressions that make
1697 // hierarchy-walking expensive.
1698 E = E->IgnoreParenCasts();
1699
1700 if (const BlockExpr *BE = dyn_cast<BlockExpr>(Val: E)) {
1701 const BlockDecl *Block = BE->getBlockDecl();
1702 for (const auto &I : Block->captures()) {
1703 if (I.getVariable() == &Var)
1704 return true;
1705 }
1706
1707 // No need to walk into the subexpressions.
1708 return false;
1709 }
1710
1711 if (const StmtExpr *SE = dyn_cast<StmtExpr>(Val: E)) {
1712 const CompoundStmt *CS = SE->getSubStmt();
1713 for (const auto *BI : CS->body())
1714 if (const auto *BIE = dyn_cast<Expr>(Val: BI)) {
1715 if (isCapturedBy(Var, E: BIE))
1716 return true;
1717 }
1718 else if (const auto *DS = dyn_cast<DeclStmt>(Val: BI)) {
1719        // Special-case declarations.
1720 for (const auto *I : DS->decls()) {
1721 if (const auto *VD = dyn_cast<VarDecl>(Val: (I))) {
1722 const Expr *Init = VD->getInit();
1723 if (Init && isCapturedBy(Var, E: Init))
1724 return true;
1725 }
1726 }
1727 }
1728 else
1729        // FIXME: Conservatively assume that arbitrary statements cause
1730        // capturing. Later, add code to inspect statements for capture analysis.
1731 return true;
1732 return false;
1733 }
1734
1735 for (const Stmt *SubStmt : E->children())
1736 if (isCapturedBy(Var, SubStmt))
1737 return true;
1738
1739 return false;
1740}
1741
1742/// Determine whether the given initializer is trivial in the sense
1743/// that it requires no code to be generated.
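/// For example (illustrative): 'S s;' with a trivial default constructor
/// requires no code, while 'S s{};' does not qualify because it requires
/// zero-initialization.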
1744bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1745 if (!Init)
1746 return true;
1747
1748 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Val: Init))
1749 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1750 if (Constructor->isTrivial() &&
1751 Constructor->isDefaultConstructor() &&
1752 !Construct->requiresZeroInitialization())
1753 return true;
1754
1755 return false;
1756}
1757
1758void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1759 const VarDecl &D,
1760 Address Loc) {
1761 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1762 auto trivialAutoVarInitMaxSize =
1763 getContext().getLangOpts().TrivialAutoVarInitMaxSize;
1764 CharUnits Size = getContext().getTypeSizeInChars(T: type);
1765 bool isVolatile = type.isVolatileQualified();
1766 if (!Size.isZero()) {
1767    // We skip auto-init for variables based on their alloc size. Take this as
1768    // an example: "struct Foo {int x; char buff[1024];}" Assume the max-size
1769    // flag is 1023. All variables of type Foo will then be skipped. Ideally, we
1770    // would skip only the buff array and still auto-init x in this example.
1771    // TODO: Improve the size filtering to filter by member size.
1772 auto allocSize = CGM.getDataLayout().getTypeAllocSize(Ty: Loc.getElementType());
1773 switch (trivialAutoVarInit) {
1774 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1775 llvm_unreachable("Uninitialized handled by caller");
1776 case LangOptions::TrivialAutoVarInitKind::Zero:
1777 if (CGM.stopAutoInit())
1778 return;
1779 if (trivialAutoVarInitMaxSize > 0 &&
1780 allocSize > trivialAutoVarInitMaxSize)
1781 return;
1782 emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1783 break;
1784 case LangOptions::TrivialAutoVarInitKind::Pattern:
1785 if (CGM.stopAutoInit())
1786 return;
1787 if (trivialAutoVarInitMaxSize > 0 &&
1788 allocSize > trivialAutoVarInitMaxSize)
1789 return;
1790 emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1791 break;
1792 }
1793 return;
1794 }
1795
1796 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1797 // them, so emit a memcpy with the VLA size to initialize each element.
1798 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1799 // will catch that code, but there exists code which generates zero-sized
1800 // VLAs. Be nice and initialize whatever they requested.
1801 const auto *VlaType = getContext().getAsVariableArrayType(T: type);
1802 if (!VlaType)
1803 return;
1804 auto VlaSize = getVLASize(vla: VlaType);
1805 auto SizeVal = VlaSize.NumElts;
1806 CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1807 switch (trivialAutoVarInit) {
1808 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1809 llvm_unreachable("Uninitialized handled by caller");
1810
1811 case LangOptions::TrivialAutoVarInitKind::Zero: {
1812 if (CGM.stopAutoInit())
1813 return;
1814 if (!EltSize.isOne())
1815 SizeVal = Builder.CreateNUWMul(LHS: SizeVal, RHS: CGM.getSize(numChars: EltSize));
1816 auto *I = Builder.CreateMemSet(Dest: Loc, Value: llvm::ConstantInt::get(Ty: Int8Ty, V: 0),
1817 Size: SizeVal, IsVolatile: isVolatile);
1818 I->addAnnotationMetadata(Annotation: "auto-init");
1819 break;
1820 }
1821
1822 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1823 if (CGM.stopAutoInit())
1824 return;
1825 llvm::Type *ElTy = Loc.getElementType();
1826 llvm::Constant *Constant = constWithPadding(
1827 CGM, isPattern: IsPattern::Yes, constant: initializationPatternFor(CGM, ElTy));
1828 CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1829 llvm::BasicBlock *SetupBB = createBasicBlock(name: "vla-setup.loop");
1830 llvm::BasicBlock *LoopBB = createBasicBlock(name: "vla-init.loop");
1831 llvm::BasicBlock *ContBB = createBasicBlock(name: "vla-init.cont");
1832 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1833 LHS: SizeVal, RHS: llvm::ConstantInt::get(Ty: SizeVal->getType(), V: 0),
1834 Name: "vla.iszerosized");
1835 Builder.CreateCondBr(Cond: IsZeroSizedVLA, True: ContBB, False: SetupBB);
1836 EmitBlock(BB: SetupBB);
1837 if (!EltSize.isOne())
1838 SizeVal = Builder.CreateNUWMul(LHS: SizeVal, RHS: CGM.getSize(numChars: EltSize));
1839 llvm::Value *BaseSizeInChars =
1840 llvm::ConstantInt::get(Ty: IntPtrTy, V: EltSize.getQuantity());
1841 Address Begin = Loc.withElementType(ElemTy: Int8Ty);
1842 llvm::Value *End = Builder.CreateInBoundsGEP(
1843 Ty: Begin.getElementType(), Ptr: Begin.getPointer(), IdxList: SizeVal, Name: "vla.end");
1844 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1845 EmitBlock(BB: LoopBB);
1846 llvm::PHINode *Cur = Builder.CreatePHI(Ty: Begin.getType(), NumReservedValues: 2, Name: "vla.cur");
1847 Cur->addIncoming(V: Begin.getPointer(), BB: OriginBB);
1848 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(elementSize: EltSize);
1849 auto *I =
1850 Builder.CreateMemCpy(Dest: Address(Cur, Int8Ty, CurAlign),
1851 Src: createUnnamedGlobalForMemcpyFrom(
1852 CGM, D, Builder, Constant, Align: ConstantAlign),
1853 Size: BaseSizeInChars, IsVolatile: isVolatile);
1854 I->addAnnotationMetadata("auto-init");
1855 llvm::Value *Next =
1856 Builder.CreateInBoundsGEP(Ty: Int8Ty, Ptr: Cur, IdxList: BaseSizeInChars, Name: "vla.next");
1857 llvm::Value *Done = Builder.CreateICmpEQ(LHS: Next, RHS: End, Name: "vla-init.isdone");
1858 Builder.CreateCondBr(Cond: Done, True: ContBB, False: LoopBB);
1859 Cur->addIncoming(V: Next, BB: LoopBB);
1860 EmitBlock(BB: ContBB);
1861 } break;
1862 }
1863}
1864
1865void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1866 assert(emission.Variable && "emission was not valid!");
1867
1868 // If this was emitted as a global constant, we're done.
1869 if (emission.wasEmittedAsGlobal()) return;
1870
1871 const VarDecl &D = *emission.Variable;
1872 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF&: *this, TemporaryLocation: D.getLocation());
1873 QualType type = D.getType();
1874
1875 // If this local has an initializer, emit it now.
1876 const Expr *Init = D.getInit();
1877
1878 // If we are at an unreachable point, we don't need to emit the initializer
1879 // unless it contains a label.
1880 if (!HaveInsertPoint()) {
1881 if (!Init || !ContainsLabel(Init)) return;
1882 EnsureInsertPoint();
1883 }
1884
1885 // Initialize the structure of a __block variable.
1886 if (emission.IsEscapingByRef)
1887 emitByrefStructureInit(emission);
1888
1889  // Initialize the variable here if it doesn't have an initializer and it is a
1890 // C struct that is non-trivial to initialize or an array containing such a
1891 // struct.
1892 if (!Init &&
1893 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1894 QualType::PDIK_Struct) {
1895 LValue Dst = MakeAddrLValue(Addr: emission.getAllocatedAddress(), T: type);
1896 if (emission.IsEscapingByRef)
1897 drillIntoBlockVariable(CGF&: *this, lvalue&: Dst, var: &D);
1898 defaultInitNonTrivialCStructVar(Dst);
1899 return;
1900 }
1901
1902 // Check whether this is a byref variable that's potentially
1903 // captured and moved by its own initializer. If so, we'll need to
1904 // emit the initializer first, then copy into the variable.
1905 bool capturedByInit =
1906 Init && emission.IsEscapingByRef && isCapturedBy(Var: D, E: Init);
1907
1908 bool locIsByrefHeader = !capturedByInit;
1909 const Address Loc =
1910 locIsByrefHeader ? emission.getObjectAddress(CGF&: *this) : emission.Addr;
1911
1912 // Note: constexpr already initializes everything correctly.
1913 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1914 (D.isConstexpr()
1915 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1916 : (D.getAttr<UninitializedAttr>()
1917 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1918 : getContext().getLangOpts().getTrivialAutoVarInit()));
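  // For example (illustrative): under -ftrivial-auto-var-init=pattern, a
  // local annotated with __attribute__((uninitialized)) is still left
  // uninitialized.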
1919
1920 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1921 if (trivialAutoVarInit ==
1922 LangOptions::TrivialAutoVarInitKind::Uninitialized)
1923 return;
1924
1925 // Only initialize a __block's storage: we always initialize the header.
1926 if (emission.IsEscapingByRef && !locIsByrefHeader)
1927 Loc = emitBlockByrefAddress(baseAddr: Loc, V: &D, /*follow=*/followForward: false);
1928
1929 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1930 };
1931
1932 if (isTrivialInitializer(Init))
1933 return initializeWhatIsTechnicallyUninitialized(Loc);
1934
1935 llvm::Constant *constant = nullptr;
1936 if (emission.IsConstantAggregate ||
1937 D.mightBeUsableInConstantExpressions(C: getContext())) {
1938 assert(!capturedByInit && "constant init contains a capturing block?");
1939 constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1940 if (constant && !constant->isZeroValue() &&
1941 (trivialAutoVarInit !=
1942 LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1943 IsPattern isPattern =
1944 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1945 ? IsPattern::Yes
1946 : IsPattern::No;
1947 // C guarantees that brace-init with fewer initializers than members in
1948 // the aggregate will initialize the rest of the aggregate as-if it were
1949 // static initialization. In turn static initialization guarantees that
1950 // padding is initialized to zero bits. We could instead pattern-init if D
1951 // has any ImplicitValueInitExpr, but that seems to be unintuitive
1952 // behavior.
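      // For example (illustrative): 'int a[4] = {1};' zero-initializes
      // a[1..3] and any padding as-if statically initialized, so no pattern
      // bytes are written over those trailing elements.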
1953 constant = constWithPadding(CGM, isPattern: IsPattern::No,
1954 constant: replaceUndef(CGM, isPattern, constant));
1955 }
1956 }
1957
1958 if (!constant) {
1959 initializeWhatIsTechnicallyUninitialized(Loc);
1960 LValue lv = MakeAddrLValue(Addr: Loc, T: type);
1961 lv.setNonGC(true);
1962 return EmitExprAsInit(Init, &D, lv, capturedByInit);
1963 }
1964
1965 if (!emission.IsConstantAggregate) {
1966 // For simple scalar/complex initialization, store the value directly.
1967 LValue lv = MakeAddrLValue(Addr: Loc, T: type);
1968 lv.setNonGC(true);
1969 return EmitStoreThroughLValue(Src: RValue::get(V: constant), Dst: lv, isInit: true);
1970 }
1971
1972 emitStoresForConstant(CGM, D, Loc: Loc.withElementType(ElemTy: CGM.Int8Ty),
1973 isVolatile: type.isVolatileQualified(), Builder, constant,
1974 /*IsAutoInit=*/false);
1975}
1976
1977/// Emit an expression as an initializer for an object (variable, field, etc.)
1978/// at the given location. The expression is not necessarily the normal
1979/// initializer for the object, and the address is not necessarily
1980/// its normal location.
1981///
1982/// \param init the initializing expression
1983/// \param D the object to act as if we're initializing
1984/// \param lvalue the lvalue to initialize
1985/// \param capturedByInit true if \p D is a __block variable
1986/// whose address is potentially changed by the initializer
1987void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
1988 LValue lvalue, bool capturedByInit) {
1989 QualType type = D->getType();
1990
1991 if (type->isReferenceType()) {
1992 RValue rvalue = EmitReferenceBindingToExpr(E: init);
1993 if (capturedByInit)
1994 drillIntoBlockVariable(CGF&: *this, lvalue, var: cast<VarDecl>(Val: D));
1995 EmitStoreThroughLValue(Src: rvalue, Dst: lvalue, isInit: true);
1996 return;
1997 }
1998 switch (getEvaluationKind(T: type)) {
1999 case TEK_Scalar:
2000 EmitScalarInit(init, D, lvalue, capturedByInit);
2001 return;
2002 case TEK_Complex: {
2003 ComplexPairTy complex = EmitComplexExpr(E: init);
2004 if (capturedByInit)
2005 drillIntoBlockVariable(CGF&: *this, lvalue, var: cast<VarDecl>(Val: D));
2006 EmitStoreOfComplex(V: complex, dest: lvalue, /*init*/ isInit: true);
2007 return;
2008 }
2009 case TEK_Aggregate:
2010 if (type->isAtomicType()) {
2011 EmitAtomicInit(E: const_cast<Expr*>(init), lvalue);
2012 } else {
2013 AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
2014 if (isa<VarDecl>(Val: D))
2015 Overlap = AggValueSlot::DoesNotOverlap;
2016 else if (auto *FD = dyn_cast<FieldDecl>(Val: D))
2017 Overlap = getOverlapForFieldInit(FD);
2018 // TODO: how can we delay here if D is captured by its initializer?
2019 EmitAggExpr(E: init, AS: AggValueSlot::forLValue(
2020 LV: lvalue, CGF&: *this, isDestructed: AggValueSlot::IsDestructed,
2021 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
2022 isAliased: AggValueSlot::IsNotAliased, mayOverlap: Overlap));
2023 }
2024 return;
2025 }
2026 llvm_unreachable("bad evaluation kind");
2027}
2028
2029/// Enter a destroy cleanup for the given local variable.
2030void CodeGenFunction::emitAutoVarTypeCleanup(
2031 const CodeGenFunction::AutoVarEmission &emission,
2032 QualType::DestructionKind dtorKind) {
2033 assert(dtorKind != QualType::DK_none);
2034
2035 // Note that for __block variables, we want to destroy the
2036 // original stack object, not the possibly forwarded object.
2037 Address addr = emission.getObjectAddress(CGF&: *this);
2038
2039 const VarDecl *var = emission.Variable;
2040 QualType type = var->getType();
2041
2042 CleanupKind cleanupKind = NormalAndEHCleanup;
2043 CodeGenFunction::Destroyer *destroyer = nullptr;
2044
2045 switch (dtorKind) {
2046 case QualType::DK_none:
2047 llvm_unreachable("no cleanup for trivially-destructible variable");
2048
2049 case QualType::DK_cxx_destructor:
2050 // If there's an NRVO flag on the emission, we need a different
2051 // cleanup.
2052 if (emission.NRVOFlag) {
2053 assert(!type->isArrayType());
2054 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
2055 EHStack.pushCleanup<DestroyNRVOVariableCXX>(Kind: cleanupKind, A: addr, A: type, A: dtor,
2056 A: emission.NRVOFlag);
2057 return;
2058 }
2059 break;
2060
2061 case QualType::DK_objc_strong_lifetime:
2062 // Suppress cleanups for pseudo-strong variables.
2063 if (var->isARCPseudoStrong()) return;
2064
2065 // Otherwise, consider whether to use an EH cleanup or not.
2066 cleanupKind = getARCCleanupKind();
2067
2068 // Use the imprecise destroyer by default.
2069 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2070 destroyer = CodeGenFunction::destroyARCStrongImprecise;
2071 break;
2072
2073 case QualType::DK_objc_weak_lifetime:
2074 break;
2075
2076 case QualType::DK_nontrivial_c_struct:
2077 destroyer = CodeGenFunction::destroyNonTrivialCStruct;
2078 if (emission.NRVOFlag) {
2079 assert(!type->isArrayType());
2080 EHStack.pushCleanup<DestroyNRVOVariableC>(Kind: cleanupKind, A: addr,
2081 A: emission.NRVOFlag, A: type);
2082 return;
2083 }
2084 break;
2085 }
2086
2087 // If we haven't chosen a more specific destroyer, use the default.
2088 if (!destroyer) destroyer = getDestroyer(destructionKind: dtorKind);
2089
2090 // Use an EH cleanup in array destructors iff the destructor itself
2091 // is being pushed as an EH cleanup.
2092 bool useEHCleanup = (cleanupKind & EHCleanup);
2093 EHStack.pushCleanup<DestroyObject>(Kind: cleanupKind, A: addr, A: type, A: destroyer,
2094 A: useEHCleanup);
2095}
2096
2097void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2098 assert(emission.Variable && "emission was not valid!");
2099
2100 // If this was emitted as a global constant, we're done.
2101 if (emission.wasEmittedAsGlobal()) return;
2102
2103 // If we don't have an insertion point, we're done. Sema prevents
2104 // us from jumping into any of these scopes anyway.
2105 if (!HaveInsertPoint()) return;
2106
2107 const VarDecl &D = *emission.Variable;
2108
2109 // Check the type for a cleanup.
2110 if (QualType::DestructionKind dtorKind = D.needsDestruction(Ctx: getContext()))
2111 emitAutoVarTypeCleanup(emission, dtorKind);
2112
2113 // In GC mode, honor objc_precise_lifetime.
2114 if (getLangOpts().getGC() != LangOptions::NonGC &&
2115 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2116 EHStack.pushCleanup<ExtendGCLifetime>(Kind: NormalCleanup, A: &D);
2117 }
2118
2119 // Handle the cleanup attribute.
2120 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2121 const FunctionDecl *FD = CA->getFunctionDecl();
2122
2123 llvm::Constant *F = CGM.GetAddrOfFunction(GD: FD);
2124 assert(F && "Could not find function!");
2125
2126 const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2127 EHStack.pushCleanup<CallCleanupFunction>(Kind: NormalAndEHCleanup, A: F, A: &Info, A: &D);
2128 }
2129
2130 // If this is a block variable, call _Block_object_destroy
2131 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2132 // mode.
2133 if (emission.IsEscapingByRef &&
2134 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2135 BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2136 if (emission.Variable->getType().isObjCGCWeak())
2137 Flags |= BLOCK_FIELD_IS_WEAK;
2138 enterByrefCleanup(Kind: NormalAndEHCleanup, Addr: emission.Addr, Flags,
2139 /*LoadBlockVarAddr*/ false,
2140 CanThrow: cxxDestructorCanThrow(T: emission.Variable->getType()));
2141 }
2142}
2143
2144CodeGenFunction::Destroyer *
2145CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2146 switch (kind) {
2147 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2148 case QualType::DK_cxx_destructor:
2149 return destroyCXXObject;
2150 case QualType::DK_objc_strong_lifetime:
2151 return destroyARCStrongPrecise;
2152 case QualType::DK_objc_weak_lifetime:
2153 return destroyARCWeak;
2154 case QualType::DK_nontrivial_c_struct:
2155 return destroyNonTrivialCStruct;
2156 }
2157 llvm_unreachable("Unknown DestructionKind");
2158}
2159
2160/// pushEHDestroy - Push the standard destructor for the given type as
2161/// an EH-only cleanup.
2162void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2163 Address addr, QualType type) {
2164 assert(dtorKind && "cannot push destructor for trivial type");
2165 assert(needsEHCleanup(dtorKind));
2166
2167 pushDestroy(kind: EHCleanup, addr, type, destroyer: getDestroyer(kind: dtorKind), useEHCleanupForArray: true);
2168}
2169
2170/// pushDestroy - Push the standard destructor for the given type as
2171/// at least a normal cleanup.
2172void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2173 Address addr, QualType type) {
2174 assert(dtorKind && "cannot push destructor for trivial type");
2175
2176 CleanupKind cleanupKind = getCleanupKind(kind: dtorKind);
2177 pushDestroy(kind: cleanupKind, addr, type, destroyer: getDestroyer(kind: dtorKind),
2178 useEHCleanupForArray: cleanupKind & EHCleanup);
2179}
2180
2181void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2182 QualType type, Destroyer *destroyer,
2183 bool useEHCleanupForArray) {
2184 pushFullExprCleanup<DestroyObject>(kind: cleanupKind, A: addr, A: type,
2185 A: destroyer, A: useEHCleanupForArray);
2186}
2187
2188void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2189 EHStack.pushCleanup<CallStackRestore>(Kind, A: SPMem);
2190}
2191
2192void CodeGenFunction::pushKmpcAllocFree(
2193 CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
2194 EHStack.pushCleanup<KmpcAllocFree>(Kind, A: AddrSizePair);
2195}
2196
2197void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
2198 Address addr, QualType type,
2199 Destroyer *destroyer,
2200 bool useEHCleanupForArray) {
2201 // If we're not in a conditional branch, we don't need to bother generating a
2202 // conditional cleanup.
2203 if (!isInConditionalBranch()) {
2204 // Push an EH-only cleanup for the object now.
2205 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2206 // around in case a temporary's destructor throws an exception.
2207 if (cleanupKind & EHCleanup)
2208 EHStack.pushCleanup<DestroyObject>(
2209 Kind: static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), A: addr, A: type,
2210 A: destroyer, A: useEHCleanupForArray);
2211
2212 return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2213 Kind: cleanupKind, ActiveFlag: Address::invalid(), A: addr, A: type, A: destroyer, A: useEHCleanupForArray);
2214 }
2215
2216 // Otherwise, we should only destroy the object if it's been initialized.
2217 // Re-use the active flag and saved address across both the EH and end of
2218 // scope cleanups.
2219
2220 using SavedType = typename DominatingValue<Address>::saved_type;
2221 using ConditionalCleanupType =
2222 EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
2223 Destroyer *, bool>;
2224
2225 Address ActiveFlag = createCleanupActiveFlag();
2226 SavedType SavedAddr = saveValueInCond(value: addr);
2227
2228 if (cleanupKind & EHCleanup) {
2229 EHStack.pushCleanup<ConditionalCleanupType>(
2230 Kind: static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), A: SavedAddr, A: type,
2231 A: destroyer, A: useEHCleanupForArray);
2232 initFullExprCleanupWithFlag(ActiveFlag);
2233 }
2234
2235 pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2236 Kind: cleanupKind, ActiveFlag, A: SavedAddr, A: type, A: destroyer,
2237 A: useEHCleanupForArray);
2238}
2239
2240/// emitDestroy - Immediately perform the destruction of the given
2241/// object.
2242///
2243/// \param addr - the address of the object; a type*
2244/// \param type - the type of the object; if an array type, all
2245/// objects are destroyed in reverse order
2246/// \param destroyer - the function to call to destroy individual
2247/// elements
2248/// \param useEHCleanupForArray - whether an EH cleanup should be
2249/// used when destroying array elements, in case one of the
2250/// destructions throws an exception
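/// For example (illustrative): destroying 'S arr[3]' invokes ~S() on arr[2],
/// then arr[1], then arr[0].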
2251void CodeGenFunction::emitDestroy(Address addr, QualType type,
2252 Destroyer *destroyer,
2253 bool useEHCleanupForArray) {
2254 const ArrayType *arrayType = getContext().getAsArrayType(T: type);
2255 if (!arrayType)
2256 return destroyer(*this, addr, type);
2257
2258 llvm::Value *length = emitArrayLength(arrayType, baseType&: type, addr);
2259
2260 CharUnits elementAlign =
2261 addr.getAlignment()
2262 .alignmentOfArrayElement(elementSize: getContext().getTypeSizeInChars(T: type));
2263
2264 // Normally we have to check whether the array is zero-length.
2265 bool checkZeroLength = true;
2266
2267 // But if the array length is constant, we can suppress that.
2268 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(Val: length)) {
2269 // ...and if it's constant zero, we can just skip the entire thing.
2270 if (constLength->isZero()) return;
2271 checkZeroLength = false;
2272 }
2273
2274 llvm::Value *begin = addr.getPointer();
2275 llvm::Value *end =
2276 Builder.CreateInBoundsGEP(Ty: addr.getElementType(), Ptr: begin, IdxList: length);
2277 emitArrayDestroy(begin, end, elementType: type, elementAlign, destroyer,
2278 checkZeroLength, useEHCleanup: useEHCleanupForArray);
2279}
2280
2281/// emitArrayDestroy - Destroys all the elements of the given array,
2282/// beginning from last to first. The array cannot be zero-length.
2283///
2284/// \param begin - a type* denoting the first element of the array
2285/// \param end - a type* denoting one past the end of the array
2286/// \param elementType - the element type of the array
2287/// \param destroyer - the function to call to destroy elements
2288/// \param useEHCleanup - whether to push an EH cleanup to destroy
2289/// the remaining elements in case the destruction of a single
2290/// element throws
2291void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2292 llvm::Value *end,
2293 QualType elementType,
2294 CharUnits elementAlign,
2295 Destroyer *destroyer,
2296 bool checkZeroLength,
2297 bool useEHCleanup) {
2298 assert(!elementType->isArrayType());
2299
2300 // The basic structure here is a do-while loop, because we don't
2301 // need to check for the zero-element case.
2302 llvm::BasicBlock *bodyBB = createBasicBlock(name: "arraydestroy.body");
2303 llvm::BasicBlock *doneBB = createBasicBlock(name: "arraydestroy.done");
2304
2305 if (checkZeroLength) {
2306 llvm::Value *isEmpty = Builder.CreateICmpEQ(LHS: begin, RHS: end,
2307 Name: "arraydestroy.isempty");
2308 Builder.CreateCondBr(Cond: isEmpty, True: doneBB, False: bodyBB);
2309 }
2310
2311 // Enter the loop body, making that address the current address.
2312 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2313 EmitBlock(BB: bodyBB);
2314 llvm::PHINode *elementPast =
2315 Builder.CreatePHI(Ty: begin->getType(), NumReservedValues: 2, Name: "arraydestroy.elementPast");
2316 elementPast->addIncoming(V: end, BB: entryBB);
2317
2318 // Shift the address back by one element.
2319 llvm::Value *negativeOne = llvm::ConstantInt::get(Ty: SizeTy, V: -1, IsSigned: true);
2320 llvm::Type *llvmElementType = ConvertTypeForMem(T: elementType);
2321 llvm::Value *element = Builder.CreateInBoundsGEP(
2322 Ty: llvmElementType, Ptr: elementPast, IdxList: negativeOne, Name: "arraydestroy.element");
2323
2324 if (useEHCleanup)
2325 pushRegularPartialArrayCleanup(arrayBegin: begin, arrayEnd: element, elementType, elementAlignment: elementAlign,
2326 destroyer);
2327
2328 // Perform the actual destruction there.
2329 destroyer(*this, Address(element, llvmElementType, elementAlign),
2330 elementType);
2331
2332 if (useEHCleanup)
2333 PopCleanupBlock();
2334
2335 // Check whether we've reached the end.
2336 llvm::Value *done = Builder.CreateICmpEQ(LHS: element, RHS: begin, Name: "arraydestroy.done");
2337 Builder.CreateCondBr(Cond: done, True: doneBB, False: bodyBB);
2338 elementPast->addIncoming(V: element, BB: Builder.GetInsertBlock());
2339
2340 // Done.
2341 EmitBlock(BB: doneBB);
2342}
2343
2344/// Perform partial array destruction as if in an EH cleanup. Unlike
2345/// emitArrayDestroy, the element type here may still be an array type.
2346static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2347 llvm::Value *begin, llvm::Value *end,
2348 QualType type, CharUnits elementAlign,
2349 CodeGenFunction::Destroyer *destroyer) {
2350 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
2351
2352 // If the element type is itself an array, drill down.
2353 unsigned arrayDepth = 0;
2354 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(T: type)) {
2355 // VLAs don't require a GEP index to walk into.
2356 if (!isa<VariableArrayType>(Val: arrayType))
2357 arrayDepth++;
2358 type = arrayType->getElementType();
2359 }
2360
2361 if (arrayDepth) {
2362 llvm::Value *zero = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: 0);
2363
2364 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2365 begin = CGF.Builder.CreateInBoundsGEP(
2366 Ty: elemTy, Ptr: begin, IdxList: gepIndices, Name: "pad.arraybegin");
2367 end = CGF.Builder.CreateInBoundsGEP(
2368 Ty: elemTy, Ptr: end, IdxList: gepIndices, Name: "pad.arrayend");
2369 }
2370
2371 // Destroy the array. We don't ever need an EH cleanup because we
2372 // assume that we're in an EH cleanup ourselves, so a throwing
2373 // destructor causes an immediate terminate.
2374 CGF.emitArrayDestroy(begin, end, elementType: type, elementAlign, destroyer,
2375 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2376}
2377
2378namespace {
2379 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2380 /// array destroy where the end pointer is regularly determined and
2381 /// does not need to be loaded from a local.
2382 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2383 llvm::Value *ArrayBegin;
2384 llvm::Value *ArrayEnd;
2385 QualType ElementType;
2386 CodeGenFunction::Destroyer *Destroyer;
2387 CharUnits ElementAlign;
2388 public:
2389 RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2390 QualType elementType, CharUnits elementAlign,
2391 CodeGenFunction::Destroyer *destroyer)
2392 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2393 ElementType(elementType), Destroyer(destroyer),
2394 ElementAlign(elementAlign) {}
2395
2396 void Emit(CodeGenFunction &CGF, Flags flags) override {
2397 emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2398 ElementType, ElementAlign, Destroyer);
2399 }
2400 };
2401
2402 /// IrregularPartialArrayDestroy - a cleanup which performs a
2403 /// partial array destroy where the end pointer is irregularly
2404 /// determined and must be loaded from a local.
2405 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2406 llvm::Value *ArrayBegin;
2407 Address ArrayEndPointer;
2408 QualType ElementType;
2409 CodeGenFunction::Destroyer *Destroyer;
2410 CharUnits ElementAlign;
2411 public:
2412 IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2413 Address arrayEndPointer,
2414 QualType elementType,
2415 CharUnits elementAlign,
2416 CodeGenFunction::Destroyer *destroyer)
2417 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2418 ElementType(elementType), Destroyer(destroyer),
2419 ElementAlign(elementAlign) {}
2420
2421 void Emit(CodeGenFunction &CGF, Flags flags) override {
2422 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(Addr: ArrayEndPointer);
2423 emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2424 ElementType, ElementAlign, Destroyer);
2425 }
2426 };
2427} // end anonymous namespace
2428
2429/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
2430/// already-constructed elements of the given array. The cleanup
2431/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2432///
2433/// \param elementType - the immediate element type of the array;
2434/// possibly still an array type
2435void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2436 Address arrayEndPointer,
2437 QualType elementType,
2438 CharUnits elementAlign,
2439 Destroyer *destroyer) {
2440 pushFullExprCleanup<IrregularPartialArrayDestroy>(kind: EHCleanup,
2441 A: arrayBegin, A: arrayEndPointer,
2442 A: elementType, A: elementAlign,
2443 A: destroyer);
2444}
2445
2446/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2447/// already-constructed elements of the given array. The cleanup
2448/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2449///
2450/// \param elementType - the immediate element type of the array;
2451/// possibly still an array type
2452void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2453 llvm::Value *arrayEnd,
2454 QualType elementType,
2455 CharUnits elementAlign,
2456 Destroyer *destroyer) {
2457 pushFullExprCleanup<RegularPartialArrayDestroy>(kind: EHCleanup,
2458 A: arrayBegin, A: arrayEnd,
2459 A: elementType, A: elementAlign,
2460 A: destroyer);
2461}
2462
2463/// Lazily declare the @llvm.lifetime.start intrinsic.
2464llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2465 if (LifetimeStartFn)
2466 return LifetimeStartFn;
2467 LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2468 llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2469 return LifetimeStartFn;
2470}
2471
2472/// Lazily declare the @llvm.lifetime.end intrinsic.
2473llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2474 if (LifetimeEndFn)
2475 return LifetimeEndFn;
2476 LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2477 llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2478 return LifetimeEndFn;
2479}
2480
2481namespace {
2482 /// A cleanup to perform a release of an object at the end of a
2483 /// function. This is used to balance out the incoming +1 of a
2484 /// ns_consumed argument when we can't reasonably do that just by
2485 /// not doing the initial retain for a __block argument.
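  /// For example (illustrative): a parameter declared
  ///   void f(__attribute__((ns_consumed)) id x);
  /// arrives at +1; when that +1 cannot be balanced by simply skipping an
  /// initial retain, this cleanup releases it on function exit.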
2486 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2487 ConsumeARCParameter(llvm::Value *param,
2488 ARCPreciseLifetime_t precise)
2489 : Param(param), Precise(precise) {}
2490
2491 llvm::Value *Param;
2492 ARCPreciseLifetime_t Precise;
2493
2494 void Emit(CodeGenFunction &CGF, Flags flags) override {
2495 CGF.EmitARCRelease(value: Param, precise: Precise);
2496 }
2497 };
2498} // end anonymous namespace
2499
2500/// Emit an alloca (or GlobalValue depending on target)
2501/// for the specified parameter and set up LocalDeclMap.
2502void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2503 unsigned ArgNo) {
2504 bool NoDebugInfo = false;
2505 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2506 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2507 "Invalid argument to EmitParmDecl");
2508
2509 // Set the name of the parameter's initial value to make IR easier to
2510 // read. Don't modify the names of globals.
2511 if (!isa<llvm::GlobalValue>(Val: Arg.getAnyValue()))
2512 Arg.getAnyValue()->setName(D.getName());
2513
2514 QualType Ty = D.getType();
2515
2516 // Use better IR generation for certain implicit parameters.
2517 if (auto IPD = dyn_cast<ImplicitParamDecl>(Val: &D)) {
2518 // The only implicit argument a block has is its literal.
2519 // This may be passed as an inalloca'ed value on Windows x86.
2520 if (BlockInfo) {
2521 llvm::Value *V = Arg.isIndirect()
2522 ? Builder.CreateLoad(Addr: Arg.getIndirectAddress())
2523 : Arg.getDirectValue();
2524 setBlockContextParameter(D: IPD, argNum: ArgNo, ptr: V);
2525 return;
2526 }
2527    // Suppress debug info for ThreadPrivateVar parameters; otherwise it hides
2528    // the debug info of TLS variables.
2529 NoDebugInfo =
2530 (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
2531 }
2532
2533 Address DeclPtr = Address::invalid();
2534 Address AllocaPtr = Address::invalid();
2535 bool DoStore = false;
2536 bool IsScalar = hasScalarEvaluationKind(T: Ty);
2537 bool UseIndirectDebugAddress = false;
2538
2539 // If we already have a pointer to the argument, reuse the input pointer.
2540 if (Arg.isIndirect()) {
2541 DeclPtr = Arg.getIndirectAddress();
2542 DeclPtr = DeclPtr.withElementType(ElemTy: ConvertTypeForMem(T: Ty));
2543    // An indirect argument is in the alloca address space, which may differ
2544    // from the default address space.
2545 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2546 auto *V = DeclPtr.getPointer();
2547 AllocaPtr = DeclPtr;
2548
2549 // For truly ABI indirect arguments -- those that are not `byval` -- store
2550 // the address of the argument on the stack to preserve debug information.
2551 ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
2552 if (ArgInfo.isIndirect())
2553 UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
2554 if (UseIndirectDebugAddress) {
2555 auto PtrTy = getContext().getPointerType(T: Ty);
2556 AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
2557 D.getName() + ".indirect_addr");
2558 EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
2559 }
2560
2561 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2562 auto DestLangAS =
2563 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2564 if (SrcLangAS != DestLangAS) {
2565 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2566 CGM.getDataLayout().getAllocaAddrSpace());
2567 auto DestAS = getContext().getTargetAddressSpace(AS: DestLangAS);
2568 auto *T = llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: DestAS);
2569 DeclPtr =
2570 DeclPtr.withPointer(NewPointer: getTargetHooks().performAddrSpaceCast(
2571 *this, V, SrcLangAS, DestLangAS, T, true),
2572 IsKnownNonNull: DeclPtr.isKnownNonNull());
2573 }
2574
2575 // Push a destructor cleanup for this parameter if the ABI requires it.
2576 // Don't push a cleanup in a thunk for a method that will also emit a
2577 // cleanup.
2578 if (Ty->isRecordType() && !CurFuncIsThunk &&
2579 Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2580 if (QualType::DestructionKind DtorKind =
2581 D.needsDestruction(Ctx: getContext())) {
2582 assert((DtorKind == QualType::DK_cxx_destructor ||
2583 DtorKind == QualType::DK_nontrivial_c_struct) &&
2584 "unexpected destructor type");
2585 pushDestroy(dtorKind: DtorKind, addr: DeclPtr, type: Ty);
2586 CalleeDestructedParamCleanups[cast<ParmVarDecl>(Val: &D)] =
2587 EHStack.stable_begin();
2588 }
2589 }
2590 } else {
2591 // Check if the parameter address is controlled by OpenMP runtime.
2592 Address OpenMPLocalAddr =
2593 getLangOpts().OpenMP
2594 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(CGF&: *this, VD: &D)
2595 : Address::invalid();
2596 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2597 DeclPtr = OpenMPLocalAddr;
2598 AllocaPtr = DeclPtr;
2599 } else {
2600 // Otherwise, create a temporary to hold the value.
2601 DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2602 D.getName() + ".addr", &AllocaPtr);
2603 }
2604 DoStore = true;
2605 }
2606
2607 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2608
2609 LValue lv = MakeAddrLValue(Addr: DeclPtr, T: Ty);
2610 if (IsScalar) {
2611 Qualifiers qs = Ty.getQualifiers();
2612 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2613 // We honor __attribute__((ns_consumed)) for types with lifetime.
2614 // For __strong, it's handled by just skipping the initial retain;
2615 // otherwise we have to balance out the initial +1 with an extra
2616 // cleanup to do the release at the end of the function.
2617 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2618
2619 // If a parameter is pseudo-strong then we can omit the implicit retain.
2620 if (D.isARCPseudoStrong()) {
2621 assert(lt == Qualifiers::OCL_Strong &&
2622 "pseudo-strong variable isn't strong?");
2623 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2624 lt = Qualifiers::OCL_ExplicitNone;
2625 }
2626
2627 // Load objects passed indirectly.
2628 if (Arg.isIndirect() && !ArgVal)
2629 ArgVal = Builder.CreateLoad(Addr: DeclPtr);
2630
2631 if (lt == Qualifiers::OCL_Strong) {
2632 if (!isConsumed) {
2633 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2634 // use objc_storeStrong(&dest, value) for retaining the
2635 // object. But first, store a null into 'dest' because
2636 // objc_storeStrong attempts to release its old value.
2637 llvm::Value *Null = CGM.EmitNullConstant(T: D.getType());
2638 EmitStoreOfScalar(value: Null, lvalue: lv, /* isInitialization */ isInit: true);
2639 EmitARCStoreStrongCall(addr: lv.getAddress(CGF&: *this), value: ArgVal, resultIgnored: true);
2640 DoStore = false;
2641 }
2642 else
2643 // Don't use objc_retainBlock for block pointers, because we
2644 // don't want to Block_copy something just because we got it
2645 // as a parameter.
2646 ArgVal = EmitARCRetainNonBlock(value: ArgVal);
2647 }
2648 } else {
2649 // Push the cleanup for a consumed parameter.
2650 if (isConsumed) {
2651 ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2652 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2653 EHStack.pushCleanup<ConsumeARCParameter>(Kind: getARCCleanupKind(), A: ArgVal,
2654 A: precise);
2655 }
2656
2657 if (lt == Qualifiers::OCL_Weak) {
2658 EmitARCInitWeak(addr: DeclPtr, value: ArgVal);
2659 DoStore = false; // The weak init is a store, no need to do two.
2660 }
2661 }
2662
2663 // Enter the cleanup scope.
2664 EmitAutoVarWithLifetime(CGF&: *this, var: D, addr: DeclPtr, lifetime: lt);
2665 }
2666 }
2667
2668 // Store the initial value into the alloca.
2669 if (DoStore)
2670 EmitStoreOfScalar(value: ArgVal, lvalue: lv, /* isInitialization */ isInit: true);
2671
2672 setAddrOfLocalVar(VD: &D, Addr: DeclPtr);
2673
2674 // Emit debug info for param declarations in non-thunk functions.
2675 if (CGDebugInfo *DI = getDebugInfo()) {
2676 if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
2677 !NoDebugInfo) {
2678 llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
2679 Decl: &D, AI: AllocaPtr.getPointer(), ArgNo, Builder, UsePointerValue: UseIndirectDebugAddress);
2680 if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(Val: &D))
2681 DI->getParamDbgMappings().insert(KV: {Var, DILocalVar});
2682 }
2683 }
2684
2685 if (D.hasAttr<AnnotateAttr>())
2686 EmitVarAnnotations(D: &D, V: DeclPtr.getPointer());
2687
2688 // We can only check return value nullability if all arguments to the
2689 // function satisfy their nullability preconditions. This makes it necessary
2690 // to emit null checks for args in the function body itself.
2691 if (requiresReturnValueNullabilityCheck()) {
2692 auto Nullability = Ty->getNullability();
2693 if (Nullability && *Nullability == NullabilityKind::NonNull) {
2694 SanitizerScope SanScope(this);
2695 RetValNullabilityPrecondition =
2696 Builder.CreateAnd(LHS: RetValNullabilityPrecondition,
2697 RHS: Builder.CreateIsNotNull(Arg: Arg.getAnyValue()));
2698 }
2699 }
2700}
2701
2702void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2703 CodeGenFunction *CGF) {
2704 if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2705 return;
2706 getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2707}
2708
2709void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2710 CodeGenFunction *CGF) {
2711 if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2712 (!LangOpts.EmitAllDecls && !D->isUsed()))
2713 return;
2714 getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2715}
2716
2717void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2718 getOpenMPRuntime().processRequiresDirective(D);
2719}
2720
2721void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
2722 for (const Expr *E : D->varlists()) {
2723 const auto *DE = cast<DeclRefExpr>(Val: E);
2724 const auto *VD = cast<VarDecl>(Val: DE->getDecl());
2725
2726 // Skip all but globals.
2727 if (!VD->hasGlobalStorage())
2728 continue;
2729
2730    // Check whether the global has been materialized yet. If not, we are done,
2731    // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2732    // we already emitted the global, we might have done so before the
2733    // OMPAllocateDeclAttr was attached, potentially leading to the wrong
2734    // address space. While not pretty, common practice is to remove the old IR
2735    // global and generate a new one, so we do that here too. Uses are replaced
2736    // properly.
2737 StringRef MangledName = getMangledName(GD: VD);
2738 llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName);
2739 if (!Entry)
2740 continue;
2741
2742 // We can also keep the existing global if the address space is what we
2743 // expect it to be, if not, it is replaced.
2744 QualType ASTTy = VD->getType();
2745 clang::LangAS GVAS = GetGlobalVarAddressSpace(D: VD);
2746 auto TargetAS = getContext().getTargetAddressSpace(AS: GVAS);
2747 if (Entry->getType()->getAddressSpace() == TargetAS)
2748 continue;
2749
2750 // Make a new global with the correct type / address space.
2751 llvm::Type *Ty = getTypes().ConvertTypeForMem(T: ASTTy);
2752 llvm::PointerType *PTy = llvm::PointerType::get(ElementType: Ty, AddressSpace: TargetAS);
2753
2754    // Replace all uses of the old global with a cast. Since we mutate the type
2755    // in place, we need an intermediate that takes the spot of the old entry
2756    // until we can create the cast.
2757 llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
2758 getModule(), Entry->getValueType(), false,
2759 llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
2760 llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
2761 Entry->replaceAllUsesWith(V: DummyGV);
2762
2763 Entry->mutateType(Ty: PTy);
2764 llvm::Constant *NewPtrForOldDecl =
2765 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2766 C: Entry, Ty: DummyGV->getType());
2767
2768 // Now we have a casted version of the changed global, the dummy can be
2769 // replaced and deleted.
2770 DummyGV->replaceAllUsesWith(V: NewPtrForOldDecl);
2771 DummyGV->eraseFromParent();
2772 }
2773}
2774
2775std::optional<CharUnits>
2776CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
2777 if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
2778 if (Expr *Alignment = AA->getAlignment()) {
2779 unsigned UserAlign =
2780 Alignment->EvaluateKnownConstInt(Ctx: getContext()).getExtValue();
2781 CharUnits NaturalAlign =
2782 getNaturalTypeAlignment(T: VD->getType().getNonReferenceType());
2783
2784      // OpenMP 5.1, p. 185, lines 7-10:
2785 // Each item in the align modifier list must be aligned to the maximum
2786 // of the specified alignment and the type's natural alignment.
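      // For example (illustrative): 'align(2)' on a variable of type double
      // (natural alignment 8) yields an effective alignment of 8.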
2787 return CharUnits::fromQuantity(
2788 Quantity: std::max<unsigned>(a: UserAlign, b: NaturalAlign.getQuantity()));
2789 }
2790 }
2791 return std::nullopt;
2792}
2793
