1 | //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This coordinates the per-module state used while generating code. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CodeGenModule.h" |
14 | #include "ABIInfo.h" |
15 | #include "CGBlocks.h" |
16 | #include "CGCUDARuntime.h" |
17 | #include "CGCXXABI.h" |
18 | #include "CGCall.h" |
19 | #include "CGDebugInfo.h" |
20 | #include "CGHLSLRuntime.h" |
21 | #include "CGObjCRuntime.h" |
22 | #include "CGOpenCLRuntime.h" |
23 | #include "CGOpenMPRuntime.h" |
24 | #include "CGOpenMPRuntimeGPU.h" |
25 | #include "CodeGenFunction.h" |
26 | #include "CodeGenPGO.h" |
27 | #include "ConstantEmitter.h" |
28 | #include "CoverageMappingGen.h" |
29 | #include "TargetInfo.h" |
30 | #include "clang/AST/ASTContext.h" |
31 | #include "clang/AST/ASTLambda.h" |
32 | #include "clang/AST/CharUnits.h" |
33 | #include "clang/AST/Decl.h" |
34 | #include "clang/AST/DeclCXX.h" |
35 | #include "clang/AST/DeclObjC.h" |
36 | #include "clang/AST/DeclTemplate.h" |
37 | #include "clang/AST/Mangle.h" |
38 | #include "clang/AST/RecursiveASTVisitor.h" |
39 | #include "clang/AST/StmtVisitor.h" |
40 | #include "clang/Basic/Builtins.h" |
41 | #include "clang/Basic/CodeGenOptions.h" |
42 | #include "clang/Basic/Diagnostic.h" |
43 | #include "clang/Basic/Module.h" |
44 | #include "clang/Basic/SourceManager.h" |
45 | #include "clang/Basic/TargetInfo.h" |
46 | #include "clang/Basic/Version.h" |
47 | #include "clang/CodeGen/BackendUtil.h" |
48 | #include "clang/CodeGen/ConstantInitBuilder.h" |
49 | #include "clang/Frontend/FrontendDiagnostic.h" |
50 | #include "llvm/ADT/STLExtras.h" |
51 | #include "llvm/ADT/StringExtras.h" |
52 | #include "llvm/ADT/StringSwitch.h" |
53 | #include "llvm/Analysis/TargetLibraryInfo.h" |
54 | #include "llvm/BinaryFormat/ELF.h" |
55 | #include "llvm/IR/AttributeMask.h" |
56 | #include "llvm/IR/CallingConv.h" |
57 | #include "llvm/IR/DataLayout.h" |
58 | #include "llvm/IR/Intrinsics.h" |
59 | #include "llvm/IR/LLVMContext.h" |
60 | #include "llvm/IR/Module.h" |
61 | #include "llvm/IR/ProfileSummary.h" |
62 | #include "llvm/ProfileData/InstrProfReader.h" |
63 | #include "llvm/ProfileData/SampleProf.h" |
64 | #include "llvm/Support/CRC.h" |
65 | #include "llvm/Support/CodeGen.h" |
66 | #include "llvm/Support/CommandLine.h" |
67 | #include "llvm/Support/ConvertUTF.h" |
68 | #include "llvm/Support/ErrorHandling.h" |
69 | #include "llvm/Support/TimeProfiler.h" |
70 | #include "llvm/Support/xxhash.h" |
71 | #include "llvm/TargetParser/RISCVISAInfo.h" |
72 | #include "llvm/TargetParser/Triple.h" |
73 | #include "llvm/TargetParser/X86TargetParser.h" |
74 | #include "llvm/Transforms/Utils/BuildLibCalls.h" |
75 | #include <optional> |
76 | #include <set> |
77 | |
78 | using namespace clang; |
79 | using namespace CodeGen; |
80 | |
81 | static llvm::cl::opt<bool> LimitedCoverage( |
82 | "limited-coverage-experimental" , llvm::cl::Hidden, |
83 | llvm::cl::desc("Emit limited coverage mapping information (experimental)" )); |
84 | |
85 | static const char AnnotationSection[] = "llvm.metadata" ; |
86 | |
87 | static CGCXXABI *createCXXABI(CodeGenModule &CGM) { |
88 | switch (CGM.getContext().getCXXABIKind()) { |
89 | case TargetCXXABI::AppleARM64: |
90 | case TargetCXXABI::Fuchsia: |
91 | case TargetCXXABI::GenericAArch64: |
92 | case TargetCXXABI::GenericARM: |
93 | case TargetCXXABI::iOS: |
94 | case TargetCXXABI::WatchOS: |
95 | case TargetCXXABI::GenericMIPS: |
96 | case TargetCXXABI::GenericItanium: |
97 | case TargetCXXABI::WebAssembly: |
98 | case TargetCXXABI::XL: |
99 | return CreateItaniumCXXABI(CGM); |
100 | case TargetCXXABI::Microsoft: |
101 | return CreateMicrosoftCXXABI(CGM); |
102 | } |
103 | |
104 | llvm_unreachable("invalid C++ ABI kind" ); |
105 | } |
106 | |
107 | static std::unique_ptr<TargetCodeGenInfo> |
108 | createTargetCodeGenInfo(CodeGenModule &CGM) { |
109 | const TargetInfo &Target = CGM.getTarget(); |
110 | const llvm::Triple &Triple = Target.getTriple(); |
111 | const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts(); |
112 | |
113 | switch (Triple.getArch()) { |
114 | default: |
115 | return createDefaultTargetCodeGenInfo(CGM); |
116 | |
117 | case llvm::Triple::m68k: |
118 | return createM68kTargetCodeGenInfo(CGM); |
119 | case llvm::Triple::mips: |
120 | case llvm::Triple::mipsel: |
121 | if (Triple.getOS() == llvm::Triple::NaCl) |
122 | return createPNaClTargetCodeGenInfo(CGM); |
123 | else if (Triple.getOS() == llvm::Triple::Win32) |
124 | return createWindowsMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true); |
125 | return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true); |
126 | |
127 | case llvm::Triple::mips64: |
128 | case llvm::Triple::mips64el: |
129 | return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/false); |
130 | |
131 | case llvm::Triple::avr: { |
132 | // For passing parameters, R8~R25 are used on avr, and R18~R25 are used |
133 | // on avrtiny. For passing return value, R18~R25 are used on avr, and |
134 | // R22~R25 are used on avrtiny. |
135 | unsigned NPR = Target.getABI() == "avrtiny" ? 6 : 18; |
136 | unsigned NRR = Target.getABI() == "avrtiny" ? 4 : 8; |
137 | return createAVRTargetCodeGenInfo(CGM, NPR, NRR); |
138 | } |
139 | |
140 | case llvm::Triple::aarch64: |
141 | case llvm::Triple::aarch64_32: |
142 | case llvm::Triple::aarch64_be: { |
143 | AArch64ABIKind Kind = AArch64ABIKind::AAPCS; |
144 | if (Target.getABI() == "darwinpcs" ) |
145 | Kind = AArch64ABIKind::DarwinPCS; |
146 | else if (Triple.isOSWindows()) |
147 | return createWindowsAArch64TargetCodeGenInfo(CGM, K: AArch64ABIKind::Win64); |
148 | else if (Target.getABI() == "aapcs-soft" ) |
149 | Kind = AArch64ABIKind::AAPCSSoft; |
150 | else if (Target.getABI() == "pauthtest" ) |
151 | Kind = AArch64ABIKind::PAuthTest; |
152 | |
153 | return createAArch64TargetCodeGenInfo(CGM, Kind); |
154 | } |
155 | |
156 | case llvm::Triple::wasm32: |
157 | case llvm::Triple::wasm64: { |
158 | WebAssemblyABIKind Kind = WebAssemblyABIKind::MVP; |
159 | if (Target.getABI() == "experimental-mv" ) |
160 | Kind = WebAssemblyABIKind::ExperimentalMV; |
161 | return createWebAssemblyTargetCodeGenInfo(CGM, K: Kind); |
162 | } |
163 | |
164 | case llvm::Triple::arm: |
165 | case llvm::Triple::armeb: |
166 | case llvm::Triple::thumb: |
167 | case llvm::Triple::thumbeb: { |
168 | if (Triple.getOS() == llvm::Triple::Win32) |
169 | return createWindowsARMTargetCodeGenInfo(CGM, K: ARMABIKind::AAPCS_VFP); |
170 | |
171 | ARMABIKind Kind = ARMABIKind::AAPCS; |
172 | StringRef ABIStr = Target.getABI(); |
173 | if (ABIStr == "apcs-gnu" ) |
174 | Kind = ARMABIKind::APCS; |
175 | else if (ABIStr == "aapcs16" ) |
176 | Kind = ARMABIKind::AAPCS16_VFP; |
177 | else if (CodeGenOpts.FloatABI == "hard" || |
178 | (CodeGenOpts.FloatABI != "soft" && Triple.isHardFloatABI())) |
179 | Kind = ARMABIKind::AAPCS_VFP; |
180 | |
181 | return createARMTargetCodeGenInfo(CGM, Kind); |
182 | } |
183 | |
184 | case llvm::Triple::ppc: { |
185 | if (Triple.isOSAIX()) |
186 | return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/false); |
187 | |
188 | bool IsSoftFloat = |
189 | CodeGenOpts.FloatABI == "soft" || Target.hasFeature(Feature: "spe" ); |
190 | return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat); |
191 | } |
192 | case llvm::Triple::ppcle: { |
193 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft" ; |
194 | return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat); |
195 | } |
196 | case llvm::Triple::ppc64: |
197 | if (Triple.isOSAIX()) |
198 | return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/true); |
199 | |
200 | if (Triple.isOSBinFormatELF()) { |
201 | PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv1; |
202 | if (Target.getABI() == "elfv2" ) |
203 | Kind = PPC64_SVR4_ABIKind::ELFv2; |
204 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft" ; |
205 | |
206 | return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat); |
207 | } |
208 | return createPPC64TargetCodeGenInfo(CGM); |
209 | case llvm::Triple::ppc64le: { |
210 | assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!" ); |
211 | PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv2; |
212 | if (Target.getABI() == "elfv1" ) |
213 | Kind = PPC64_SVR4_ABIKind::ELFv1; |
214 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft" ; |
215 | |
216 | return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat); |
217 | } |
218 | |
219 | case llvm::Triple::nvptx: |
220 | case llvm::Triple::nvptx64: |
221 | return createNVPTXTargetCodeGenInfo(CGM); |
222 | |
223 | case llvm::Triple::msp430: |
224 | return createMSP430TargetCodeGenInfo(CGM); |
225 | |
226 | case llvm::Triple::riscv32: |
227 | case llvm::Triple::riscv64: { |
228 | StringRef ABIStr = Target.getABI(); |
229 | unsigned XLen = Target.getPointerWidth(AddrSpace: LangAS::Default); |
230 | unsigned ABIFLen = 0; |
231 | if (ABIStr.ends_with(Suffix: "f" )) |
232 | ABIFLen = 32; |
233 | else if (ABIStr.ends_with(Suffix: "d" )) |
234 | ABIFLen = 64; |
235 | bool EABI = ABIStr.ends_with(Suffix: "e" ); |
236 | return createRISCVTargetCodeGenInfo(CGM, XLen, FLen: ABIFLen, EABI); |
237 | } |
238 | |
239 | case llvm::Triple::systemz: { |
240 | bool SoftFloat = CodeGenOpts.FloatABI == "soft" ; |
241 | bool HasVector = !SoftFloat && Target.getABI() == "vector" ; |
242 | return createSystemZTargetCodeGenInfo(CGM, HasVector, SoftFloatABI: SoftFloat); |
243 | } |
244 | |
245 | case llvm::Triple::tce: |
246 | case llvm::Triple::tcele: |
247 | return createTCETargetCodeGenInfo(CGM); |
248 | |
249 | case llvm::Triple::x86: { |
250 | bool IsDarwinVectorABI = Triple.isOSDarwin(); |
251 | bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); |
252 | |
253 | if (Triple.getOS() == llvm::Triple::Win32) { |
254 | return createWinX86_32TargetCodeGenInfo( |
255 | CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI, |
256 | NumRegisterParameters: CodeGenOpts.NumRegisterParameters); |
257 | } |
258 | return createX86_32TargetCodeGenInfo( |
259 | CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI, |
260 | NumRegisterParameters: CodeGenOpts.NumRegisterParameters, SoftFloatABI: CodeGenOpts.FloatABI == "soft" ); |
261 | } |
262 | |
263 | case llvm::Triple::x86_64: { |
264 | StringRef ABI = Target.getABI(); |
265 | X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 |
266 | : ABI == "avx" ? X86AVXABILevel::AVX |
267 | : X86AVXABILevel::None); |
268 | |
269 | switch (Triple.getOS()) { |
270 | case llvm::Triple::UEFI: |
271 | case llvm::Triple::Win32: |
272 | return createWinX86_64TargetCodeGenInfo(CGM, AVXLevel); |
273 | default: |
274 | return createX86_64TargetCodeGenInfo(CGM, AVXLevel); |
275 | } |
276 | } |
277 | case llvm::Triple::hexagon: |
278 | return createHexagonTargetCodeGenInfo(CGM); |
279 | case llvm::Triple::lanai: |
280 | return createLanaiTargetCodeGenInfo(CGM); |
281 | case llvm::Triple::r600: |
282 | return createAMDGPUTargetCodeGenInfo(CGM); |
283 | case llvm::Triple::amdgcn: |
284 | return createAMDGPUTargetCodeGenInfo(CGM); |
285 | case llvm::Triple::sparc: |
286 | return createSparcV8TargetCodeGenInfo(CGM); |
287 | case llvm::Triple::sparcv9: |
288 | return createSparcV9TargetCodeGenInfo(CGM); |
289 | case llvm::Triple::xcore: |
290 | return createXCoreTargetCodeGenInfo(CGM); |
291 | case llvm::Triple::arc: |
292 | return createARCTargetCodeGenInfo(CGM); |
293 | case llvm::Triple::spir: |
294 | case llvm::Triple::spir64: |
295 | return createCommonSPIRTargetCodeGenInfo(CGM); |
296 | case llvm::Triple::spirv32: |
297 | case llvm::Triple::spirv64: |
298 | case llvm::Triple::spirv: |
299 | return createSPIRVTargetCodeGenInfo(CGM); |
300 | case llvm::Triple::dxil: |
301 | return createDirectXTargetCodeGenInfo(CGM); |
302 | case llvm::Triple::ve: |
303 | return createVETargetCodeGenInfo(CGM); |
304 | case llvm::Triple::csky: { |
305 | bool IsSoftFloat = !Target.hasFeature(Feature: "hard-float-abi" ); |
306 | bool hasFP64 = |
307 | Target.hasFeature(Feature: "fpuv2_df" ) || Target.hasFeature(Feature: "fpuv3_df" ); |
308 | return createCSKYTargetCodeGenInfo(CGM, FLen: IsSoftFloat ? 0 |
309 | : hasFP64 ? 64 |
310 | : 32); |
311 | } |
312 | case llvm::Triple::bpfeb: |
313 | case llvm::Triple::bpfel: |
314 | return createBPFTargetCodeGenInfo(CGM); |
315 | case llvm::Triple::loongarch32: |
316 | case llvm::Triple::loongarch64: { |
317 | StringRef ABIStr = Target.getABI(); |
318 | unsigned ABIFRLen = 0; |
319 | if (ABIStr.ends_with(Suffix: "f" )) |
320 | ABIFRLen = 32; |
321 | else if (ABIStr.ends_with(Suffix: "d" )) |
322 | ABIFRLen = 64; |
323 | return createLoongArchTargetCodeGenInfo( |
324 | CGM, GRLen: Target.getPointerWidth(AddrSpace: LangAS::Default), FLen: ABIFRLen); |
325 | } |
326 | } |
327 | } |
328 | |
329 | const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { |
330 | if (!TheTargetCodeGenInfo) |
331 | TheTargetCodeGenInfo = createTargetCodeGenInfo(CGM&: *this); |
332 | return *TheTargetCodeGenInfo; |
333 | } |
334 | |
335 | CodeGenModule::(ASTContext &C, |
336 | IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS, |
337 | const HeaderSearchOptions &HSO, |
338 | const PreprocessorOptions &PPO, |
339 | const CodeGenOptions &CGO, llvm::Module &M, |
340 | DiagnosticsEngine &diags, |
341 | CoverageSourceInfo *CoverageInfo) |
342 | : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO), |
343 | PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags), |
344 | Target(C.getTargetInfo()), ABI(createCXXABI(CGM&: *this)), |
345 | VMContext(M.getContext()), VTables(*this), StackHandler(diags), |
346 | SanitizerMD(new SanitizerMetadata(*this)), |
347 | AtomicOpts(Target.getAtomicOpts()) { |
348 | |
349 | // Initialize the type cache. |
350 | Types.reset(p: new CodeGenTypes(*this)); |
351 | llvm::LLVMContext &LLVMContext = M.getContext(); |
352 | VoidTy = llvm::Type::getVoidTy(C&: LLVMContext); |
353 | Int8Ty = llvm::Type::getInt8Ty(C&: LLVMContext); |
354 | Int16Ty = llvm::Type::getInt16Ty(C&: LLVMContext); |
355 | Int32Ty = llvm::Type::getInt32Ty(C&: LLVMContext); |
356 | Int64Ty = llvm::Type::getInt64Ty(C&: LLVMContext); |
357 | HalfTy = llvm::Type::getHalfTy(C&: LLVMContext); |
358 | BFloatTy = llvm::Type::getBFloatTy(C&: LLVMContext); |
359 | FloatTy = llvm::Type::getFloatTy(C&: LLVMContext); |
360 | DoubleTy = llvm::Type::getDoubleTy(C&: LLVMContext); |
361 | PointerWidthInBits = C.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default); |
362 | PointerAlignInBytes = |
363 | C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getPointerAlign(AddrSpace: LangAS::Default)) |
364 | .getQuantity(); |
365 | SizeSizeInBytes = |
366 | C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getMaxPointerWidth()).getQuantity(); |
367 | IntAlignInBytes = |
368 | C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getIntAlign()).getQuantity(); |
369 | CharTy = |
370 | llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getCharWidth()); |
371 | IntTy = llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getIntWidth()); |
372 | IntPtrTy = llvm::IntegerType::get(C&: LLVMContext, |
373 | NumBits: C.getTargetInfo().getMaxPointerWidth()); |
374 | Int8PtrTy = llvm::PointerType::get(C&: LLVMContext, |
375 | AddressSpace: C.getTargetAddressSpace(AS: LangAS::Default)); |
376 | const llvm::DataLayout &DL = M.getDataLayout(); |
377 | AllocaInt8PtrTy = |
378 | llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getAllocaAddrSpace()); |
379 | GlobalsInt8PtrTy = |
380 | llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getDefaultGlobalsAddressSpace()); |
381 | ConstGlobalsPtrTy = llvm::PointerType::get( |
382 | C&: LLVMContext, AddressSpace: C.getTargetAddressSpace(AS: GetGlobalConstantAddressSpace())); |
383 | ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace(); |
384 | |
385 | // Build C++20 Module initializers. |
386 | // TODO: Add Microsoft here once we know the mangling required for the |
387 | // initializers. |
388 | CXX20ModuleInits = |
389 | LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() == |
390 | ItaniumMangleContext::MK_Itanium; |
391 | |
392 | RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC(); |
393 | |
394 | if (LangOpts.ObjC) |
395 | createObjCRuntime(); |
396 | if (LangOpts.OpenCL) |
397 | createOpenCLRuntime(); |
398 | if (LangOpts.OpenMP) |
399 | createOpenMPRuntime(); |
400 | if (LangOpts.CUDA) |
401 | createCUDARuntime(); |
402 | if (LangOpts.HLSL) |
403 | createHLSLRuntime(); |
404 | |
405 | // Enable TBAA unless it's suppressed. TSan and TySan need TBAA even at O0. |
406 | if (LangOpts.Sanitize.hasOneOf(K: SanitizerKind::Thread | SanitizerKind::Type) || |
407 | (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)) |
408 | TBAA.reset(p: new CodeGenTBAA(Context, getTypes(), TheModule, CodeGenOpts, |
409 | getLangOpts())); |
410 | |
411 | // If debug info or coverage generation is enabled, create the CGDebugInfo |
412 | // object. |
413 | if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo || |
414 | CodeGenOpts.CoverageNotesFile.size() || |
415 | CodeGenOpts.CoverageDataFile.size()) |
416 | DebugInfo.reset(p: new CGDebugInfo(*this)); |
417 | |
418 | Block.GlobalUniqueCount = 0; |
419 | |
420 | if (C.getLangOpts().ObjC) |
421 | ObjCData.reset(p: new ObjCEntrypoints()); |
422 | |
423 | if (CodeGenOpts.hasProfileClangUse()) { |
424 | auto ReaderOrErr = llvm::IndexedInstrProfReader::create( |
425 | Path: CodeGenOpts.ProfileInstrumentUsePath, FS&: *FS, |
426 | RemappingPath: CodeGenOpts.ProfileRemappingFile); |
427 | // We're checking for profile read errors in CompilerInvocation, so if |
428 | // there was an error it should've already been caught. If it hasn't been |
429 | // somehow, trip an assertion. |
430 | assert(ReaderOrErr); |
431 | PGOReader = std::move(ReaderOrErr.get()); |
432 | } |
433 | |
434 | // If coverage mapping generation is enabled, create the |
435 | // CoverageMappingModuleGen object. |
436 | if (CodeGenOpts.CoverageMapping) |
437 | CoverageMapping.reset(p: new CoverageMappingModuleGen(*this, *CoverageInfo)); |
438 | |
439 | // Generate the module name hash here if needed. |
440 | if (CodeGenOpts.UniqueInternalLinkageNames && |
441 | !getModule().getSourceFileName().empty()) { |
442 | std::string Path = getModule().getSourceFileName(); |
443 | // Check if a path substitution is needed from the MacroPrefixMap. |
444 | for (const auto &Entry : LangOpts.MacroPrefixMap) |
445 | if (Path.rfind(str: Entry.first, pos: 0) != std::string::npos) { |
446 | Path = Entry.second + Path.substr(pos: Entry.first.size()); |
447 | break; |
448 | } |
449 | ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(FName: Path); |
450 | } |
451 | |
452 | // Record mregparm value now so it is visible through all of codegen. |
453 | if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86) |
454 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "NumRegisterParameters" , |
455 | Val: CodeGenOpts.NumRegisterParameters); |
456 | } |
457 | |
458 | CodeGenModule::~CodeGenModule() {} |
459 | |
460 | void CodeGenModule::createObjCRuntime() { |
461 | // This is just isGNUFamily(), but we want to force implementors of |
462 | // new ABIs to decide how best to do this. |
463 | switch (LangOpts.ObjCRuntime.getKind()) { |
464 | case ObjCRuntime::GNUstep: |
465 | case ObjCRuntime::GCC: |
466 | case ObjCRuntime::ObjFW: |
467 | ObjCRuntime.reset(p: CreateGNUObjCRuntime(CGM&: *this)); |
468 | return; |
469 | |
470 | case ObjCRuntime::FragileMacOSX: |
471 | case ObjCRuntime::MacOSX: |
472 | case ObjCRuntime::iOS: |
473 | case ObjCRuntime::WatchOS: |
474 | ObjCRuntime.reset(p: CreateMacObjCRuntime(CGM&: *this)); |
475 | return; |
476 | } |
477 | llvm_unreachable("bad runtime kind" ); |
478 | } |
479 | |
480 | void CodeGenModule::createOpenCLRuntime() { |
481 | OpenCLRuntime.reset(p: new CGOpenCLRuntime(*this)); |
482 | } |
483 | |
484 | void CodeGenModule::createOpenMPRuntime() { |
485 | // Select a specialized code generation class based on the target, if any. |
486 | // If it does not exist use the default implementation. |
487 | switch (getTriple().getArch()) { |
488 | case llvm::Triple::nvptx: |
489 | case llvm::Triple::nvptx64: |
490 | case llvm::Triple::amdgcn: |
491 | case llvm::Triple::spirv64: |
492 | assert( |
493 | getLangOpts().OpenMPIsTargetDevice && |
494 | "OpenMP AMDGPU/NVPTX/SPIRV is only prepared to deal with device code." ); |
495 | OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this)); |
496 | break; |
497 | default: |
498 | if (LangOpts.OpenMPSimd) |
499 | OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this)); |
500 | else |
501 | OpenMPRuntime.reset(p: new CGOpenMPRuntime(*this)); |
502 | break; |
503 | } |
504 | } |
505 | |
506 | void CodeGenModule::createCUDARuntime() { |
507 | CUDARuntime.reset(p: CreateNVCUDARuntime(CGM&: *this)); |
508 | } |
509 | |
510 | void CodeGenModule::createHLSLRuntime() { |
511 | HLSLRuntime.reset(p: new CGHLSLRuntime(*this)); |
512 | } |
513 | |
514 | void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) { |
515 | Replacements[Name] = C; |
516 | } |
517 | |
518 | void CodeGenModule::applyReplacements() { |
519 | for (auto &I : Replacements) { |
520 | StringRef MangledName = I.first; |
521 | llvm::Constant *Replacement = I.second; |
522 | llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName); |
523 | if (!Entry) |
524 | continue; |
525 | auto *OldF = cast<llvm::Function>(Val: Entry); |
526 | auto *NewF = dyn_cast<llvm::Function>(Val: Replacement); |
527 | if (!NewF) { |
528 | if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Val: Replacement)) { |
529 | NewF = dyn_cast<llvm::Function>(Val: Alias->getAliasee()); |
530 | } else { |
531 | auto *CE = cast<llvm::ConstantExpr>(Val: Replacement); |
532 | assert(CE->getOpcode() == llvm::Instruction::BitCast || |
533 | CE->getOpcode() == llvm::Instruction::GetElementPtr); |
534 | NewF = dyn_cast<llvm::Function>(Val: CE->getOperand(i_nocapture: 0)); |
535 | } |
536 | } |
537 | |
538 | // Replace old with new, but keep the old order. |
539 | OldF->replaceAllUsesWith(V: Replacement); |
540 | if (NewF) { |
541 | NewF->removeFromParent(); |
542 | OldF->getParent()->getFunctionList().insertAfter(where: OldF->getIterator(), |
543 | New: NewF); |
544 | } |
545 | OldF->eraseFromParent(); |
546 | } |
547 | } |
548 | |
549 | void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) { |
550 | GlobalValReplacements.push_back(Elt: std::make_pair(x&: GV, y&: C)); |
551 | } |
552 | |
553 | void CodeGenModule::applyGlobalValReplacements() { |
554 | for (auto &I : GlobalValReplacements) { |
555 | llvm::GlobalValue *GV = I.first; |
556 | llvm::Constant *C = I.second; |
557 | |
558 | GV->replaceAllUsesWith(V: C); |
559 | GV->eraseFromParent(); |
560 | } |
561 | } |
562 | |
563 | // This is only used in aliases that we created and we know they have a |
564 | // linear structure. |
565 | static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) { |
566 | const llvm::Constant *C; |
567 | if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: GV)) |
568 | C = GA->getAliasee(); |
569 | else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(Val: GV)) |
570 | C = GI->getResolver(); |
571 | else |
572 | return GV; |
573 | |
574 | const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(Val: C->stripPointerCasts()); |
575 | if (!AliaseeGV) |
576 | return nullptr; |
577 | |
578 | const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject(); |
579 | if (FinalGV == GV) |
580 | return nullptr; |
581 | |
582 | return FinalGV; |
583 | } |
584 | |
585 | static bool checkAliasedGlobal( |
586 | const ASTContext &Context, DiagnosticsEngine &Diags, SourceLocation Location, |
587 | bool IsIFunc, const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV, |
588 | const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames, |
589 | SourceRange AliasRange) { |
590 | GV = getAliasedGlobal(GV: Alias); |
591 | if (!GV) { |
592 | Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc; |
593 | return false; |
594 | } |
595 | |
596 | if (GV->hasCommonLinkage()) { |
597 | const llvm::Triple &Triple = Context.getTargetInfo().getTriple(); |
598 | if (Triple.getObjectFormat() == llvm::Triple::XCOFF) { |
599 | Diags.Report(Location, diag::err_alias_to_common); |
600 | return false; |
601 | } |
602 | } |
603 | |
604 | if (GV->isDeclaration()) { |
605 | Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc; |
606 | Diags.Report(Location, diag::note_alias_requires_mangled_name) |
607 | << IsIFunc << IsIFunc; |
608 | // Provide a note if the given function is not found and exists as a |
609 | // mangled name. |
610 | for (const auto &[Decl, Name] : MangledDeclNames) { |
611 | if (const auto *ND = dyn_cast<NamedDecl>(Val: Decl.getDecl())) { |
612 | IdentifierInfo *II = ND->getIdentifier(); |
613 | if (II && II->getName() == GV->getName()) { |
614 | Diags.Report(Location, diag::note_alias_mangled_name_alternative) |
615 | << Name |
616 | << FixItHint::CreateReplacement( |
617 | AliasRange, |
618 | (Twine(IsIFunc ? "ifunc" : "alias" ) + "(\"" + Name + "\")" ) |
619 | .str()); |
620 | } |
621 | } |
622 | } |
623 | return false; |
624 | } |
625 | |
626 | if (IsIFunc) { |
627 | // Check resolver function type. |
628 | const auto *F = dyn_cast<llvm::Function>(Val: GV); |
629 | if (!F) { |
630 | Diags.Report(Location, diag::err_alias_to_undefined) |
631 | << IsIFunc << IsIFunc; |
632 | return false; |
633 | } |
634 | |
635 | llvm::FunctionType *FTy = F->getFunctionType(); |
636 | if (!FTy->getReturnType()->isPointerTy()) { |
637 | Diags.Report(Location, diag::err_ifunc_resolver_return); |
638 | return false; |
639 | } |
640 | } |
641 | |
642 | return true; |
643 | } |
644 | |
645 | // Emit a warning if toc-data attribute is requested for global variables that |
646 | // have aliases and remove the toc-data attribute. |
647 | static void checkAliasForTocData(llvm::GlobalVariable *GVar, |
648 | const CodeGenOptions &CodeGenOpts, |
649 | DiagnosticsEngine &Diags, |
650 | SourceLocation Location) { |
651 | if (GVar->hasAttribute(Kind: "toc-data" )) { |
652 | auto GVId = GVar->getName(); |
653 | // Is this a global variable specified by the user as local? |
654 | if ((llvm::binary_search(Range: CodeGenOpts.TocDataVarsUserSpecified, Value&: GVId))) { |
655 | Diags.Report(Location, diag::warn_toc_unsupported_type) |
656 | << GVId << "the variable has an alias" ; |
657 | } |
658 | llvm::AttributeSet CurrAttributes = GVar->getAttributes(); |
659 | llvm::AttributeSet NewAttributes = |
660 | CurrAttributes.removeAttribute(C&: GVar->getContext(), Kind: "toc-data" ); |
661 | GVar->setAttributes(NewAttributes); |
662 | } |
663 | } |
664 | |
665 | void CodeGenModule::checkAliases() { |
666 | // Check if the constructed aliases are well formed. It is really unfortunate |
667 | // that we have to do this in CodeGen, but we only construct mangled names |
668 | // and aliases during codegen. |
669 | bool Error = false; |
670 | DiagnosticsEngine &Diags = getDiags(); |
671 | for (const GlobalDecl &GD : Aliases) { |
672 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
673 | SourceLocation Location; |
674 | SourceRange Range; |
675 | bool IsIFunc = D->hasAttr<IFuncAttr>(); |
676 | if (const Attr *A = D->getDefiningAttr()) { |
677 | Location = A->getLocation(); |
678 | Range = A->getRange(); |
679 | } else |
680 | llvm_unreachable("Not an alias or ifunc?" ); |
681 | |
682 | StringRef MangledName = getMangledName(GD); |
683 | llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName); |
684 | const llvm::GlobalValue *GV = nullptr; |
685 | if (!checkAliasedGlobal(Context: getContext(), Diags, Location, IsIFunc, Alias, GV, |
686 | MangledDeclNames, AliasRange: Range)) { |
687 | Error = true; |
688 | continue; |
689 | } |
690 | |
691 | if (getContext().getTargetInfo().getTriple().isOSAIX()) |
692 | if (const llvm::GlobalVariable *GVar = |
693 | dyn_cast<const llvm::GlobalVariable>(Val: GV)) |
694 | checkAliasForTocData(GVar: const_cast<llvm::GlobalVariable *>(GVar), |
695 | CodeGenOpts: getCodeGenOpts(), Diags, Location); |
696 | |
697 | llvm::Constant *Aliasee = |
698 | IsIFunc ? cast<llvm::GlobalIFunc>(Val: Alias)->getResolver() |
699 | : cast<llvm::GlobalAlias>(Val: Alias)->getAliasee(); |
700 | |
701 | llvm::GlobalValue *AliaseeGV; |
702 | if (auto CE = dyn_cast<llvm::ConstantExpr>(Val: Aliasee)) |
703 | AliaseeGV = cast<llvm::GlobalValue>(Val: CE->getOperand(i_nocapture: 0)); |
704 | else |
705 | AliaseeGV = cast<llvm::GlobalValue>(Val: Aliasee); |
706 | |
707 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) { |
708 | StringRef AliasSection = SA->getName(); |
709 | if (AliasSection != AliaseeGV->getSection()) |
710 | Diags.Report(SA->getLocation(), diag::warn_alias_with_section) |
711 | << AliasSection << IsIFunc << IsIFunc; |
712 | } |
713 | |
714 | // We have to handle alias to weak aliases in here. LLVM itself disallows |
715 | // this since the object semantics would not match the IL one. For |
716 | // compatibility with gcc we implement it by just pointing the alias |
717 | // to its aliasee's aliasee. We also warn, since the user is probably |
718 | // expecting the link to be weak. |
719 | if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: AliaseeGV)) { |
720 | if (GA->isInterposable()) { |
721 | Diags.Report(Location, diag::warn_alias_to_weak_alias) |
722 | << GV->getName() << GA->getName() << IsIFunc; |
723 | Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
724 | C: GA->getAliasee(), Ty: Alias->getType()); |
725 | |
726 | if (IsIFunc) |
727 | cast<llvm::GlobalIFunc>(Val: Alias)->setResolver(Aliasee); |
728 | else |
729 | cast<llvm::GlobalAlias>(Val: Alias)->setAliasee(Aliasee); |
730 | } |
731 | } |
732 | // ifunc resolvers are usually implemented to run before sanitizer |
733 | // initialization. Disable instrumentation to prevent the ordering issue. |
734 | if (IsIFunc) |
735 | cast<llvm::Function>(Aliasee)->addFnAttr( |
736 | llvm::Attribute::DisableSanitizerInstrumentation); |
737 | } |
738 | if (!Error) |
739 | return; |
740 | |
741 | for (const GlobalDecl &GD : Aliases) { |
742 | StringRef MangledName = getMangledName(GD); |
743 | llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName); |
744 | Alias->replaceAllUsesWith(V: llvm::PoisonValue::get(T: Alias->getType())); |
745 | Alias->eraseFromParent(); |
746 | } |
747 | } |
748 | |
749 | void CodeGenModule::clear() { |
750 | DeferredDeclsToEmit.clear(); |
751 | EmittedDeferredDecls.clear(); |
752 | DeferredAnnotations.clear(); |
753 | if (OpenMPRuntime) |
754 | OpenMPRuntime->clear(); |
755 | } |
756 | |
757 | void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags, |
758 | StringRef MainFile) { |
759 | if (!hasDiagnostics()) |
760 | return; |
761 | if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) { |
762 | if (MainFile.empty()) |
763 | MainFile = "<stdin>" ; |
764 | Diags.Report(diag::warn_profile_data_unprofiled) << MainFile; |
765 | } else { |
766 | if (Mismatched > 0) |
767 | Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched; |
768 | |
769 | if (Missing > 0) |
770 | Diags.Report(diag::warn_profile_data_missing) << Visited << Missing; |
771 | } |
772 | } |
773 | |
774 | static std::optional<llvm::GlobalValue::VisibilityTypes> |
775 | getLLVMVisibility(clang::LangOptions::VisibilityFromDLLStorageClassKinds K) { |
776 | // Map to LLVM visibility. |
777 | switch (K) { |
778 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Keep: |
779 | return std::nullopt; |
780 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Default: |
781 | return llvm::GlobalValue::DefaultVisibility; |
782 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Hidden: |
783 | return llvm::GlobalValue::HiddenVisibility; |
784 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Protected: |
785 | return llvm::GlobalValue::ProtectedVisibility; |
786 | } |
787 | llvm_unreachable("unknown option value!" ); |
788 | } |
789 | |
790 | static void |
791 | setLLVMVisibility(llvm::GlobalValue &GV, |
792 | std::optional<llvm::GlobalValue::VisibilityTypes> V) { |
793 | if (!V) |
794 | return; |
795 | |
796 | // Reset DSO locality before setting the visibility. This removes |
797 | // any effects that visibility options and annotations may have |
798 | // had on the DSO locality. Setting the visibility will implicitly set |
799 | // appropriate globals to DSO Local; however, this will be pessimistic |
800 | // w.r.t. to the normal compiler IRGen. |
801 | GV.setDSOLocal(false); |
802 | GV.setVisibility(*V); |
803 | } |
804 | |
805 | static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO, |
806 | llvm::Module &M) { |
807 | if (!LO.VisibilityFromDLLStorageClass) |
808 | return; |
809 | |
810 | std::optional<llvm::GlobalValue::VisibilityTypes> DLLExportVisibility = |
811 | getLLVMVisibility(K: LO.getDLLExportVisibility()); |
812 | |
813 | std::optional<llvm::GlobalValue::VisibilityTypes> |
814 | NoDLLStorageClassVisibility = |
815 | getLLVMVisibility(K: LO.getNoDLLStorageClassVisibility()); |
816 | |
817 | std::optional<llvm::GlobalValue::VisibilityTypes> |
818 | ExternDeclDLLImportVisibility = |
819 | getLLVMVisibility(K: LO.getExternDeclDLLImportVisibility()); |
820 | |
821 | std::optional<llvm::GlobalValue::VisibilityTypes> |
822 | ExternDeclNoDLLStorageClassVisibility = |
823 | getLLVMVisibility(K: LO.getExternDeclNoDLLStorageClassVisibility()); |
824 | |
825 | for (llvm::GlobalValue &GV : M.global_values()) { |
826 | if (GV.hasAppendingLinkage() || GV.hasLocalLinkage()) |
827 | continue; |
828 | |
829 | if (GV.isDeclarationForLinker()) |
830 | setLLVMVisibility(GV, V: GV.getDLLStorageClass() == |
831 | llvm::GlobalValue::DLLImportStorageClass |
832 | ? ExternDeclDLLImportVisibility |
833 | : ExternDeclNoDLLStorageClassVisibility); |
834 | else |
835 | setLLVMVisibility(GV, V: GV.getDLLStorageClass() == |
836 | llvm::GlobalValue::DLLExportStorageClass |
837 | ? DLLExportVisibility |
838 | : NoDLLStorageClassVisibility); |
839 | |
840 | GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
841 | } |
842 | } |
843 | |
844 | static bool isStackProtectorOn(const LangOptions &LangOpts, |
845 | const llvm::Triple &Triple, |
846 | clang::LangOptions::StackProtectorMode Mode) { |
847 | if (Triple.isGPU()) |
848 | return false; |
849 | return LangOpts.getStackProtector() == Mode; |
850 | } |
851 | |
852 | void CodeGenModule::Release() { |
853 | Module *Primary = getContext().getCurrentNamedModule(); |
854 | if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule()) |
855 | EmitModuleInitializers(Primary); |
856 | EmitDeferred(); |
857 | DeferredDecls.insert_range(R&: EmittedDeferredDecls); |
858 | EmittedDeferredDecls.clear(); |
859 | EmitVTablesOpportunistically(); |
860 | applyGlobalValReplacements(); |
861 | applyReplacements(); |
862 | emitMultiVersionFunctions(); |
863 | |
864 | if (Context.getLangOpts().IncrementalExtensions && |
865 | GlobalTopLevelStmtBlockInFlight.first) { |
866 | const TopLevelStmtDecl *TLSD = GlobalTopLevelStmtBlockInFlight.second; |
867 | GlobalTopLevelStmtBlockInFlight.first->FinishFunction(EndLoc: TLSD->getEndLoc()); |
868 | GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr}; |
869 | } |
870 | |
871 | // Module implementations are initialized the same way as a regular TU that |
872 | // imports one or more modules. |
873 | if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition()) |
874 | EmitCXXModuleInitFunc(Primary); |
875 | else |
876 | EmitCXXGlobalInitFunc(); |
877 | EmitCXXGlobalCleanUpFunc(); |
878 | registerGlobalDtorsWithAtExit(); |
879 | EmitCXXThreadLocalInitFunc(); |
880 | if (ObjCRuntime) |
881 | if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction()) |
882 | AddGlobalCtor(Ctor: ObjCInitFunction); |
883 | if (Context.getLangOpts().CUDA && CUDARuntime) { |
884 | if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule()) |
885 | AddGlobalCtor(Ctor: CudaCtorFunction); |
886 | } |
887 | if (OpenMPRuntime) { |
888 | OpenMPRuntime->createOffloadEntriesAndInfoMetadata(); |
889 | OpenMPRuntime->clear(); |
890 | } |
891 | if (PGOReader) { |
892 | getModule().setProfileSummary( |
893 | M: PGOReader->getSummary(/* UseCS */ false).getMD(Context&: VMContext), |
894 | Kind: llvm::ProfileSummary::PSK_Instr); |
895 | if (PGOStats.hasDiagnostics()) |
896 | PGOStats.reportDiagnostics(Diags&: getDiags(), MainFile: getCodeGenOpts().MainFileName); |
897 | } |
898 | llvm::stable_sort(Range&: GlobalCtors, C: [](const Structor &L, const Structor &R) { |
899 | return L.LexOrder < R.LexOrder; |
900 | }); |
901 | EmitCtorList(Fns&: GlobalCtors, GlobalName: "llvm.global_ctors" ); |
902 | EmitCtorList(Fns&: GlobalDtors, GlobalName: "llvm.global_dtors" ); |
903 | EmitGlobalAnnotations(); |
904 | EmitStaticExternCAliases(); |
905 | checkAliases(); |
906 | EmitDeferredUnusedCoverageMappings(); |
907 | CodeGenPGO(*this).setValueProfilingFlag(getModule()); |
908 | CodeGenPGO(*this).setProfileVersion(getModule()); |
909 | if (CoverageMapping) |
910 | CoverageMapping->emit(); |
911 | if (CodeGenOpts.SanitizeCfiCrossDso) { |
912 | CodeGenFunction(*this).EmitCfiCheckFail(); |
913 | CodeGenFunction(*this).EmitCfiCheckStub(); |
914 | } |
915 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) |
916 | finalizeKCFITypes(); |
917 | emitAtAvailableLinkGuard(); |
918 | if (Context.getTargetInfo().getTriple().isWasm()) |
919 | EmitMainVoidAlias(); |
920 | |
921 | if (getTriple().isAMDGPU() || |
922 | (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) { |
923 | // Emit amdhsa_code_object_version module flag, which is code object version |
924 | // times 100. |
925 | if (getTarget().getTargetOpts().CodeObjectVersion != |
926 | llvm::CodeObjectVersionKind::COV_None) { |
927 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
928 | Key: "amdhsa_code_object_version" , |
929 | Val: getTarget().getTargetOpts().CodeObjectVersion); |
930 | } |
931 | |
932 | // Currently, "-mprintf-kind" option is only supported for HIP |
933 | if (LangOpts.HIP) { |
934 | auto *MDStr = llvm::MDString::get( |
935 | Context&: getLLVMContext(), Str: (getTarget().getTargetOpts().AMDGPUPrintfKindVal == |
936 | TargetOptions::AMDGPUPrintfKind::Hostcall) |
937 | ? "hostcall" |
938 | : "buffered" ); |
939 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "amdgpu_printf_kind" , |
940 | Val: MDStr); |
941 | } |
942 | } |
943 | |
944 | // Emit a global array containing all external kernels or device variables |
945 | // used by host functions and mark it as used for CUDA/HIP. This is necessary |
946 | // to get kernels or device variables in archives linked in even if these |
947 | // kernels or device variables are only used in host functions. |
948 | if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) { |
949 | SmallVector<llvm::Constant *, 8> UsedArray; |
950 | for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) { |
951 | GlobalDecl GD; |
952 | if (auto *FD = dyn_cast<FunctionDecl>(D)) |
953 | GD = GlobalDecl(FD, KernelReferenceKind::Kernel); |
954 | else |
955 | GD = GlobalDecl(D); |
956 | UsedArray.push_back(llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
957 | GetAddrOfGlobal(GD), Int8PtrTy)); |
958 | } |
959 | |
960 | llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: Int8PtrTy, NumElements: UsedArray.size()); |
961 | |
962 | auto *GV = new llvm::GlobalVariable( |
963 | getModule(), ATy, false, llvm::GlobalValue::InternalLinkage, |
964 | llvm::ConstantArray::get(T: ATy, V: UsedArray), "__clang_gpu_used_external" ); |
965 | addCompilerUsedGlobal(GV); |
966 | } |
967 | if (LangOpts.HIP && !getLangOpts().OffloadingNewDriver) { |
968 | // Emit a unique ID so that host and device binaries from the same |
969 | // compilation unit can be associated. |
970 | auto *GV = new llvm::GlobalVariable( |
971 | getModule(), Int8Ty, false, llvm::GlobalValue::ExternalLinkage, |
972 | llvm::Constant::getNullValue(Ty: Int8Ty), |
973 | "__hip_cuid_" + getContext().getCUIDHash()); |
974 | getSanitizerMetadata()->disableSanitizerForGlobal(GV); |
975 | addCompilerUsedGlobal(GV); |
976 | } |
977 | emitLLVMUsed(); |
978 | if (SanStats) |
979 | SanStats->finish(); |
980 | |
981 | if (CodeGenOpts.Autolink && |
982 | (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) { |
983 | EmitModuleLinkOptions(); |
984 | } |
985 | |
986 | // On ELF we pass the dependent library specifiers directly to the linker |
987 | // without manipulating them. This is in contrast to other platforms where |
988 | // they are mapped to a specific linker option by the compiler. This |
989 | // difference is a result of the greater variety of ELF linkers and the fact |
990 | // that ELF linkers tend to handle libraries in a more complicated fashion |
991 | // than on other platforms. This forces us to defer handling the dependent |
992 | // libs to the linker. |
993 | // |
994 | // CUDA/HIP device and host libraries are different. Currently there is no |
995 | // way to differentiate dependent libraries for host or device. Existing |
996 | // usage of #pragma comment(lib, *) is intended for host libraries on |
997 | // Windows. Therefore emit llvm.dependent-libraries only for host. |
998 | if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) { |
999 | auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.dependent-libraries" ); |
1000 | for (auto *MD : ELFDependentLibraries) |
1001 | NMD->addOperand(M: MD); |
1002 | } |
1003 | |
1004 | if (CodeGenOpts.DwarfVersion) { |
1005 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "Dwarf Version" , |
1006 | Val: CodeGenOpts.DwarfVersion); |
1007 | } |
1008 | |
1009 | if (CodeGenOpts.Dwarf64) |
1010 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "DWARF64" , Val: 1); |
1011 | |
1012 | if (Context.getLangOpts().SemanticInterposition) |
1013 | // Require various optimization to respect semantic interposition. |
1014 | getModule().setSemanticInterposition(true); |
1015 | |
1016 | if (CodeGenOpts.EmitCodeView) { |
1017 | // Indicate that we want CodeView in the metadata. |
1018 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeView" , Val: 1); |
1019 | } |
1020 | if (CodeGenOpts.CodeViewGHash) { |
1021 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeViewGHash" , Val: 1); |
1022 | } |
1023 | if (CodeGenOpts.ControlFlowGuard) { |
1024 | // Function ID tables and checks for Control Flow Guard (cfguard=2). |
1025 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "cfguard" , Val: 2); |
1026 | } else if (CodeGenOpts.ControlFlowGuardNoChecks) { |
1027 | // Function ID tables for Control Flow Guard (cfguard=1). |
1028 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "cfguard" , Val: 1); |
1029 | } |
1030 | if (CodeGenOpts.EHContGuard) { |
1031 | // Function ID tables for EH Continuation Guard. |
1032 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ehcontguard" , Val: 1); |
1033 | } |
1034 | if (Context.getLangOpts().Kernel) { |
1035 | // Note if we are compiling with /kernel. |
1036 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ms-kernel" , Val: 1); |
1037 | } |
1038 | if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) { |
1039 | // We don't support LTO with 2 with different StrictVTablePointers |
1040 | // FIXME: we could support it by stripping all the information introduced |
1041 | // by StrictVTablePointers. |
1042 | |
1043 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "StrictVTablePointers" ,Val: 1); |
1044 | |
1045 | llvm::Metadata *Ops[2] = { |
1046 | llvm::MDString::get(Context&: VMContext, Str: "StrictVTablePointers" ), |
1047 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1048 | Ty: llvm::Type::getInt32Ty(C&: VMContext), V: 1))}; |
1049 | |
1050 | getModule().addModuleFlag(Behavior: llvm::Module::Require, |
1051 | Key: "StrictVTablePointersRequirement" , |
1052 | Val: llvm::MDNode::get(Context&: VMContext, MDs: Ops)); |
1053 | } |
1054 | if (getModuleDebugInfo()) |
1055 | // We support a single version in the linked module. The LLVM |
1056 | // parser will drop debug info with a different version number |
1057 | // (and warn about it, too). |
1058 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "Debug Info Version" , |
1059 | Val: llvm::DEBUG_METADATA_VERSION); |
1060 | |
1061 | // We need to record the widths of enums and wchar_t, so that we can generate |
1062 | // the correct build attributes in the ARM backend. wchar_size is also used by |
1063 | // TargetLibraryInfo. |
1064 | uint64_t WCharWidth = |
1065 | Context.getTypeSizeInChars(T: Context.getWideCharType()).getQuantity(); |
1066 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "wchar_size" , Val: WCharWidth); |
1067 | |
1068 | if (getTriple().isOSzOS()) { |
1069 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, |
1070 | Key: "zos_product_major_version" , |
1071 | Val: uint32_t(CLANG_VERSION_MAJOR)); |
1072 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, |
1073 | Key: "zos_product_minor_version" , |
1074 | Val: uint32_t(CLANG_VERSION_MINOR)); |
1075 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "zos_product_patchlevel" , |
1076 | Val: uint32_t(CLANG_VERSION_PATCHLEVEL)); |
1077 | std::string ProductId = getClangVendor() + "clang" ; |
1078 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_product_id" , |
1079 | Val: llvm::MDString::get(Context&: VMContext, Str: ProductId)); |
1080 | |
1081 | // Record the language because we need it for the PPA2. |
1082 | StringRef lang_str = languageToString( |
1083 | L: LangStandard::getLangStandardForKind(K: LangOpts.LangStd).Language); |
1084 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_cu_language" , |
1085 | Val: llvm::MDString::get(Context&: VMContext, Str: lang_str)); |
1086 | |
1087 | time_t TT = PreprocessorOpts.SourceDateEpoch |
1088 | ? *PreprocessorOpts.SourceDateEpoch |
1089 | : std::time(timer: nullptr); |
1090 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "zos_translation_time" , |
1091 | Val: static_cast<uint64_t>(TT)); |
1092 | |
1093 | // Multiple modes will be supported here. |
1094 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_le_char_mode" , |
1095 | Val: llvm::MDString::get(Context&: VMContext, Str: "ascii" )); |
1096 | } |
1097 | |
1098 | llvm::Triple T = Context.getTargetInfo().getTriple(); |
1099 | if (T.isARM() || T.isThumb()) { |
1100 | // The minimum width of an enum in bytes |
1101 | uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4; |
1102 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "min_enum_size" , Val: EnumWidth); |
1103 | } |
1104 | |
1105 | if (T.isRISCV()) { |
1106 | StringRef ABIStr = Target.getABI(); |
1107 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1108 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "target-abi" , |
1109 | Val: llvm::MDString::get(Context&: Ctx, Str: ABIStr)); |
1110 | |
1111 | // Add the canonical ISA string as metadata so the backend can set the ELF |
1112 | // attributes correctly. We use AppendUnique so LTO will keep all of the |
1113 | // unique ISA strings that were linked together. |
1114 | const std::vector<std::string> &Features = |
1115 | getTarget().getTargetOpts().Features; |
1116 | auto ParseResult = |
1117 | llvm::RISCVISAInfo::parseFeatures(XLen: T.isRISCV64() ? 64 : 32, Features); |
1118 | if (!errorToBool(Err: ParseResult.takeError())) |
1119 | getModule().addModuleFlag( |
1120 | Behavior: llvm::Module::AppendUnique, Key: "riscv-isa" , |
1121 | Val: llvm::MDNode::get( |
1122 | Context&: Ctx, MDs: llvm::MDString::get(Context&: Ctx, Str: (*ParseResult)->toString()))); |
1123 | } |
1124 | |
1125 | if (CodeGenOpts.SanitizeCfiCrossDso) { |
1126 | // Indicate that we want cross-DSO control flow integrity checks. |
1127 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "Cross-DSO CFI" , Val: 1); |
1128 | } |
1129 | |
1130 | if (CodeGenOpts.WholeProgramVTables) { |
1131 | // Indicate whether VFE was enabled for this module, so that the |
1132 | // vcall_visibility metadata added under whole program vtables is handled |
1133 | // appropriately in the optimizer. |
1134 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "Virtual Function Elim" , |
1135 | Val: CodeGenOpts.VirtualFunctionElimination); |
1136 | } |
1137 | |
1138 | if (LangOpts.Sanitize.has(K: SanitizerKind::CFIICall)) { |
1139 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1140 | Key: "CFI Canonical Jump Tables" , |
1141 | Val: CodeGenOpts.SanitizeCfiCanonicalJumpTables); |
1142 | } |
1143 | |
1144 | if (CodeGenOpts.SanitizeCfiICallNormalizeIntegers) { |
1145 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "cfi-normalize-integers" , |
1146 | Val: 1); |
1147 | } |
1148 | |
1149 | if (!CodeGenOpts.UniqueSourceFileIdentifier.empty()) { |
1150 | getModule().addModuleFlag( |
1151 | Behavior: llvm::Module::Append, Key: "Unique Source File Identifier" , |
1152 | Val: llvm::MDTuple::get( |
1153 | Context&: TheModule.getContext(), |
1154 | MDs: llvm::MDString::get(Context&: TheModule.getContext(), |
1155 | Str: CodeGenOpts.UniqueSourceFileIdentifier))); |
1156 | } |
1157 | |
1158 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) { |
1159 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi" , Val: 1); |
1160 | // KCFI assumes patchable-function-prefix is the same for all indirectly |
1161 | // called functions. Store the expected offset for code generation. |
1162 | if (CodeGenOpts.PatchableFunctionEntryOffset) |
1163 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-offset" , |
1164 | Val: CodeGenOpts.PatchableFunctionEntryOffset); |
1165 | if (CodeGenOpts.SanitizeKcfiArity) |
1166 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-arity" , Val: 1); |
1167 | } |
1168 | |
1169 | if (CodeGenOpts.CFProtectionReturn && |
1170 | Target.checkCFProtectionReturnSupported(Diags&: getDiags())) { |
1171 | // Indicate that we want to instrument return control flow protection. |
1172 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-return" , |
1173 | Val: 1); |
1174 | } |
1175 | |
1176 | if (CodeGenOpts.CFProtectionBranch && |
1177 | Target.checkCFProtectionBranchSupported(Diags&: getDiags())) { |
1178 | // Indicate that we want to instrument branch control flow protection. |
1179 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-branch" , |
1180 | Val: 1); |
1181 | |
1182 | auto Scheme = CodeGenOpts.getCFBranchLabelScheme(); |
1183 | if (Target.checkCFBranchLabelSchemeSupported(Scheme, Diags&: getDiags())) { |
1184 | if (Scheme == CFBranchLabelSchemeKind::Default) |
1185 | Scheme = Target.getDefaultCFBranchLabelScheme(); |
1186 | getModule().addModuleFlag( |
1187 | Behavior: llvm::Module::Error, Key: "cf-branch-label-scheme" , |
1188 | Val: llvm::MDString::get(Context&: getLLVMContext(), |
1189 | Str: getCFBranchLabelSchemeFlagVal(Scheme))); |
1190 | } |
1191 | } |
1192 | |
1193 | if (CodeGenOpts.FunctionReturnThunks) |
1194 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "function_return_thunk_extern" , Val: 1); |
1195 | |
1196 | if (CodeGenOpts.IndirectBranchCSPrefix) |
1197 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "indirect_branch_cs_prefix" , Val: 1); |
1198 | |
1199 | // Add module metadata for return address signing (ignoring |
1200 | // non-leaf/all) and stack tagging. These are actually turned on by function |
1201 | // attributes, but we use module metadata to emit build attributes. This is |
1202 | // needed for LTO, where the function attributes are inside bitcode |
1203 | // serialised into a global variable by the time build attributes are |
1204 | // emitted, so we can't access them. LTO objects could be compiled with |
1205 | // different flags therefore module flags are set to "Min" behavior to achieve |
1206 | // the same end result of the normal build where e.g BTI is off if any object |
1207 | // doesn't support it. |
1208 | if (Context.getTargetInfo().hasFeature(Feature: "ptrauth" ) && |
1209 | LangOpts.getSignReturnAddressScope() != |
1210 | LangOptions::SignReturnAddressScopeKind::None) |
1211 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1212 | Key: "sign-return-address-buildattr" , Val: 1); |
1213 | if (LangOpts.Sanitize.has(K: SanitizerKind::MemtagStack)) |
1214 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1215 | Key: "tag-stack-memory-buildattr" , Val: 1); |
1216 | |
1217 | if (T.isARM() || T.isThumb() || T.isAArch64()) { |
1218 | if (LangOpts.BranchTargetEnforcement) |
1219 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-target-enforcement" , |
1220 | Val: 1); |
1221 | if (LangOpts.BranchProtectionPAuthLR) |
1222 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-protection-pauth-lr" , |
1223 | Val: 1); |
1224 | if (LangOpts.GuardedControlStack) |
1225 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "guarded-control-stack" , Val: 1); |
1226 | if (LangOpts.hasSignReturnAddress()) |
1227 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address" , Val: 1); |
1228 | if (LangOpts.isSignReturnAddressScopeAll()) |
1229 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address-all" , |
1230 | Val: 1); |
1231 | if (!LangOpts.isSignReturnAddressWithAKey()) |
1232 | getModule().addModuleFlag(Behavior: llvm::Module::Min, |
1233 | Key: "sign-return-address-with-bkey" , Val: 1); |
1234 | |
1235 | if (LangOpts.PointerAuthELFGOT) |
1236 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-elf-got" , Val: 1); |
1237 | |
1238 | if (getTriple().isOSLinux()) { |
1239 | if (LangOpts.PointerAuthCalls) |
1240 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-sign-personality" , |
1241 | Val: 1); |
1242 | assert(getTriple().isOSBinFormatELF()); |
1243 | using namespace llvm::ELF; |
1244 | uint64_t PAuthABIVersion = |
1245 | (LangOpts.PointerAuthIntrinsics |
1246 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INTRINSICS) | |
1247 | (LangOpts.PointerAuthCalls |
1248 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_CALLS) | |
1249 | (LangOpts.PointerAuthReturns |
1250 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_RETURNS) | |
1251 | (LangOpts.PointerAuthAuthTraps |
1252 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_AUTHTRAPS) | |
1253 | (LangOpts.PointerAuthVTPtrAddressDiscrimination |
1254 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRADDRDISCR) | |
1255 | (LangOpts.PointerAuthVTPtrTypeDiscrimination |
1256 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR) | |
1257 | (LangOpts.PointerAuthInitFini |
1258 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI) | |
1259 | (LangOpts.PointerAuthInitFiniAddressDiscrimination |
1260 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINIADDRDISC) | |
1261 | (LangOpts.PointerAuthELFGOT |
1262 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT) | |
1263 | (LangOpts.PointerAuthIndirectGotos |
1264 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOTOS) | |
1265 | (LangOpts.PointerAuthTypeInfoVTPtrDiscrimination |
1266 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_TYPEINFOVPTRDISCR) | |
1267 | (LangOpts.PointerAuthFunctionTypeDiscrimination |
1268 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR); |
1269 | static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR == |
1270 | AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST, |
1271 | "Update when new enum items are defined" ); |
1272 | if (PAuthABIVersion != 0) { |
1273 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
1274 | Key: "aarch64-elf-pauthabi-platform" , |
1275 | Val: AARCH64_PAUTH_PLATFORM_LLVM_LINUX); |
1276 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
1277 | Key: "aarch64-elf-pauthabi-version" , |
1278 | Val: PAuthABIVersion); |
1279 | } |
1280 | } |
1281 | } |
1282 | |
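// Illustrative note: StackClashProtector is normally set by
// -fstack-clash-protection; the resulting module flag looks roughly like
// !{i32 4, !"probe-stack", !"inline-asm"}, where 4 is llvm::Module::Override.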
1283 | if (CodeGenOpts.StackClashProtector) |
1284 | getModule().addModuleFlag( |
1285 | Behavior: llvm::Module::Override, Key: "probe-stack" , |
1286 | Val: llvm::MDString::get(Context&: TheModule.getContext(), Str: "inline-asm" )); |
1287 | |
1288 | if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096) |
1289 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "stack-probe-size" , |
1290 | Val: CodeGenOpts.StackProbeSize); |
1291 | |
1292 | if (!CodeGenOpts.MemoryProfileOutput.empty()) { |
1293 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1294 | getModule().addModuleFlag( |
1295 | Behavior: llvm::Module::Error, Key: "MemProfProfileFilename" , |
1296 | Val: llvm::MDString::get(Context&: Ctx, Str: CodeGenOpts.MemoryProfileOutput)); |
1297 | } |
1298 | |
1299 | if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) { |
1300 | // Indicate whether __nvvm_reflect should be configured to flush denormal |
1301 | // floating point values to 0. (This corresponds to its "__CUDA_FTZ" |
1302 | // property.) |
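// Illustrative example: building device code with denormal flushing enabled
// (e.g. -fgpu-flush-denormals-to-zero) makes the FP32 output mode non-IEEE,
// so "nvvm-reflect-ftz" is set to 1 below.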
1303 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "nvvm-reflect-ftz" , |
1304 | Val: CodeGenOpts.FP32DenormalMode.Output != |
1305 | llvm::DenormalMode::IEEE); |
1306 | } |
1307 | |
1308 | if (LangOpts.EHAsynch) |
1309 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "eh-asynch" , Val: 1); |
1310 | |
1311 | // Emit Import Call section. |
1312 | if (CodeGenOpts.ImportCallOptimization) |
1313 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "import-call-optimization" , |
1314 | Val: 1); |
1315 | |
1316 | // Enable unwind v2 (epilog). |
1317 | if (CodeGenOpts.WinX64EHUnwindV2) |
1318 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "winx64-eh-unwindv2" , Val: 1); |
1319 | |
1320 | // Indicate whether this Module was compiled with -fopenmp |
1321 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) |
1322 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp" , Val: LangOpts.OpenMP); |
1323 | if (getLangOpts().OpenMPIsTargetDevice) |
1324 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp-device" , |
1325 | Val: LangOpts.OpenMP); |
1326 | |
1327 | // Emit OpenCL specific module metadata: OpenCL/SPIR version. |
1328 | if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) { |
1329 | EmitOpenCLMetadata(); |
1330 | // Emit SPIR version. |
1331 | if (getTriple().isSPIR()) { |
1332 | // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the |
1333 | // opencl.spir.version named metadata. |
1334 | // C++ for OpenCL has a distinct mapping for version compatibility with |
1335 | // OpenCL. |
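// Illustrative mapping implied by the computation below: OpenCL 1.x maps to
// SPIR version {1, 2}, while OpenCL 2.0 and later map to {<major>, 0}
// (e.g. a compatible version of 200 yields !{i32 2, i32 0}).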
1336 | auto Version = LangOpts.getOpenCLCompatibleVersion(); |
1337 | llvm::Metadata *SPIRVerElts[] = { |
1338 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1339 | Ty: Int32Ty, V: Version / 100)), |
1340 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1341 | Ty: Int32Ty, V: (Version / 100 > 1) ? 0 : 2))}; |
1342 | llvm::NamedMDNode *SPIRVerMD = |
1343 | TheModule.getOrInsertNamedMetadata(Name: "opencl.spir.version" ); |
1344 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1345 | SPIRVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: SPIRVerElts)); |
1346 | } |
1347 | } |
1348 | |
1349 | // HLSL-related end-of-codegen work items.
1350 | if (LangOpts.HLSL) |
1351 | getHLSLRuntime().finishCodeGen(); |
1352 | |
1353 | if (uint32_t PLevel = Context.getLangOpts().PICLevel) { |
1354 | assert(PLevel < 3 && "Invalid PIC Level" ); |
1355 | getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel)); |
1356 | if (Context.getLangOpts().PIE) |
1357 | getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel)); |
1358 | } |
1359 | |
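// Illustrative note: the code model string comes from -mcmodel= (e.g.
// -mcmodel=medium); on x86-64 the medium and large models additionally
// record the -mlarge-data-threshold= value on the module below.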
1360 | if (getCodeGenOpts().CodeModel.size() > 0) { |
1361 | unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel) |
1362 | .Case(S: "tiny" , Value: llvm::CodeModel::Tiny) |
1363 | .Case(S: "small" , Value: llvm::CodeModel::Small) |
1364 | .Case(S: "kernel" , Value: llvm::CodeModel::Kernel) |
1365 | .Case(S: "medium" , Value: llvm::CodeModel::Medium) |
1366 | .Case(S: "large" , Value: llvm::CodeModel::Large) |
1367 | .Default(Value: ~0u); |
1368 | if (CM != ~0u) { |
1369 | llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM); |
1370 | getModule().setCodeModel(codeModel); |
1371 | |
1372 | if ((CM == llvm::CodeModel::Medium || CM == llvm::CodeModel::Large) && |
1373 | Context.getTargetInfo().getTriple().getArch() == |
1374 | llvm::Triple::x86_64) { |
1375 | getModule().setLargeDataThreshold(getCodeGenOpts().LargeDataThreshold); |
1376 | } |
1377 | } |
1378 | } |
1379 | |
1380 | if (CodeGenOpts.NoPLT) |
1381 | getModule().setRtLibUseGOT(); |
1382 | if (getTriple().isOSBinFormatELF() && |
1383 | CodeGenOpts.DirectAccessExternalData != |
1384 | getModule().getDirectAccessExternalData()) { |
1385 | getModule().setDirectAccessExternalData( |
1386 | CodeGenOpts.DirectAccessExternalData); |
1387 | } |
1388 | if (CodeGenOpts.UnwindTables) |
1389 | getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables)); |
1390 | |
1391 | switch (CodeGenOpts.getFramePointer()) { |
1392 | case CodeGenOptions::FramePointerKind::None: |
1393 | // 0 ("none") is the default. |
1394 | break; |
1395 | case CodeGenOptions::FramePointerKind::Reserved: |
1396 | getModule().setFramePointer(llvm::FramePointerKind::Reserved); |
1397 | break; |
1398 | case CodeGenOptions::FramePointerKind::NonLeaf: |
1399 | getModule().setFramePointer(llvm::FramePointerKind::NonLeaf); |
1400 | break; |
1401 | case CodeGenOptions::FramePointerKind::All: |
1402 | getModule().setFramePointer(llvm::FramePointerKind::All); |
1403 | break; |
1404 | } |
1405 | |
1406 | SimplifyPersonality(); |
1407 | |
1408 | if (getCodeGenOpts().EmitDeclMetadata) |
1409 | EmitDeclMetadata(); |
1410 | |
1411 | if (getCodeGenOpts().CoverageNotesFile.size() || |
1412 | getCodeGenOpts().CoverageDataFile.size()) |
1413 | EmitCoverageFile(); |
1414 | |
1415 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
1416 | DI->finalize(); |
1417 | |
1418 | if (getCodeGenOpts().EmitVersionIdentMetadata) |
1419 | EmitVersionIdentMetadata(); |
1420 | |
1421 | if (!getCodeGenOpts().RecordCommandLine.empty()) |
1422 | EmitCommandLineMetadata(); |
1423 | |
1424 | if (!getCodeGenOpts().StackProtectorGuard.empty()) |
1425 | getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard); |
1426 | if (!getCodeGenOpts().StackProtectorGuardReg.empty()) |
1427 | getModule().setStackProtectorGuardReg( |
1428 | getCodeGenOpts().StackProtectorGuardReg); |
1429 | if (!getCodeGenOpts().StackProtectorGuardSymbol.empty()) |
1430 | getModule().setStackProtectorGuardSymbol( |
1431 | getCodeGenOpts().StackProtectorGuardSymbol); |
1432 | if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX) |
1433 | getModule().setStackProtectorGuardOffset( |
1434 | getCodeGenOpts().StackProtectorGuardOffset); |
1435 | if (getCodeGenOpts().StackAlignment) |
1436 | getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment); |
1437 | if (getCodeGenOpts().SkipRaxSetup) |
1438 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "SkipRaxSetup" , Val: 1); |
1439 | if (getLangOpts().RegCall4) |
1440 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "RegCallv4" , Val: 1); |
1441 | |
1442 | if (getContext().getTargetInfo().getMaxTLSAlign()) |
1443 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "MaxTLSAlign" , |
1444 | Val: getContext().getTargetInfo().getMaxTLSAlign()); |
1445 | |
1446 | getTargetCodeGenInfo().emitTargetGlobals(CGM&: *this); |
1447 | |
1448 | getTargetCodeGenInfo().emitTargetMetadata(CGM&: *this, MangledDeclNames); |
1449 | |
1450 | EmitBackendOptionsMetadata(CodeGenOpts: getCodeGenOpts()); |
1451 | |
1452 | // If there is device offloading code, embed it in the host now.
1453 | EmbedObject(M: &getModule(), CGOpts: CodeGenOpts, Diags&: getDiags()); |
1454 | |
1455 | // Set visibility from the DLL storage class.
1456 | // We do this at the end of LLVM IR generation, after any operation that
1457 | // might affect the DLL storage class or the visibility, and before
1458 | // anything that might act on these.
1459 | setVisibilityFromDLLStorageClass(LO: LangOpts, M&: getModule()); |
1460 | |
1461 | // Check that the tail call symbols are truly undefined.
1462 | if (getTriple().isPPC() && !MustTailCallUndefinedGlobals.empty()) { |
1463 | for (auto &I : MustTailCallUndefinedGlobals) { |
1464 | if (!I.first->isDefined()) |
1465 | getDiags().Report(I.second, diag::err_ppc_impossible_musttail) << 2; |
1466 | else { |
1467 | StringRef MangledName = getMangledName(GD: GlobalDecl(I.first)); |
1468 | llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName); |
1469 | if (!Entry || Entry->isWeakForLinker() || |
1470 | Entry->isDeclarationForLinker()) |
1471 | getDiags().Report(I.second, diag::err_ppc_impossible_musttail) << 2; |
1472 | } |
1473 | } |
1474 | } |
1475 | } |
1476 | |
1477 | void CodeGenModule::EmitOpenCLMetadata() { |
1478 | // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the |
1479 | // opencl.ocl.version named metadata node. |
1480 | // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL. |
1481 | auto CLVersion = LangOpts.getOpenCLCompatibleVersion(); |
1482 | |
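// Illustrative example: the version is encoded as {major, minor}, so a
// compatible version of 300 (OpenCL C 3.0) is emitted under the
// opencl.ocl.version named metadata as !{i32 3, i32 0}.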
1483 | auto EmitVersion = [this](StringRef MDName, int Version) { |
1484 | llvm::Metadata *OCLVerElts[] = { |
1485 | llvm::ConstantAsMetadata::get( |
1486 | C: llvm::ConstantInt::get(Ty: Int32Ty, V: Version / 100)), |
1487 | llvm::ConstantAsMetadata::get( |
1488 | C: llvm::ConstantInt::get(Ty: Int32Ty, V: (Version % 100) / 10))}; |
1489 | llvm::NamedMDNode *OCLVerMD = TheModule.getOrInsertNamedMetadata(Name: MDName); |
1490 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1491 | OCLVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: OCLVerElts)); |
1492 | }; |
1493 | |
1494 | EmitVersion("opencl.ocl.version" , CLVersion); |
1495 | if (LangOpts.OpenCLCPlusPlus) { |
1496 | // In addition to the OpenCL compatible version, emit the C++ version. |
1497 | EmitVersion("opencl.cxx.version" , LangOpts.OpenCLCPlusPlusVersion); |
1498 | } |
1499 | } |
1500 | |
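// Illustrative note: on RISC-V targets, EmitBackendOptionsMetadata records
// -msmall-data-limit= (e.g. -msmall-data-limit=8) as a "SmallDataLimit"
// module flag with Min behavior, so linking mixed TUs keeps the smallest
// limit.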
1501 | void CodeGenModule::EmitBackendOptionsMetadata( |
1502 | const CodeGenOptions &CodeGenOpts) { |
1503 | if (getTriple().isRISCV()) { |
1504 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "SmallDataLimit" , |
1505 | Val: CodeGenOpts.SmallDataLimit); |
1506 | } |
1507 | } |
1508 | |
1509 | void CodeGenModule::UpdateCompletedType(const TagDecl *TD) { |
1510 | // Make sure that this type is translated. |
1511 | getTypes().UpdateCompletedType(TD); |
1512 | } |
1513 | |
1514 | void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) { |
1515 | // Make sure that this type is translated. |
1516 | getTypes().RefreshTypeCacheForClass(RD); |
1517 | } |
1518 | |
1519 | llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) { |
1520 | if (!TBAA) |
1521 | return nullptr; |
1522 | return TBAA->getTypeInfo(QTy); |
1523 | } |
1524 | |
1525 | TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) { |
1526 | if (!TBAA) |
1527 | return TBAAAccessInfo(); |
1528 | if (getLangOpts().CUDAIsDevice) { |
1529 | // As CUDA builtin surface/texture types are replaced, skip generating TBAA |
1530 | // access info. |
1531 | if (AccessType->isCUDADeviceBuiltinSurfaceType()) { |
1532 | if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() != |
1533 | nullptr) |
1534 | return TBAAAccessInfo(); |
1535 | } else if (AccessType->isCUDADeviceBuiltinTextureType()) { |
1536 | if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() != |
1537 | nullptr) |
1538 | return TBAAAccessInfo(); |
1539 | } |
1540 | } |
1541 | return TBAA->getAccessInfo(AccessType); |
1542 | } |
1543 | |
1544 | TBAAAccessInfo |
1545 | CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) { |
1546 | if (!TBAA) |
1547 | return TBAAAccessInfo(); |
1548 | return TBAA->getVTablePtrAccessInfo(VTablePtrType); |
1549 | } |
1550 | |
1551 | llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) { |
1552 | if (!TBAA) |
1553 | return nullptr; |
1554 | return TBAA->getTBAAStructInfo(QTy); |
1555 | } |
1556 | |
1557 | llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) { |
1558 | if (!TBAA) |
1559 | return nullptr; |
1560 | return TBAA->getBaseTypeInfo(QTy); |
1561 | } |
1562 | |
1563 | llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) { |
1564 | if (!TBAA) |
1565 | return nullptr; |
1566 | return TBAA->getAccessTagInfo(Info); |
1567 | } |
1568 | |
1569 | TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, |
1570 | TBAAAccessInfo TargetInfo) { |
1571 | if (!TBAA) |
1572 | return TBAAAccessInfo(); |
1573 | return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo); |
1574 | } |
1575 | |
1576 | TBAAAccessInfo |
1577 | CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, |
1578 | TBAAAccessInfo InfoB) { |
1579 | if (!TBAA) |
1580 | return TBAAAccessInfo(); |
1581 | return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB); |
1582 | } |
1583 | |
1584 | TBAAAccessInfo |
1585 | CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, |
1586 | TBAAAccessInfo SrcInfo) { |
1587 | if (!TBAA) |
1588 | return TBAAAccessInfo(); |
1589 | return TBAA->mergeTBAAInfoForConditionalOperator(InfoA: DestInfo, InfoB: SrcInfo); |
1590 | } |
1591 | |
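// Illustrative note: DecorateInstructionWithTBAA attaches the access tag as
// !tbaa metadata, so a decorated load ends up looking roughly like
// "%v = load i32, ptr %p, !tbaa !N" in the emitted IR.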
1592 | void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst, |
1593 | TBAAAccessInfo TBAAInfo) { |
1594 | if (llvm::MDNode *Tag = getTBAAAccessTagInfo(Info: TBAAInfo)) |
1595 | Inst->setMetadata(KindID: llvm::LLVMContext::MD_tbaa, Node: Tag); |
1596 | } |
1597 | |
1598 | void CodeGenModule::DecorateInstructionWithInvariantGroup( |
1599 | llvm::Instruction *I, const CXXRecordDecl *RD) { |
1600 | I->setMetadata(KindID: llvm::LLVMContext::MD_invariant_group, |
1601 | Node: llvm::MDNode::get(Context&: getLLVMContext(), MDs: {})); |
1602 | } |
1603 | |
1604 | void CodeGenModule::Error(SourceLocation loc, StringRef message) { |
1605 | unsigned diagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, FormatString: "%0" ); |
1606 | getDiags().Report(Loc: Context.getFullLoc(Loc: loc), DiagID: diagID) << message; |
1607 | } |
1608 | |
1609 | /// ErrorUnsupported - Print out an error that codegen doesn't support the |
1610 | /// specified stmt yet. |
1611 | void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) { |
1612 | unsigned DiagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, |
1613 | FormatString: "cannot compile this %0 yet" ); |
1614 | std::string Msg = Type; |
1615 | getDiags().Report(Loc: Context.getFullLoc(Loc: S->getBeginLoc()), DiagID) |
1616 | << Msg << S->getSourceRange(); |
1617 | } |
1618 | |
1619 | /// ErrorUnsupported - Print out an error that codegen doesn't support the |
1620 | /// specified decl yet. |
1621 | void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) { |
1622 | unsigned DiagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, |
1623 | FormatString: "cannot compile this %0 yet" ); |
1624 | std::string Msg = Type; |
1625 | getDiags().Report(Loc: Context.getFullLoc(Loc: D->getLocation()), DiagID) << Msg; |
1626 | } |
1627 | |
1628 | void CodeGenModule::runWithSufficientStackSpace(SourceLocation Loc, |
1629 | llvm::function_ref<void()> Fn) { |
1630 | StackHandler.runWithSufficientStackSpace(Loc, Fn); |
1631 | } |
1632 | |
1633 | llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) { |
1634 | return llvm::ConstantInt::get(Ty: SizeTy, V: size.getQuantity()); |
1635 | } |
1636 | |
1637 | void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV, |
1638 | const NamedDecl *D) const { |
1639 | // Internal definitions always have default visibility. |
1640 | if (GV->hasLocalLinkage()) { |
1641 | GV->setVisibility(llvm::GlobalValue::DefaultVisibility); |
1642 | return; |
1643 | } |
1644 | if (!D) |
1645 | return; |
1646 | |
1647 | // Set visibility for definitions, and for declarations if requested globally |
1648 | // or set explicitly. |
1649 | LinkageInfo LV = D->getLinkageAndVisibility(); |
1650 | |
1651 | // OpenMP declare target variables must be visible to the host so they can |
1652 | // be registered. We require protected visibility unless the variable has |
1653 | // the DT_nohost modifier and does not need to be registered. |
1654 | if (Context.getLangOpts().OpenMP && |
1655 | Context.getLangOpts().OpenMPIsTargetDevice && isa<VarDecl>(D) && |
1656 | D->hasAttr<OMPDeclareTargetDeclAttr>() && |
1657 | D->getAttr<OMPDeclareTargetDeclAttr>()->getDevType() != |
1658 | OMPDeclareTargetDeclAttr::DT_NoHost && |
1659 | LV.getVisibility() == HiddenVisibility) { |
1660 | GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); |
1661 | return; |
1662 | } |
1663 | |
1664 | if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) { |
1665 | // Reject incompatible dllstorage and visibility annotations.
1666 | if (!LV.isVisibilityExplicit()) |
1667 | return; |
1668 | if (GV->hasDLLExportStorageClass()) { |
1669 | if (LV.getVisibility() == HiddenVisibility) |
1670 | getDiags().Report(D->getLocation(), |
1671 | diag::err_hidden_visibility_dllexport); |
1672 | } else if (LV.getVisibility() != DefaultVisibility) { |
1673 | getDiags().Report(D->getLocation(), |
1674 | diag::err_non_default_visibility_dllimport); |
1675 | } |
1676 | return; |
1677 | } |
1678 | |
1679 | if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls || |
1680 | !GV->isDeclarationForLinker()) |
1681 | GV->setVisibility(GetLLVMVisibility(V: LV.getVisibility())); |
1682 | } |
1683 | |
1684 | static bool shouldAssumeDSOLocal(const CodeGenModule &CGM, |
1685 | llvm::GlobalValue *GV) { |
1686 | if (GV->hasLocalLinkage()) |
1687 | return true; |
1688 | |
1689 | if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage()) |
1690 | return true; |
1691 | |
1692 | // DLLImport explicitly marks the GV as external. |
1693 | if (GV->hasDLLImportStorageClass()) |
1694 | return false; |
1695 | |
1696 | const llvm::Triple &TT = CGM.getTriple(); |
1697 | const auto &CGOpts = CGM.getCodeGenOpts(); |
1698 | if (TT.isOSCygMing()) { |
1699 | // In MinGW, variables without DLLImport can still be automatically |
1700 | // imported from a DLL by the linker; don't mark variables that |
1701 | // potentially could come from another DLL as DSO local. |
1702 | |
1703 | // With EmulatedTLS, TLS variables can be autoimported from other DLLs |
1704 | // (and this actually happens in the public interface of libstdc++), so |
1705 | // such variables can't be marked as DSO local. (Native TLS variables |
1706 | // can't be dllimported at all, though.) |
1707 | if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(Val: GV) && |
1708 | (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS) && |
1709 | CGOpts.AutoImport) |
1710 | return false; |
1711 | } |
1712 | |
1713 | // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols |
1714 | // remain unresolved in the link, they can be resolved to zero, which is |
1715 | // outside the current DSO. |
1716 | if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage()) |
1717 | return false; |
1718 | |
1719 | // Every other GV is local on COFF. |
1720 | // Make an exception for Windows OS in the triple: some firmware builds use
1721 | // *-win32-macho triples. This (accidentally?) produced Windows relocations
1722 | // without GOT tables in older clang versions; keep this behaviour.
1723 | // FIXME: even thread local variables? |
1724 | if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO())) |
1725 | return true; |
1726 | |
1727 | // Only handle COFF and ELF for now. |
1728 | if (!TT.isOSBinFormatELF()) |
1729 | return false; |
1730 | |
1731 | // If this is not an executable, don't assume anything is local. |
1732 | llvm::Reloc::Model RM = CGOpts.RelocationModel; |
1733 | const auto &LOpts = CGM.getLangOpts(); |
1734 | if (RM != llvm::Reloc::Static && !LOpts.PIE) { |
1735 | // On ELF, if -fno-semantic-interposition is specified and the target |
1736 | // supports local aliases, there will be neither CC1 |
1737 | // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set |
1738 | // dso_local on the function if using a local alias is preferable (can avoid |
1739 | // PLT indirection). |
1740 | if (!(isa<llvm::Function>(Val: GV) && GV->canBenefitFromLocalAlias())) |
1741 | return false; |
1742 | return !(CGM.getLangOpts().SemanticInterposition || |
1743 | CGM.getLangOpts().HalfNoSemanticInterposition); |
1744 | } |
1745 | |
1746 | // A definition cannot be preempted from an executable. |
1747 | if (!GV->isDeclarationForLinker()) |
1748 | return true; |
1749 | |
1750 | // Most PIC code sequences that assume that a symbol is local cannot produce a |
1751 | // 0 if it turns out the symbol is undefined. While this is ABI and relocation |
1752 | // dependent, it seems worth it to handle it here.
1753 | if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage()) |
1754 | return false; |
1755 | |
1756 | // PowerPC64 prefers TOC indirection to avoid copy relocations. |
1757 | if (TT.isPPC64()) |
1758 | return false; |
1759 | |
1760 | if (CGOpts.DirectAccessExternalData) { |
1761 | // If -fdirect-access-external-data (default for -fno-pic), set dso_local |
1762 | // for non-thread-local variables. If the symbol is not defined in the |
1763 | // executable, a copy relocation will be needed at link time. dso_local is |
1764 | // excluded for thread-local variables because they generally don't support |
1765 | // copy relocations. |
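// Illustrative example: with -fno-pic (where direct access to external data
// is the default), "extern int x;" is emitted roughly as
// "@x = external dso_local global i32".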
1766 | if (auto *Var = dyn_cast<llvm::GlobalVariable>(Val: GV)) |
1767 | if (!Var->isThreadLocal()) |
1768 | return true; |
1769 | |
1770 | // -fno-pic sets dso_local on a function declaration to allow direct |
1771 | // accesses when taking its address (similar to a data symbol). If the |
1772 | // function is not defined in the executable, a canonical PLT entry will be |
1773 | // needed at link time. -fno-direct-access-external-data can avoid the |
1774 | // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as |
1775 | // it could just cause trouble without providing perceptible benefits. |
1776 | if (isa<llvm::Function>(Val: GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static) |
1777 | return true; |
1778 | } |
1779 | |
1780 | // If we can use copy relocations we can assume it is local. |
1781 | |
1782 | // Otherwise don't assume it is local. |
1783 | return false; |
1784 | } |
1785 | |
1786 | void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const { |
1787 | GV->setDSOLocal(shouldAssumeDSOLocal(CGM: *this, GV)); |
1788 | } |
1789 | |
1790 | void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV, |
1791 | GlobalDecl GD) const { |
1792 | const auto *D = dyn_cast<NamedDecl>(Val: GD.getDecl()); |
1793 | // C++ destructors have a few C++ ABI specific special cases. |
1794 | if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(Val: D)) { |
1795 | getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, DT: GD.getDtorType()); |
1796 | return; |
1797 | } |
1798 | setDLLImportDLLExport(GV, D); |
1799 | } |
1800 | |
1801 | void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV, |
1802 | const NamedDecl *D) const { |
1803 | if (D && D->isExternallyVisible()) { |
1804 | if (D->hasAttr<DLLImportAttr>()) |
1805 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); |
1806 | else if ((D->hasAttr<DLLExportAttr>() || |
1807 | shouldMapVisibilityToDLLExport(D)) && |
1808 | !GV->isDeclarationForLinker()) |
1809 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); |
1810 | } |
1811 | } |
1812 | |
1813 | void CodeGenModule::setGVProperties(llvm::GlobalValue *GV, |
1814 | GlobalDecl GD) const { |
1815 | setDLLImportDLLExport(GV, GD); |
1816 | setGVPropertiesAux(GV, D: dyn_cast<NamedDecl>(Val: GD.getDecl())); |
1817 | } |
1818 | |
1819 | void CodeGenModule::setGVProperties(llvm::GlobalValue *GV, |
1820 | const NamedDecl *D) const { |
1821 | setDLLImportDLLExport(GV, D); |
1822 | setGVPropertiesAux(GV, D); |
1823 | } |
1824 | |
1825 | void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV, |
1826 | const NamedDecl *D) const { |
1827 | setGlobalVisibility(GV, D); |
1828 | setDSOLocal(GV); |
1829 | GV->setPartition(CodeGenOpts.SymbolPartition); |
1830 | } |
1831 | |
1832 | static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) { |
1833 | return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S) |
1834 | .Case(S: "global-dynamic" , Value: llvm::GlobalVariable::GeneralDynamicTLSModel) |
1835 | .Case(S: "local-dynamic" , Value: llvm::GlobalVariable::LocalDynamicTLSModel) |
1836 | .Case(S: "initial-exec" , Value: llvm::GlobalVariable::InitialExecTLSModel) |
1837 | .Case(S: "local-exec" , Value: llvm::GlobalVariable::LocalExecTLSModel); |
1838 | } |
1839 | |
1840 | llvm::GlobalVariable::ThreadLocalMode |
1841 | CodeGenModule::GetDefaultLLVMTLSModel() const { |
1842 | switch (CodeGenOpts.getDefaultTLSModel()) { |
1843 | case CodeGenOptions::GeneralDynamicTLSModel: |
1844 | return llvm::GlobalVariable::GeneralDynamicTLSModel; |
1845 | case CodeGenOptions::LocalDynamicTLSModel: |
1846 | return llvm::GlobalVariable::LocalDynamicTLSModel; |
1847 | case CodeGenOptions::InitialExecTLSModel: |
1848 | return llvm::GlobalVariable::InitialExecTLSModel; |
1849 | case CodeGenOptions::LocalExecTLSModel: |
1850 | return llvm::GlobalVariable::LocalExecTLSModel; |
1851 | } |
1852 | llvm_unreachable("Invalid TLS model!" ); |
1853 | } |
1854 | |
1855 | void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const { |
1856 | assert(D.getTLSKind() && "setting TLS mode on non-TLS var!" ); |
1857 | |
1858 | llvm::GlobalValue::ThreadLocalMode TLM; |
1859 | TLM = GetDefaultLLVMTLSModel(); |
1860 | |
1861 | // Override the TLS model if it is explicitly specified. |
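// e.g. __thread int x __attribute__((tls_model("initial-exec"))); selects
// InitialExecTLSModel regardless of the -ftls-model default.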
1862 | if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) { |
1863 | TLM = GetLLVMTLSModel(Attr->getModel()); |
1864 | } |
1865 | |
1866 | GV->setThreadLocalMode(TLM); |
1867 | } |
1868 | |
1869 | static std::string getCPUSpecificMangling(const CodeGenModule &CGM, |
1870 | StringRef Name) { |
1871 | const TargetInfo &Target = CGM.getTarget(); |
1872 | return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str(); |
1873 | } |
1874 | |
1875 | static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM, |
1876 | const CPUSpecificAttr *Attr, |
1877 | unsigned CPUIndex, |
1878 | raw_ostream &Out) { |
1879 | // cpu_specific gets the current name, dispatch gets the resolver if IFunc is |
1880 | // supported. |
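// Illustrative example: on a target that supports ifuncs, the resolver for a
// cpu_dispatch function gets a ".resolver" suffix appended to its mangled
// name.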
1881 | if (Attr) |
1882 | Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName()); |
1883 | else if (CGM.getTarget().supportsIFunc()) |
1884 | Out << ".resolver" ; |
1885 | } |
1886 | |
1887 | // Returns true if GD is a function decl with internal linkage and |
1888 | // needs a unique suffix after the mangled name. |
1889 | static bool isUniqueInternalLinkageDecl(GlobalDecl GD, |
1890 | CodeGenModule &CGM) { |
1891 | const Decl *D = GD.getDecl(); |
1892 | return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(Val: D) && |
1893 | (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage); |
1894 | } |
1895 | |
1896 | static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD, |
1897 | const NamedDecl *ND, |
1898 | bool OmitMultiVersionMangling = false) { |
1899 | SmallString<256> Buffer; |
1900 | llvm::raw_svector_ostream Out(Buffer); |
1901 | MangleContext &MC = CGM.getCXXABI().getMangleContext(); |
1902 | if (!CGM.getModuleNameHash().empty()) |
1903 | MC.needsUniqueInternalLinkageNames(); |
1904 | bool ShouldMangle = MC.shouldMangleDeclName(D: ND); |
1905 | if (ShouldMangle) |
1906 | MC.mangleName(GD: GD.getWithDecl(ND), Out); |
1907 | else { |
1908 | IdentifierInfo *II = ND->getIdentifier(); |
1909 | assert(II && "Attempt to mangle unnamed decl." ); |
1910 | const auto *FD = dyn_cast<FunctionDecl>(Val: ND); |
1911 | |
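// Illustrative note: the prefixes below are what give, e.g., a __regcall
// function "foo" the symbol "__regcall3__foo" (or "__regcall4__foo" when
// RegCall4 is in effect).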
1912 | if (FD && |
1913 | FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) { |
1914 | if (CGM.getLangOpts().RegCall4) |
1915 | Out << "__regcall4__" << II->getName(); |
1916 | else |
1917 | Out << "__regcall3__" << II->getName(); |
1918 | } else if (FD && FD->hasAttr<CUDAGlobalAttr>() && |
1919 | GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { |
1920 | Out << "__device_stub__" << II->getName(); |
1921 | } else if (FD && |
1922 | DeviceKernelAttr::isOpenCLSpelling( |
1923 | FD->getAttr<DeviceKernelAttr>()) && |
1924 | GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { |
1925 | Out << "__clang_ocl_kern_imp_" << II->getName(); |
1926 | } else { |
1927 | Out << II->getName(); |
1928 | } |
1929 | } |
1930 | |
1931 | // Check if the module name hash should be appended for internal linkage
1932 | // symbols. This should come before multi-version target suffixes are
1933 | // appended, to keep the name and the module hash suffix of an internal
1934 | // linkage function together. The unique suffix should only be added when
1935 | // name mangling is done, to make sure that the final name can be properly
1936 | // demangled. For example, for C functions without prototypes, name
1937 | // mangling is not done and the unique suffix should therefore not be
1938 | // appended.
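// Illustrative example (flag and suffix shape only): with
// -funique-internal-linkage-names, an internal-linkage function picks up a
// suffix of the form ".__uniq.<hash>" after its mangled name.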
1939 | if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) { |
1940 | assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames && |
1941 | "Hash computed when not explicitly requested" ); |
1942 | Out << CGM.getModuleNameHash(); |
1943 | } |
1944 | |
1945 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
1946 | if (FD->isMultiVersion() && !OmitMultiVersionMangling) { |
1947 | switch (FD->getMultiVersionKind()) { |
1948 | case MultiVersionKind::CPUDispatch: |
1949 | case MultiVersionKind::CPUSpecific: |
1950 | AppendCPUSpecificCPUDispatchMangling(CGM, |
1951 | FD->getAttr<CPUSpecificAttr>(), |
1952 | GD.getMultiVersionIndex(), Out); |
1953 | break; |
1954 | case MultiVersionKind::Target: { |
1955 | auto *Attr = FD->getAttr<TargetAttr>(); |
1956 | assert(Attr && "Expected TargetAttr to be present " |
1957 | "for attribute mangling" ); |
1958 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
1959 | Info.appendAttributeMangling(Attr, Out); |
1960 | break; |
1961 | } |
1962 | case MultiVersionKind::TargetVersion: { |
1963 | auto *Attr = FD->getAttr<TargetVersionAttr>(); |
1964 | assert(Attr && "Expected TargetVersionAttr to be present " |
1965 | "for attribute mangling" ); |
1966 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
1967 | Info.appendAttributeMangling(Attr, Out); |
1968 | break; |
1969 | } |
1970 | case MultiVersionKind::TargetClones: { |
1971 | auto *Attr = FD->getAttr<TargetClonesAttr>(); |
1972 | assert(Attr && "Expected TargetClonesAttr to be present " |
1973 | "for attribute mangling" ); |
1974 | unsigned Index = GD.getMultiVersionIndex(); |
1975 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
1976 | Info.appendAttributeMangling(Attr, Index, Out); |
1977 | break; |
1978 | } |
1979 | case MultiVersionKind::None: |
1980 | llvm_unreachable("None multiversion type isn't valid here" ); |
1981 | } |
1982 | } |
1983 | |
1984 | // Make a unique name for device-side static file-scope variables for HIP.
1985 | if (CGM.getContext().shouldExternalize(ND) && |
1986 | CGM.getLangOpts().GPURelocatableDeviceCode && |
1987 | CGM.getLangOpts().CUDAIsDevice) |
1988 | CGM.printPostfixForExternalizedDecl(Out, ND); |
1989 | |
1990 | return std::string(Out.str()); |
1991 | } |
1992 | |
1993 | void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD, |
1994 | const FunctionDecl *FD, |
1995 | StringRef &CurName) { |
1996 | if (!FD->isMultiVersion()) |
1997 | return; |
1998 | |
1999 | // Get the name of what this would be without the 'target' attribute. This |
2000 | // allows us to look up the version that was emitted when this wasn't a
2001 | // multiversion function. |
2002 | std::string NonTargetName = |
2003 | getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true); |
2004 | GlobalDecl OtherGD; |
2005 | if (lookupRepresentativeDecl(MangledName: NonTargetName, Result&: OtherGD)) { |
2006 | assert(OtherGD.getCanonicalDecl() |
2007 | .getDecl() |
2008 | ->getAsFunction() |
2009 | ->isMultiVersion() && |
2010 | "Other GD should now be a multiversioned function" ); |
2011 | // OtherFD is the version of this function that was mangled BEFORE |
2012 | // becoming a MultiVersion function. It potentially needs to be updated. |
2013 | const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl() |
2014 | .getDecl() |
2015 | ->getAsFunction() |
2016 | ->getMostRecentDecl(); |
2017 | std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD); |
2018 | // This is so that if the initial version was already the 'default' |
2019 | // version, we don't try to update it. |
2020 | if (OtherName != NonTargetName) { |
2021 | // Remove instead of erase, since others may have stored the StringRef |
2022 | // to this. |
2023 | const auto ExistingRecord = Manglings.find(Key: NonTargetName); |
2024 | if (ExistingRecord != std::end(cont&: Manglings)) |
2025 | Manglings.remove(KeyValue: &(*ExistingRecord)); |
2026 | auto Result = Manglings.insert(KV: std::make_pair(x&: OtherName, y&: OtherGD)); |
2027 | StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] = |
2028 | Result.first->first(); |
2029 | // If this is the decl currently being created, make sure we update the name.
2030 | if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl()) |
2031 | CurName = OtherNameRef; |
2032 | if (llvm::GlobalValue *Entry = GetGlobalValue(Ref: NonTargetName)) |
2033 | Entry->setName(OtherName); |
2034 | } |
2035 | } |
2036 | } |
2037 | |
2038 | StringRef CodeGenModule::getMangledName(GlobalDecl GD) { |
2039 | GlobalDecl CanonicalGD = GD.getCanonicalDecl(); |
2040 | |
2041 | // Some ABIs don't have constructor variants. Make sure that base and |
2042 | // complete constructors get mangled the same. |
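// (Illustrative: the Microsoft C++ ABI is one such ABI; it has a single
// constructor symbol, which is why Ctor_Base is canonicalized to
// Ctor_Complete here.)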
2043 | if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: CanonicalGD.getDecl())) { |
2044 | if (!getTarget().getCXXABI().hasConstructorVariants()) { |
2045 | CXXCtorType OrigCtorType = GD.getCtorType(); |
2046 | assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete); |
2047 | if (OrigCtorType == Ctor_Base) |
2048 | CanonicalGD = GlobalDecl(CD, Ctor_Complete); |
2049 | } |
2050 | } |
2051 | |
2052 | // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
2053 | // static device variable depends on whether the variable is referenced by
2054 | // a host or a device function. Therefore the mangled name cannot be
2055 | // cached.
2056 | if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(D: GD.getDecl())) { |
2057 | auto FoundName = MangledDeclNames.find(Key: CanonicalGD); |
2058 | if (FoundName != MangledDeclNames.end()) |
2059 | return FoundName->second; |
2060 | } |
2061 | |
2062 | // Keep the first result in the case of a mangling collision. |
2063 | const auto *ND = cast<NamedDecl>(Val: GD.getDecl()); |
2064 | std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND); |
2065 | |
2066 | // Ensure either that the host and device compilations use different ABIs
2067 | // (say, host compilation follows the MSVC ABI while device compilation
2068 | // follows the Itanium C++ ABI) or, if they follow the same ABI, that
2069 | // kernel names after mangling are the same after name stubbing. The latter
2070 | // check is very important, as the device kernel name mangled in the host
2071 | // compilation is used to resolve the device binaries to be executed.
2072 | // Inconsistent naming results in undefined behavior. Even though we cannot
2073 | // check the naming directly between host and device compilations, comparing
2074 | // the host- and device-mangling in the host compilation can catch some cases.
2075 | assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() || |
2076 | getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice || |
2077 | (getContext().getAuxTargetInfo() && |
2078 | (getContext().getAuxTargetInfo()->getCXXABI() != |
2079 | getContext().getTargetInfo().getCXXABI())) || |
2080 | getCUDARuntime().getDeviceSideName(ND) == |
2081 | getMangledNameImpl( |
2082 | *this, |
2083 | GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel), |
2084 | ND)); |
2085 | |
2086 | // This invariant should hold true in the future. |
2087 | // Prior work: |
2088 | // https://discourse.llvm.org/t/rfc-clang-diagnostic-for-demangling-failures/82835/8 |
2089 | // https://github.com/llvm/llvm-project/issues/111345 |
2090 | // assert(!((StringRef(MangledName).starts_with("_Z") || |
2091 | // StringRef(MangledName).starts_with("?")) && |
2092 | // !GD.getDecl()->hasAttr<AsmLabelAttr>() && |
2093 | // llvm::demangle(MangledName) == MangledName) && |
2094 | // "LLVM demangler must demangle clang-generated names"); |
2095 | |
2096 | auto Result = Manglings.insert(KV: std::make_pair(x&: MangledName, y&: GD)); |
2097 | return MangledDeclNames[CanonicalGD] = Result.first->first(); |
2098 | } |
2099 | |
2100 | StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD, |
2101 | const BlockDecl *BD) { |
2102 | MangleContext &MangleCtx = getCXXABI().getMangleContext(); |
2103 | const Decl *D = GD.getDecl(); |
2104 | |
2105 | SmallString<256> Buffer; |
2106 | llvm::raw_svector_ostream Out(Buffer); |
2107 | if (!D) |
2108 | MangleCtx.mangleGlobalBlock(BD, |
2109 | dyn_cast_or_null<VarDecl>(Val: initializedGlobalDecl.getDecl()), Out); |
2110 | else if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: D)) |
2111 | MangleCtx.mangleCtorBlock(CD, CT: GD.getCtorType(), BD, Out); |
2112 | else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: D)) |
2113 | MangleCtx.mangleDtorBlock(CD: DD, DT: GD.getDtorType(), BD, Out); |
2114 | else |
2115 | MangleCtx.mangleBlock(DC: cast<DeclContext>(Val: D), BD, Out); |
2116 | |
2117 | auto Result = Manglings.insert(KV: std::make_pair(x: Out.str(), y&: BD)); |
2118 | return Result.first->first(); |
2119 | } |
2120 | |
2121 | const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) { |
2122 | auto it = MangledDeclNames.begin(); |
2123 | while (it != MangledDeclNames.end()) { |
2124 | if (it->second == Name) |
2125 | return it->first; |
2126 | it++; |
2127 | } |
2128 | return GlobalDecl(); |
2129 | } |
2130 | |
2131 | llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) { |
2132 | return getModule().getNamedValue(Name); |
2133 | } |
2134 | |
2135 | /// AddGlobalCtor - Add a function to the list that will be called before |
2136 | /// main() runs. |
2137 | void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority, |
2138 | unsigned LexOrder, |
2139 | llvm::Constant *AssociatedData) { |
2140 | // FIXME: Type coercion of void()* types. |
2141 | GlobalCtors.push_back(x: Structor(Priority, LexOrder, Ctor, AssociatedData)); |
2142 | } |
2143 | |
2144 | /// AddGlobalDtor - Add a function to the list that will be called |
2145 | /// when the module is unloaded. |
2146 | void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority, |
2147 | bool IsDtorAttrFunc) { |
2148 | if (CodeGenOpts.RegisterGlobalDtorsWithAtExit && |
2149 | (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) { |
2150 | DtorsUsingAtExit[Priority].push_back(NewVal: Dtor); |
2151 | return; |
2152 | } |
2153 | |
2154 | // FIXME: Type coercion of void()* types. |
2155 | GlobalDtors.push_back(x: Structor(Priority, ~0U, Dtor, nullptr)); |
2156 | } |
2157 | |
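// Illustrative note: EmitCtorList materializes the list as an appending
// global such as llvm.global_ctors; a single entry looks roughly like
//   @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }]
//     [{ i32, ptr, ptr } { i32 65535, ptr @ctor, ptr null }]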
2158 | void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) { |
2159 | if (Fns.empty()) return; |
2160 | |
2161 | const PointerAuthSchema &InitFiniAuthSchema = |
2162 | getCodeGenOpts().PointerAuth.InitFiniPointers; |
2163 | |
2164 | // Ctor function type is ptr. |
2165 | llvm::PointerType *PtrTy = llvm::PointerType::get( |
2166 | C&: getLLVMContext(), AddressSpace: TheModule.getDataLayout().getProgramAddressSpace()); |
2167 | |
2168 | // Get the type of a ctor entry, { i32, ptr, ptr }. |
2169 | llvm::StructType *CtorStructTy = llvm::StructType::get(elt1: Int32Ty, elts: PtrTy, elts: PtrTy); |
2170 | |
2171 | // Construct the constructor and destructor arrays. |
2172 | ConstantInitBuilder Builder(*this); |
2173 | auto Ctors = Builder.beginArray(eltTy: CtorStructTy); |
2174 | for (const auto &I : Fns) { |
2175 | auto Ctor = Ctors.beginStruct(ty: CtorStructTy); |
2176 | Ctor.addInt(intTy: Int32Ty, value: I.Priority); |
2177 | if (InitFiniAuthSchema) { |
2178 | llvm::Constant *StorageAddress = |
2179 | (InitFiniAuthSchema.isAddressDiscriminated() |
2180 | ? llvm::ConstantExpr::getIntToPtr( |
2181 | C: llvm::ConstantInt::get( |
2182 | Ty: IntPtrTy, |
2183 | V: llvm::ConstantPtrAuth::AddrDiscriminator_CtorsDtors), |
2184 | Ty: PtrTy) |
2185 | : nullptr); |
2186 | llvm::Constant *SignedCtorPtr = getConstantSignedPointer( |
2187 | Pointer: I.Initializer, Key: InitFiniAuthSchema.getKey(), StorageAddress, |
2188 | OtherDiscriminator: llvm::ConstantInt::get( |
2189 | Ty: SizeTy, V: InitFiniAuthSchema.getConstantDiscrimination())); |
2190 | Ctor.add(value: SignedCtorPtr); |
2191 | } else { |
2192 | Ctor.add(value: I.Initializer); |
2193 | } |
2194 | if (I.AssociatedData) |
2195 | Ctor.add(value: I.AssociatedData); |
2196 | else |
2197 | Ctor.addNullPointer(ptrTy: PtrTy); |
2198 | Ctor.finishAndAddTo(parent&: Ctors); |
2199 | } |
2200 | |
2201 | auto List = Ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(), |
2202 | /*constant*/ false, |
2203 | llvm::GlobalValue::AppendingLinkage); |
2204 | |
2205 | // The LTO linker doesn't seem to like it when we set an alignment |
2206 | // on appending variables. Take it off as a workaround. |
2207 | List->setAlignment(std::nullopt); |
2208 | |
2209 | Fns.clear(); |
2210 | } |
2211 | |
2212 | llvm::GlobalValue::LinkageTypes |
2213 | CodeGenModule::getFunctionLinkage(GlobalDecl GD) { |
2214 | const auto *D = cast<FunctionDecl>(Val: GD.getDecl()); |
2215 | |
2216 | GVALinkage Linkage = getContext().GetGVALinkageForFunction(FD: D); |
2217 | |
2218 | if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(Val: D)) |
2219 | return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, DT: GD.getDtorType()); |
2220 | |
2221 | return getLLVMLinkageForDeclarator(D, Linkage); |
2222 | } |
2223 | |
2224 | llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) { |
2225 | llvm::MDString *MDS = dyn_cast<llvm::MDString>(Val: MD); |
2226 | if (!MDS) return nullptr; |
2227 | |
2228 | return llvm::ConstantInt::get(Ty: Int64Ty, V: llvm::MD5Hash(Str: MDS->getString())); |
2229 | } |
2230 | |
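// Illustrative note: the KCFI type id below is derived from the mangled
// canonical function type (for example, "void(int)" mangles to something
// like "_ZTSFviE" under the Itanium ABI), hashed with xxHash64 and truncated
// to 32 bits.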
2231 | llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) { |
2232 | if (auto *FnType = T->getAs<FunctionProtoType>()) |
2233 | T = getContext().getFunctionType( |
2234 | ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(), |
2235 | EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None)); |
2236 | |
2237 | std::string OutName; |
2238 | llvm::raw_string_ostream Out(OutName); |
2239 | getCXXABI().getMangleContext().mangleCanonicalTypeName( |
2240 | T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers); |
2241 | |
2242 | if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers) |
2243 | Out << ".normalized" ; |
2244 | |
2245 | return llvm::ConstantInt::get(Ty: Int32Ty, |
2246 | V: static_cast<uint32_t>(llvm::xxHash64(Data: OutName))); |
2247 | } |
2248 | |
2249 | void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD, |
2250 | const CGFunctionInfo &Info, |
2251 | llvm::Function *F, bool IsThunk) { |
2252 | unsigned CallingConv; |
2253 | llvm::AttributeList PAL; |
2254 | ConstructAttributeList(Name: F->getName(), Info, CalleeInfo: GD, Attrs&: PAL, CallingConv, |
2255 | /*AttrOnCallSite=*/false, IsThunk); |
2256 | if (CallingConv == llvm::CallingConv::X86_VectorCall && |
2257 | getTarget().getTriple().isWindowsArm64EC()) { |
2258 | SourceLocation Loc; |
2259 | if (const Decl *D = GD.getDecl()) |
2260 | Loc = D->getLocation(); |
2261 | |
2262 | Error(loc: Loc, message: "__vectorcall calling convention is not currently supported" ); |
2263 | } |
2264 | F->setAttributes(PAL); |
2265 | F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); |
2266 | } |
2267 | |
2268 | static void removeImageAccessQualifier(std::string& TyName) { |
2269 | std::string ReadOnlyQual("__read_only" ); |
2270 | std::string::size_type ReadOnlyPos = TyName.find(str: ReadOnlyQual); |
2271 | if (ReadOnlyPos != std::string::npos) |
2272 | // "+ 1" for the space after access qualifier. |
2273 | TyName.erase(pos: ReadOnlyPos, n: ReadOnlyQual.size() + 1); |
2274 | else { |
2275 | std::string WriteOnlyQual("__write_only" ); |
2276 | std::string::size_type WriteOnlyPos = TyName.find(str: WriteOnlyQual); |
2277 | if (WriteOnlyPos != std::string::npos) |
2278 | TyName.erase(pos: WriteOnlyPos, n: WriteOnlyQual.size() + 1); |
2279 | else { |
2280 | std::string ReadWriteQual("__read_write" ); |
2281 | std::string::size_type ReadWritePos = TyName.find(str: ReadWriteQual); |
2282 | if (ReadWritePos != std::string::npos) |
2283 | TyName.erase(pos: ReadWritePos, n: ReadWriteQual.size() + 1); |
2284 | } |
2285 | } |
2286 | } |
2287 | |
2288 | // Returns the address space id that should be emitted in the
2289 | // kernel_arg_addr_space metadata. This is always fixed to the ids specified
2290 | // in the SPIR 2.0 specification, in order to differentiate, for example in
2291 | // a clGetKernelArgInfo() implementation, between the address spaces on
2292 | // targets that have no unique mapping to the OpenCL address spaces
2293 | // (basically all single-AS CPUs).
2294 | static unsigned ArgInfoAddressSpace(LangAS AS) { |
2295 | switch (AS) { |
2296 | case LangAS::opencl_global: |
2297 | return 1; |
2298 | case LangAS::opencl_constant: |
2299 | return 2; |
2300 | case LangAS::opencl_local: |
2301 | return 3; |
2302 | case LangAS::opencl_generic: |
2303 | return 4; // Not in SPIR 2.0 specs. |
2304 | case LangAS::opencl_global_device: |
2305 | return 5; |
2306 | case LangAS::opencl_global_host: |
2307 | return 6; |
2308 | default: |
2309 | return 0; // Assume private. |
2310 | } |
2311 | } |
2312 | |
2313 | void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn, |
2314 | const FunctionDecl *FD, |
2315 | CodeGenFunction *CGF) { |
2316 | assert(((FD && CGF) || (!FD && !CGF)) && |
2317 | "Incorrect use - FD and CGF should either be both null or not!" ); |
2318 | // Create MDNodes that represent the kernel arg metadata.
2319 | // Each MDNode is a list of the form "key" followed by N values, where N is
2320 | // the number of kernel arguments.
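// Illustrative example: for "kernel void foo(global int *p)" the emitted
// metadata is roughly kernel_arg_addr_space !{i32 1},
// kernel_arg_access_qual !{!"none"}, kernel_arg_type !{!"int*"},
// kernel_arg_base_type !{!"int*"} and kernel_arg_type_qual !{!""}.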
2321 | |
2322 | const PrintingPolicy &Policy = Context.getPrintingPolicy(); |
2323 | |
2324 | // MDNode for the kernel argument address space qualifiers. |
2325 | SmallVector<llvm::Metadata *, 8> addressQuals; |
2326 | |
2327 | // MDNode for the kernel argument access qualifiers (images only). |
2328 | SmallVector<llvm::Metadata *, 8> accessQuals; |
2329 | |
2330 | // MDNode for the kernel argument type names. |
2331 | SmallVector<llvm::Metadata *, 8> argTypeNames; |
2332 | |
2333 | // MDNode for the kernel argument base type names. |
2334 | SmallVector<llvm::Metadata *, 8> argBaseTypeNames; |
2335 | |
2336 | // MDNode for the kernel argument type qualifiers. |
2337 | SmallVector<llvm::Metadata *, 8> argTypeQuals; |
2338 | |
2339 | // MDNode for the kernel argument names. |
2340 | SmallVector<llvm::Metadata *, 8> argNames; |
2341 | |
2342 | if (FD && CGF) |
2343 | for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { |
2344 | const ParmVarDecl *parm = FD->getParamDecl(i); |
2345 | // Get argument name. |
2346 | argNames.push_back(Elt: llvm::MDString::get(VMContext, parm->getName())); |
2347 | |
2348 | if (!getLangOpts().OpenCL) |
2349 | continue; |
2350 | QualType ty = parm->getType(); |
2351 | std::string typeQuals; |
2352 | |
2353 | // Get image and pipe access qualifier: |
2354 | if (ty->isImageType() || ty->isPipeType()) { |
2355 | const Decl *PDecl = parm; |
2356 | if (const auto *TD = ty->getAs<TypedefType>()) |
2357 | PDecl = TD->getDecl(); |
2358 | const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>(); |
2359 | if (A && A->isWriteOnly()) |
2360 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "write_only" )); |
2361 | else if (A && A->isReadWrite()) |
2362 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_write" )); |
2363 | else |
2364 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_only" )); |
2365 | } else |
2366 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "none" )); |
2367 | |
2368 | auto getTypeSpelling = [&](QualType Ty) { |
2369 | auto typeName = Ty.getUnqualifiedType().getAsString(Policy); |
2370 | |
2371 | if (Ty.isCanonical()) { |
2372 | StringRef typeNameRef = typeName; |
2373 | // Turn "unsigned type" to "utype" |
2374 | if (typeNameRef.consume_front(Prefix: "unsigned " )) |
2375 | return std::string("u" ) + typeNameRef.str(); |
2376 | if (typeNameRef.consume_front(Prefix: "signed " )) |
2377 | return typeNameRef.str(); |
2378 | } |
2379 | |
2380 | return typeName; |
2381 | }; |
2382 | |
2383 | if (ty->isPointerType()) { |
2384 | QualType pointeeTy = ty->getPointeeType(); |
2385 | |
2386 | // Get address qualifier. |
2387 | addressQuals.push_back( |
2388 | Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32( |
2389 | C: ArgInfoAddressSpace(AS: pointeeTy.getAddressSpace())))); |
2390 | |
2391 | // Get argument type name. |
2392 | std::string typeName = getTypeSpelling(pointeeTy) + "*" ; |
2393 | std::string baseTypeName = |
2394 | getTypeSpelling(pointeeTy.getCanonicalType()) + "*" ; |
2395 | argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName)); |
2396 | argBaseTypeNames.push_back( |
2397 | Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName)); |
2398 | |
2399 | // Get argument type qualifiers: |
2400 | if (ty.isRestrictQualified()) |
2401 | typeQuals = "restrict" ; |
2402 | if (pointeeTy.isConstQualified() || |
2403 | (pointeeTy.getAddressSpace() == LangAS::opencl_constant)) |
2404 | typeQuals += typeQuals.empty() ? "const" : " const" ; |
2405 | if (pointeeTy.isVolatileQualified()) |
2406 | typeQuals += typeQuals.empty() ? "volatile" : " volatile" ; |
2407 | } else { |
2408 | uint32_t AddrSpc = 0; |
2409 | bool isPipe = ty->isPipeType(); |
2410 | if (ty->isImageType() || isPipe) |
2411 | AddrSpc = ArgInfoAddressSpace(AS: LangAS::opencl_global); |
2412 | |
2413 | addressQuals.push_back( |
2414 | Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32(C: AddrSpc))); |
2415 | |
2416 | // Get argument type name. |
2417 | ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty; |
2418 | std::string typeName = getTypeSpelling(ty); |
2419 | std::string baseTypeName = getTypeSpelling(ty.getCanonicalType()); |
2420 | |
2421 | // Remove access qualifiers on images |
2422 | // (as they are inseparable from type in clang implementation, |
2423 | // but OpenCL spec provides a special query to get access qualifier |
2424 | // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER): |
2425 | if (ty->isImageType()) { |
2426 | removeImageAccessQualifier(TyName&: typeName); |
2427 | removeImageAccessQualifier(TyName&: baseTypeName); |
2428 | } |
2429 | |
2430 | argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName)); |
2431 | argBaseTypeNames.push_back( |
2432 | Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName)); |
2433 | |
2434 | if (isPipe) |
2435 | typeQuals = "pipe" ; |
2436 | } |
2437 | argTypeQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeQuals)); |
2438 | } |
2439 | |
2440 | if (getLangOpts().OpenCL) { |
2441 | Fn->setMetadata(Kind: "kernel_arg_addr_space" , |
2442 | Node: llvm::MDNode::get(Context&: VMContext, MDs: addressQuals)); |
2443 | Fn->setMetadata(Kind: "kernel_arg_access_qual" , |
2444 | Node: llvm::MDNode::get(Context&: VMContext, MDs: accessQuals)); |
2445 | Fn->setMetadata(Kind: "kernel_arg_type" , |
2446 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeNames)); |
2447 | Fn->setMetadata(Kind: "kernel_arg_base_type" , |
2448 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argBaseTypeNames)); |
2449 | Fn->setMetadata(Kind: "kernel_arg_type_qual" , |
2450 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeQuals)); |
2451 | } |
2452 | if (getCodeGenOpts().EmitOpenCLArgMetadata || |
2453 | getCodeGenOpts().HIPSaveKernelArgName) |
2454 | Fn->setMetadata(Kind: "kernel_arg_name" , |
2455 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argNames)); |
2456 | } |
2457 | |
2458 | /// Determines whether the language options require us to model |
2459 | /// unwind exceptions. We treat -fexceptions as mandating this |
2460 | /// except under the fragile ObjC ABI with only ObjC exceptions |
2461 | /// enabled. This means, for example, that C with -fexceptions |
2462 | /// enables this. |
2463 | static bool hasUnwindExceptions(const LangOptions &LangOpts) { |
2464 | // If exceptions are completely disabled, obviously this is false. |
2465 | if (!LangOpts.Exceptions) return false; |
2466 | |
2467 | // If C++ exceptions are enabled, this is true. |
2468 | if (LangOpts.CXXExceptions) return true; |
2469 | |
2470 | // If ObjC exceptions are enabled, this depends on the ABI. |
2471 | if (LangOpts.ObjCExceptions) { |
2472 | return LangOpts.ObjCRuntime.hasUnwindExceptions(); |
2473 | } |
2474 | |
2475 | return true; |
2476 | } |
2477 | |
2478 | static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM, |
2479 | const CXXMethodDecl *MD) { |
2480 | // Check that the type metadata can ever actually be used by a call. |
2481 | if (!CGM.getCodeGenOpts().LTOUnit || |
2482 | !CGM.HasHiddenLTOVisibility(RD: MD->getParent())) |
2483 | return false; |
2484 | |
2485 | // Only functions whose address can be taken with a member function pointer |
2486 | // need this sort of type metadata. |
2487 | return MD->isImplicitObjectMemberFunction() && !MD->isVirtual() && |
2488 | !isa<CXXConstructorDecl, CXXDestructorDecl>(Val: MD); |
2489 | } |
2490 | |
2491 | SmallVector<const CXXRecordDecl *, 0> |
2492 | CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) { |
2493 | llvm::SetVector<const CXXRecordDecl *> MostBases; |
2494 | |
2495 | std::function<void (const CXXRecordDecl *)> CollectMostBases; |
2496 | CollectMostBases = [&](const CXXRecordDecl *RD) { |
2497 | if (RD->getNumBases() == 0) |
2498 | MostBases.insert(X: RD); |
2499 | for (const CXXBaseSpecifier &B : RD->bases()) |
2500 | CollectMostBases(B.getType()->getAsCXXRecordDecl()); |
2501 | }; |
2502 | CollectMostBases(RD); |
2503 | return MostBases.takeVector(); |
2504 | } |
2505 | |
2506 | void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D, |
2507 | llvm::Function *F) { |
2508 | llvm::AttrBuilder B(F->getContext()); |
2509 | |
2510 | if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables) |
2511 | B.addUWTableAttr(Kind: llvm::UWTableKind(CodeGenOpts.UnwindTables)); |
2512 | |
2513 | if (CodeGenOpts.StackClashProtector) |
2514 | B.addAttribute(A: "probe-stack" , V: "inline-asm" ); |
2515 | |
2516 | if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096) |
2517 | B.addAttribute(A: "stack-probe-size" , |
2518 | V: std::to_string(val: CodeGenOpts.StackProbeSize)); |
2519 | |
2520 | if (!hasUnwindExceptions(LangOpts)) |
2521 | B.addAttribute(llvm::Attribute::NoUnwind); |
2522 | |
2523 | if (D && D->hasAttr<NoStackProtectorAttr>()) |
2524 | ; // Do nothing. |
2525 | else if (D && D->hasAttr<StrictGuardStackCheckAttr>() && |
2526 | isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPOn)) |
2527 | B.addAttribute(llvm::Attribute::StackProtectStrong); |
2528 | else if (isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPOn)) |
2529 | B.addAttribute(llvm::Attribute::StackProtect); |
2530 | else if (isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPStrong)) |
2531 | B.addAttribute(llvm::Attribute::StackProtectStrong); |
2532 | else if (isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPReq)) |
2533 | B.addAttribute(llvm::Attribute::StackProtectReq); |
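// Illustrative mapping: -fstack-protector, -fstack-protector-strong and
// -fstack-protector-all correspond to the ssp, sspstrong and sspreq IR
// attributes added above.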
2534 | |
2535 | if (!D) { |
2536 | // Non-entry HLSL functions must always be inlined. |
2537 | if (getLangOpts().HLSL && !F->hasFnAttribute(llvm::Attribute::NoInline)) |
2538 | B.addAttribute(llvm::Attribute::AlwaysInline); |
2539 | // If we don't have a declaration to control inlining, the function isn't |
2540 | // explicitly marked as alwaysinline for semantic reasons, and inlining is |
2541 | // disabled, mark the function as noinline. |
2542 | else if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) && |
2543 | CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) |
2544 | B.addAttribute(llvm::Attribute::NoInline); |
2545 | |
    F->addFnAttrs(B);
2547 | return; |
2548 | } |
2549 | |
2550 | // Handle SME attributes that apply to function definitions, |
2551 | // rather than to function prototypes. |
  if (D->hasAttr<ArmLocallyStreamingAttr>())
    B.addAttribute("aarch64_pstate_sm_body");

  if (auto *Attr = D->getAttr<ArmNewAttr>()) {
    if (Attr->isNewZA())
      B.addAttribute("aarch64_new_za");
    if (Attr->isNewZT0())
      B.addAttribute("aarch64_new_zt0");
2560 | } |
2561 | |
2562 | // Track whether we need to add the optnone LLVM attribute, |
2563 | // starting with the default for this optimization level. |
2564 | bool ShouldAddOptNone = |
2565 | !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0; |
2566 | // We can't add optnone in the following cases, it won't pass the verifier. |
2567 | ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>(); |
2568 | ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>(); |
2569 | |
2570 | // Non-entry HLSL functions must always be inlined. |
2571 | if (getLangOpts().HLSL && !F->hasFnAttribute(llvm::Attribute::NoInline) && |
2572 | !D->hasAttr<NoInlineAttr>()) { |
2573 | B.addAttribute(llvm::Attribute::AlwaysInline); |
2574 | } else if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) && |
2575 | !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) { |
2576 | // Add optnone, but do so only if the function isn't always_inline. |
2577 | B.addAttribute(llvm::Attribute::OptimizeNone); |
2578 | |
2579 | // OptimizeNone implies noinline; we should not be inlining such functions. |
2580 | B.addAttribute(llvm::Attribute::NoInline); |
2581 | |
2582 | // We still need to handle naked functions even though optnone subsumes |
2583 | // much of their semantics. |
2584 | if (D->hasAttr<NakedAttr>()) |
2585 | B.addAttribute(llvm::Attribute::Naked); |
2586 | |
2587 | // OptimizeNone wins over OptimizeForSize and MinSize. |
2588 | F->removeFnAttr(llvm::Attribute::OptimizeForSize); |
2589 | F->removeFnAttr(llvm::Attribute::MinSize); |
2590 | } else if (D->hasAttr<NakedAttr>()) { |
2591 | // Naked implies noinline: we should not be inlining such functions. |
2592 | B.addAttribute(llvm::Attribute::Naked); |
2593 | B.addAttribute(llvm::Attribute::NoInline); |
2594 | } else if (D->hasAttr<NoDuplicateAttr>()) { |
2595 | B.addAttribute(llvm::Attribute::NoDuplicate); |
2596 | } else if (D->hasAttr<NoInlineAttr>() && |
2597 | !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) { |
2598 | // Add noinline if the function isn't always_inline. |
2599 | B.addAttribute(llvm::Attribute::NoInline); |
2600 | } else if (D->hasAttr<AlwaysInlineAttr>() && |
2601 | !F->hasFnAttribute(llvm::Attribute::NoInline)) { |
2602 | // (noinline wins over always_inline, and we can't specify both in IR) |
2603 | B.addAttribute(llvm::Attribute::AlwaysInline); |
2604 | } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { |
2605 | // If we're not inlining, then force everything that isn't always_inline to |
2606 | // carry an explicit noinline attribute. |
2607 | if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline)) |
2608 | B.addAttribute(llvm::Attribute::NoInline); |
2609 | } else { |
2610 | // Otherwise, propagate the inline hint attribute and potentially use its |
2611 | // absence to mark things as noinline. |
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
2613 | // Search function and template pattern redeclarations for inline. |
2614 | auto CheckForInline = [](const FunctionDecl *FD) { |
2615 | auto CheckRedeclForInline = [](const FunctionDecl *Redecl) { |
2616 | return Redecl->isInlineSpecified(); |
2617 | }; |
2618 | if (any_of(FD->redecls(), CheckRedeclForInline)) |
2619 | return true; |
2620 | const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(); |
2621 | if (!Pattern) |
2622 | return false; |
2623 | return any_of(Pattern->redecls(), CheckRedeclForInline); |
2624 | }; |
2625 | if (CheckForInline(FD)) { |
2626 | B.addAttribute(llvm::Attribute::InlineHint); |
2627 | } else if (CodeGenOpts.getInlining() == |
2628 | CodeGenOptions::OnlyHintInlining && |
2629 | !FD->isInlined() && |
2630 | !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) { |
2631 | B.addAttribute(llvm::Attribute::NoInline); |
2632 | } |
2633 | } |
2634 | } |
2635 | |
2636 | // Add other optimization related attributes if we are optimizing this |
2637 | // function. |
2638 | if (!D->hasAttr<OptimizeNoneAttr>()) { |
2639 | if (D->hasAttr<ColdAttr>()) { |
2640 | if (!ShouldAddOptNone) |
2641 | B.addAttribute(llvm::Attribute::OptimizeForSize); |
2642 | B.addAttribute(llvm::Attribute::Cold); |
2643 | } |
2644 | if (D->hasAttr<HotAttr>()) |
2645 | B.addAttribute(llvm::Attribute::Hot); |
2646 | if (D->hasAttr<MinSizeAttr>()) |
2647 | B.addAttribute(llvm::Attribute::MinSize); |
2648 | } |
2649 | |
  F->addFnAttrs(B);
2651 | |
2652 | unsigned alignment = D->getMaxAlignment() / Context.getCharWidth(); |
2653 | if (alignment) |
2654 | F->setAlignment(llvm::Align(alignment)); |
2655 | |
2656 | if (!D->hasAttr<AlignedAttr>()) |
2657 | if (LangOpts.FunctionAlignment) |
2658 | F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment)); |
2659 | |
2660 | // Some C++ ABIs require 2-byte alignment for member functions, in order to |
2661 | // reserve a bit for differentiating between virtual and non-virtual member |
2662 | // functions. If the current target's C++ ABI requires this and this is a |
2663 | // member function, set its alignment accordingly. |
2664 | if (getTarget().getCXXABI().areMemberFunctionsAligned()) { |
    if (isa<CXXMethodDecl>(D) && F->getPointerAlignment(getDataLayout()) < 2)
      F->setAlignment(std::max(llvm::Align(2), F->getAlign().valueOrOne()));
2667 | } |
2668 | |
2669 | // In the cross-dso CFI mode with canonical jump tables, we want !type |
2670 | // attributes on definitions only. |
2671 | if (CodeGenOpts.SanitizeCfiCrossDso && |
2672 | CodeGenOpts.SanitizeCfiCanonicalJumpTables) { |
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
2674 | // Skip available_externally functions. They won't be codegen'ed in the |
2675 | // current module anyway. |
2676 | if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally) |
2677 | createFunctionTypeMetadataForIcall(FD, F); |
2678 | } |
2679 | } |
2680 | |
2681 | // Emit type metadata on member functions for member function pointer checks. |
2682 | // These are only ever necessary on definitions; we're guaranteed that the |
2683 | // definition will be present in the LTO unit as a result of LTO visibility. |
  auto *MD = dyn_cast<CXXMethodDecl>(D);
  if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
    for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
      llvm::Metadata *Id =
          CreateMetadataIdentifierForType(Context.getMemberPointerType(
              MD->getType(), /*Qualifier=*/nullptr, Base));
      F->addTypeMetadata(0, Id);
2691 | } |
2692 | } |
2693 | } |
2694 | |
2695 | void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) { |
2696 | const Decl *D = GD.getDecl(); |
  if (isa_and_nonnull<NamedDecl>(D))
2698 | setGVProperties(GV, GD); |
2699 | else |
2700 | GV->setVisibility(llvm::GlobalValue::DefaultVisibility); |
2701 | |
2702 | if (D && D->hasAttr<UsedAttr>()) |
2703 | addUsedOrCompilerUsedGlobal(GV); |
2704 | |
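  // Honor the KeepPersistentStorageVariables and KeepStaticConsts codegen
  // options by keeping matching variables alive via llvm.used /
  // llvm.compiler.used.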
  if (const auto *VD = dyn_cast_if_present<VarDecl>(D);
2706 | VD && |
2707 | ((CodeGenOpts.KeepPersistentStorageVariables && |
2708 | (VD->getStorageDuration() == SD_Static || |
2709 | VD->getStorageDuration() == SD_Thread)) || |
2710 | (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && |
2711 | VD->getType().isConstQualified()))) |
2712 | addUsedOrCompilerUsedGlobal(GV); |
2713 | } |
2714 | |
2715 | bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD, |
2716 | llvm::AttrBuilder &Attrs, |
2717 | bool SetTargetFeatures) { |
2718 | // Add target-cpu and target-features attributes to functions. If |
2719 | // we have a decl for the function and it has a target attribute then |
2720 | // parse that and add it to the feature set. |
2721 | StringRef TargetCPU = getTarget().getTargetOpts().CPU; |
2722 | StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU; |
2723 | std::vector<std::string> Features; |
  const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
  FD = FD ? FD->getMostRecentDecl() : FD;
  const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
  const auto *TV = FD ? FD->getAttr<TargetVersionAttr>() : nullptr;
  assert((!TD || !TV) && "both target_version and target specified");
  const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
  const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
2731 | bool AddedAttr = false; |
2732 | if (TD || TV || SD || TC) { |
2733 | llvm::StringMap<bool> FeatureMap; |
2734 | getContext().getFunctionFeatureMap(FeatureMap, GD); |
2735 | |
2736 | // Produce the canonical string for this set of features. |
2737 | for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap) |
      Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
2739 | |
2740 | // Now add the target-cpu and target-features to the function. |
2741 | // While we populated the feature map above, we still need to |
2742 | // get and parse the target attribute so we can get the cpu for |
2743 | // the function. |
    if (TD) {
      ParsedTargetAttr ParsedAttr =
          Target.parseTargetAttr(TD->getFeaturesStr());
      if (!ParsedAttr.CPU.empty() &&
          getTarget().isValidCPUName(ParsedAttr.CPU)) {
        TargetCPU = ParsedAttr.CPU;
        TuneCPU = ""; // Clear the tune CPU.
      }
      if (!ParsedAttr.Tune.empty() &&
          getTarget().isValidCPUName(ParsedAttr.Tune))
        TuneCPU = ParsedAttr.Tune;
2755 | } |
2756 | |
2757 | if (SD) { |
2758 | // Apply the given CPU name as the 'tune-cpu' so that the optimizer can |
2759 | // favor this processor. |
2760 | TuneCPU = SD->getCPUName(GD.getMultiVersionIndex())->getName(); |
2761 | } |
2762 | } else { |
2763 | // Otherwise just add the existing target cpu and target features to the |
2764 | // function. |
2765 | Features = getTarget().getTargetOpts().Features; |
2766 | } |
2767 | |
  if (!TargetCPU.empty()) {
    Attrs.addAttribute("target-cpu", TargetCPU);
    AddedAttr = true;
  }
  if (!TuneCPU.empty()) {
    Attrs.addAttribute("tune-cpu", TuneCPU);
    AddedAttr = true;
  }
  if (!Features.empty() && SetTargetFeatures) {
    llvm::erase_if(Features, [&](const std::string &F) {
      return getTarget().isReadOnlyFeature(F.substr(1));
    });
    llvm::sort(Features);
    Attrs.addAttribute("target-features", llvm::join(Features, ","));
2782 | AddedAttr = true; |
2783 | } |
2784 | // Add metadata for AArch64 Function Multi Versioning. |
2785 | if (getTarget().getTriple().isAArch64()) { |
2786 | llvm::SmallVector<StringRef, 8> Feats; |
2787 | bool IsDefault = false; |
2788 | if (TV) { |
2789 | IsDefault = TV->isDefaultVersion(); |
2790 | TV->getFeatures(Feats); |
2791 | } else if (TC) { |
2792 | IsDefault = TC->isDefaultVersion(GD.getMultiVersionIndex()); |
2793 | TC->getFeatures(Feats, GD.getMultiVersionIndex()); |
2794 | } |
2795 | if (IsDefault) { |
2796 | Attrs.addAttribute(A: "fmv-features" ); |
2797 | AddedAttr = true; |
2798 | } else if (!Feats.empty()) { |
2799 | // Sort features and remove duplicates. |
2800 | std::set<StringRef> OrderedFeats(Feats.begin(), Feats.end()); |
2801 | std::string FMVFeatures; |
2802 | for (StringRef F : OrderedFeats) |
        FMVFeatures.append("," + F.str());
      Attrs.addAttribute("fmv-features", FMVFeatures.substr(1));
2805 | AddedAttr = true; |
2806 | } |
2807 | } |
2808 | return AddedAttr; |
2809 | } |
2810 | |
2811 | void CodeGenModule::setNonAliasAttributes(GlobalDecl GD, |
2812 | llvm::GlobalObject *GO) { |
2813 | const Decl *D = GD.getDecl(); |
  SetCommonAttributes(GD, GO);
2815 | |
2816 | if (D) { |
    if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(GV);
      if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
        GV->addAttribute("bss-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
        GV->addAttribute("data-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
        GV->addAttribute("rodata-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
        GV->addAttribute("relro-section", SA->getName());
2828 | } |
2829 | |
    if (auto *F = dyn_cast<llvm::Function>(GO)) {
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(F);
2833 | if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>()) |
2834 | if (!D->getAttr<SectionAttr>()) |
2835 | F->setSection(SA->getName()); |
2836 | |
2837 | llvm::AttrBuilder Attrs(F->getContext()); |
2838 | if (GetCPUAndFeaturesAttributes(GD, Attrs)) { |
2839 | // We know that GetCPUAndFeaturesAttributes will always have the |
2840 | // newest set, since it has the newest possible FunctionDecl, so the |
2841 | // new ones should replace the old. |
2842 | llvm::AttributeMask RemoveAttrs; |
2843 | RemoveAttrs.addAttribute(A: "target-cpu" ); |
2844 | RemoveAttrs.addAttribute(A: "target-features" ); |
2845 | RemoveAttrs.addAttribute(A: "fmv-features" ); |
2846 | RemoveAttrs.addAttribute(A: "tune-cpu" ); |
2847 | F->removeFnAttrs(Attrs: RemoveAttrs); |
2848 | F->addFnAttrs(Attrs); |
2849 | } |
2850 | } |
2851 | |
2852 | if (const auto *CSA = D->getAttr<CodeSegAttr>()) |
2853 | GO->setSection(CSA->getName()); |
2854 | else if (const auto *SA = D->getAttr<SectionAttr>()) |
2855 | GO->setSection(SA->getName()); |
2856 | } |
2857 | |
  getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
2859 | } |
2860 | |
2861 | void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD, |
2862 | llvm::Function *F, |
2863 | const CGFunctionInfo &FI) { |
2864 | const Decl *D = GD.getDecl(); |
  SetLLVMFunctionAttributes(GD, FI, F, /*IsThunk=*/false);
2866 | SetLLVMFunctionAttributesForDefinition(D, F); |
2867 | |
2868 | F->setLinkage(llvm::Function::InternalLinkage); |
2869 | |
  setNonAliasAttributes(GD, F);
2871 | } |
2872 | |
2873 | static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) { |
2874 | // Set linkage and visibility in case we never see a definition. |
2875 | LinkageInfo LV = ND->getLinkageAndVisibility(); |
2876 | // Don't set internal linkage on declarations. |
2877 | // "extern_weak" is overloaded in LLVM; we probably should have |
2878 | // separate linkage types for this. |
2879 | if (isExternallyVisible(LV.getLinkage()) && |
2880 | (ND->hasAttr<WeakAttr>() || ND->isWeakImported())) |
2881 | GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage); |
2882 | } |
2883 | |
2884 | void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD, |
2885 | llvm::Function *F) { |
2886 | // Only if we are checking indirect calls. |
  if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
2888 | return; |
2889 | |
2890 | // Non-static class methods are handled via vtable or member function pointer |
2891 | // checks elsewhere. |
  if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2893 | return; |
2894 | |
  llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
  F->addTypeMetadata(0, MD);
  F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
2898 | |
2899 | // Emit a hash-based bit set entry for cross-DSO calls. |
2900 | if (CodeGenOpts.SanitizeCfiCrossDso) |
2901 | if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD)) |
      F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
2903 | } |
2904 | |
2905 | void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) { |
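  // Attach !kcfi_type metadata carrying the expected KCFI type hash for this
  // function; -fsanitize=kcfi compares it against the hash emitted at
  // indirect call sites.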
2906 | llvm::LLVMContext &Ctx = F->getContext(); |
2907 | llvm::MDBuilder MDB(Ctx); |
  F->setMetadata(llvm::LLVMContext::MD_kcfi_type,
                 llvm::MDNode::get(
                     Ctx, MDB.createConstant(CreateKCFITypeId(FD->getType()))));
2911 | } |
2912 | |
2913 | static bool allowKCFIIdentifier(StringRef Name) { |
2914 | // KCFI type identifier constants are only necessary for external assembly |
2915 | // functions, which means it's safe to skip unusual names. Subset of |
2916 | // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar(). |
  return llvm::all_of(Name, [](const char &C) {
2918 | return llvm::isAlnum(C) || C == '_' || C == '.'; |
2919 | }); |
2920 | } |
2921 | |
2922 | void CodeGenModule::finalizeKCFITypes() { |
2923 | llvm::Module &M = getModule(); |
2924 | for (auto &F : M.functions()) { |
2925 | // Remove KCFI type metadata from non-address-taken local functions. |
2926 | bool AddressTaken = F.hasAddressTaken(); |
2927 | if (!AddressTaken && F.hasLocalLinkage()) |
      F.eraseMetadata(llvm::LLVMContext::MD_kcfi_type);
2929 | |
2930 | // Generate a constant with the expected KCFI type identifier for all |
2931 | // address-taken function declarations to support annotating indirectly |
2932 | // called assembly functions. |
2933 | if (!AddressTaken || !F.isDeclaration()) |
2934 | continue; |
2935 | |
2936 | const llvm::ConstantInt *Type; |
    if (const llvm::MDNode *MD = F.getMetadata(llvm::LLVMContext::MD_kcfi_type))
      Type = llvm::mdconst::extract<llvm::ConstantInt>(MD->getOperand(0));
2939 | else |
2940 | continue; |
2941 | |
2942 | StringRef Name = F.getName(); |
2943 | if (!allowKCFIIdentifier(Name)) |
2944 | continue; |
2945 | |
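    // Emit module-level inline asm that defines a weak __kcfi_typeid_<name>
    // symbol holding the type hash, so assembly implementations of this
    // declaration can reference their expected KCFI type identifier.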
2946 | std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" + |
2947 | Name + ", " + Twine(Type->getZExtValue()) + "\n" ) |
2948 | .str(); |
2949 | M.appendModuleInlineAsm(Asm); |
2950 | } |
2951 | } |
2952 | |
2953 | void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F, |
2954 | bool IsIncompleteFunction, |
2955 | bool IsThunk) { |
2956 | |
2957 | if (F->getIntrinsicID() != llvm::Intrinsic::not_intrinsic) { |
2958 | // If this is an intrinsic function, the attributes will have been set |
2959 | // when the function was created. |
2960 | return; |
2961 | } |
2962 | |
  const auto *FD = cast<FunctionDecl>(GD.getDecl());
2964 | |
2965 | if (!IsIncompleteFunction) |
    SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F,
                              IsThunk);
2968 | |
2969 | // Add the Returned attribute for "this", except for iOS 5 and earlier |
2970 | // where substantial code, including the libstdc++ dylib, was compiled with |
2971 | // GCC and does not actually return "this". |
2972 | if (!IsThunk && getCXXABI().HasThisReturn(GD) && |
      !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
2974 | assert(!F->arg_empty() && |
2975 | F->arg_begin()->getType() |
2976 | ->canLosslesslyBitCastTo(F->getReturnType()) && |
2977 | "unexpected this return" ); |
2978 | F->addParamAttr(0, llvm::Attribute::Returned); |
2979 | } |
2980 | |
2981 | // Only a few attributes are set on declarations; these may later be |
2982 | // overridden by a definition. |
2983 | |
2984 | setLinkageForGV(F, FD); |
  setGVProperties(F, FD);
2986 | |
2987 | // Setup target-specific attributes. |
2988 | if (!IsIncompleteFunction && F->isDeclaration()) |
2989 | getTargetCodeGenInfo().setTargetAttributes(FD, F, *this); |
2990 | |
2991 | if (const auto *CSA = FD->getAttr<CodeSegAttr>()) |
2992 | F->setSection(CSA->getName()); |
2993 | else if (const auto *SA = FD->getAttr<SectionAttr>()) |
2994 | F->setSection(SA->getName()); |
2995 | |
2996 | if (const auto *EA = FD->getAttr<ErrorAttr>()) { |
2997 | if (EA->isError()) |
2998 | F->addFnAttr("dontcall-error" , EA->getUserDiagnostic()); |
2999 | else if (EA->isWarning()) |
3000 | F->addFnAttr("dontcall-warn" , EA->getUserDiagnostic()); |
3001 | } |
3002 | |
3003 | // If we plan on emitting this inline builtin, we can't treat it as a builtin. |
3004 | if (FD->isInlineBuiltinDeclaration()) { |
3005 | const FunctionDecl *FDBody; |
    bool HasBody = FD->hasBody(FDBody);
3007 | (void)HasBody; |
3008 | assert(HasBody && "Inline builtin declarations should always have an " |
3009 | "available body!" ); |
3010 | if (shouldEmitFunction(FDBody)) |
3011 | F->addFnAttr(llvm::Attribute::NoBuiltin); |
3012 | } |
3013 | |
3014 | if (FD->isReplaceableGlobalAllocationFunction()) { |
3015 | // A replaceable global allocation function does not act like a builtin by |
3016 | // default, only if it is invoked by a new-expression or delete-expression. |
3017 | F->addFnAttr(llvm::Attribute::NoBuiltin); |
3018 | } |
3019 | |
  if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
    F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
3023 | if (MD->isVirtual()) |
3024 | F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3025 | |
3026 | // Don't emit entries for function declarations in the cross-DSO mode. This |
3027 | // is handled with better precision by the receiving DSO. But if jump tables |
3028 | // are non-canonical then we need type metadata in order to produce the local |
3029 | // jump table. |
3030 | if (!CodeGenOpts.SanitizeCfiCrossDso || |
3031 | !CodeGenOpts.SanitizeCfiCanonicalJumpTables) |
3032 | createFunctionTypeMetadataForIcall(FD, F); |
3033 | |
  if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
    setKCFIType(FD, F);

  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
    getOpenMPRuntime().emitDeclareSimdFunction(FD, F);

  if (CodeGenOpts.InlineMaxStackSize != UINT_MAX)
    F->addFnAttr("inline-max-stacksize",
                 llvm::utostr(CodeGenOpts.InlineMaxStackSize));
3042 | |
3043 | if (const auto *CB = FD->getAttr<CallbackAttr>()) { |
3044 | // Annotate the callback behavior as metadata: |
3045 | // - The callback callee (as argument number). |
3046 | // - The callback payloads (as argument numbers). |
3047 | llvm::LLVMContext &Ctx = F->getContext(); |
3048 | llvm::MDBuilder MDB(Ctx); |
3049 | |
3050 | // The payload indices are all but the first one in the encoding. The first |
3051 | // identifies the callback callee. |
3052 | int CalleeIdx = *CB->encoding_begin(); |
3053 | ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end()); |
    F->addMetadata(llvm::LLVMContext::MD_callback,
                   *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                               CalleeIdx, PayloadIndices,
                                               /* VarArgsArePassed */ false)}));
3058 | } |
3059 | } |
3060 | |
3061 | void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) { |
  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
         "Only globals with definition can force usage.");
  LLVMUsed.emplace_back(GV);
3065 | } |
3066 | |
3067 | void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) { |
  assert(!GV->isDeclaration() &&
         "Only globals with definition can force usage.");
  LLVMCompilerUsed.emplace_back(GV);
3071 | } |
3072 | |
3073 | void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) { |
  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
         "Only globals with definition can force usage.");
  if (getTriple().isOSBinFormatELF())
    LLVMCompilerUsed.emplace_back(GV);
  else
    LLVMUsed.emplace_back(GV);
3080 | } |
3081 | |
3082 | static void emitUsed(CodeGenModule &CGM, StringRef Name, |
3083 | std::vector<llvm::WeakTrackingVH> &List) { |
3084 | // Don't create llvm.used if there is no need. |
3085 | if (List.empty()) |
3086 | return; |
3087 | |
3088 | // Convert List to what ConstantArray needs. |
3089 | SmallVector<llvm::Constant*, 8> UsedArray; |
  UsedArray.resize(List.size());
  for (unsigned i = 0, e = List.size(); i != e; ++i) {
    UsedArray[i] =
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
3095 | } |
3096 | |
3097 | if (UsedArray.empty()) |
3098 | return; |
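  // llvm.used / llvm.compiler.used are appending-linkage arrays in the
  // llvm.metadata section; LLVM treats their elements as having their address
  // taken and keeps them from being discarded.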
  llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
      llvm::ConstantArray::get(ATy, UsedArray), Name);

  GV->setSection("llvm.metadata");
3106 | } |
3107 | |
3108 | void CodeGenModule::emitLLVMUsed() { |
3109 | emitUsed(CGM&: *this, Name: "llvm.used" , List&: LLVMUsed); |
3110 | emitUsed(CGM&: *this, Name: "llvm.compiler.used" , List&: LLVMCompilerUsed); |
3111 | } |
3112 | |
3113 | void CodeGenModule::AppendLinkerOptions(StringRef Opts) { |
  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
3116 | } |
3117 | |
3118 | void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) { |
3119 | llvm::SmallString<32> Opt; |
3120 | getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt); |
3121 | if (Opt.empty()) |
3122 | return; |
  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
3125 | } |
3126 | |
3127 | void CodeGenModule::AddDependentLib(StringRef Lib) { |
3128 | auto &C = getLLVMContext(); |
3129 | if (getTarget().getTriple().isOSBinFormatELF()) { |
    ELFDependentLibraries.push_back(
        llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
3132 | return; |
3133 | } |
3134 | |
3135 | llvm::SmallString<24> Opt; |
3136 | getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt); |
  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
3139 | } |
3140 | |
3141 | /// Add link options implied by the given module, including modules |
3142 | /// it depends on, using a postorder walk. |
3143 | static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod, |
3144 | SmallVectorImpl<llvm::MDNode *> &Metadata, |
3145 | llvm::SmallPtrSet<Module *, 16> &Visited) { |
3146 | // Import this module's parent. |
  if (Mod->Parent && Visited.insert(Mod->Parent).second) {
    addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
  }

  // Import this module's dependencies.
  for (Module *Import : llvm::reverse(Mod->Imports)) {
    if (Visited.insert(Import).second)
      addLinkOptionsPostorder(CGM, Import, Metadata, Visited);
3155 | } |
3156 | |
3157 | // Add linker options to link against the libraries/frameworks |
3158 | // described by this module. |
3159 | llvm::LLVMContext &Context = CGM.getLLVMContext(); |
3160 | bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF(); |
3161 | |
3162 | // For modules that use export_as for linking, use that module |
3163 | // name instead. |
3164 | if (Mod->UseExportAsModuleLinkName) |
3165 | return; |
3166 | |
  for (const Module::LinkLibrary &LL : llvm::reverse(Mod->LinkLibraries)) {
    // Link against a framework. Frameworks are currently Darwin only, so we
    // don't need to ask TargetCodeGenInfo for the spelling of the linker
    // option.
    if (LL.IsFramework) {
      llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                                 llvm::MDString::get(Context, LL.Library)};

      Metadata.push_back(llvm::MDNode::get(Context, Args));
3175 | continue; |
3176 | } |
3177 | |
3178 | // Link against a library. |
    if (IsELF) {
      llvm::Metadata *Args[2] = {
          llvm::MDString::get(Context, "lib"),
          llvm::MDString::get(Context, LL.Library),
      };
      Metadata.push_back(llvm::MDNode::get(Context, Args));
    } else {
      llvm::SmallString<24> Opt;
      CGM.getTargetCodeGenInfo().getDependentLibraryOption(LL.Library, Opt);
      auto *OptString = llvm::MDString::get(Context, Opt);
      Metadata.push_back(llvm::MDNode::get(Context, OptString));
3190 | } |
3191 | } |
3192 | } |
3193 | |
3194 | void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) { |
  assert(Primary->isNamedModuleUnit() &&
         "We should only emit module initializers for named modules.");
3197 | |
  // Emit the initializers in the order that sub-modules appear in the
  // source: first the Global Module Fragment, if present.
  if (auto GMF = Primary->getGlobalModuleFragment()) {
    for (Decl *D : getContext().getModuleInitializers(GMF)) {
      if (isa<ImportDecl>(D))
        continue;
      assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?");
3205 | EmitTopLevelDecl(D); |
3206 | } |
3207 | } |
  // Second, any initializers associated with the module itself.
  for (Decl *D : getContext().getModuleInitializers(Primary)) {
    // Skip import decls; the inits for those are called explicitly.
    if (isa<ImportDecl>(D))
      continue;
    EmitTopLevelDecl(D);
  }
  // Third, any initializers associated with the Private Module Fragment, if
  // present.
  if (auto PMF = Primary->getPrivateModuleFragment()) {
    for (Decl *D : getContext().getModuleInitializers(PMF)) {
      // Skip import decls; the inits for those are called explicitly.
      if (isa<ImportDecl>(D))
        continue;
      assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?");
3222 | EmitTopLevelDecl(D); |
3223 | } |
3224 | } |
3225 | } |
3226 | |
3227 | void CodeGenModule::EmitModuleLinkOptions() { |
3228 | // Collect the set of all of the modules we want to visit to emit link |
3229 | // options, which is essentially the imported modules and all of their |
3230 | // non-explicit child modules. |
3231 | llvm::SetVector<clang::Module *> LinkModules; |
3232 | llvm::SmallPtrSet<clang::Module *, 16> Visited; |
3233 | SmallVector<clang::Module *, 16> Stack; |
3234 | |
3235 | // Seed the stack with imported modules. |
3236 | for (Module *M : ImportedModules) { |
3237 | // Do not add any link flags when an implementation TU of a module imports |
3238 | // a header of that same module. |
3239 | if (M->getTopLevelModuleName() == getLangOpts().CurrentModule && |
3240 | !getLangOpts().isCompilingModule()) |
3241 | continue; |
    if (Visited.insert(M).second)
      Stack.push_back(M);
3244 | } |
3245 | |
3246 | // Find all of the modules to import, making a little effort to prune |
3247 | // non-leaf modules. |
3248 | while (!Stack.empty()) { |
3249 | clang::Module *Mod = Stack.pop_back_val(); |
3250 | |
3251 | bool AnyChildren = false; |
3252 | |
3253 | // Visit the submodules of this module. |
3254 | for (const auto &SM : Mod->submodules()) { |
3255 | // Skip explicit children; they need to be explicitly imported to be |
3256 | // linked against. |
3257 | if (SM->IsExplicit) |
3258 | continue; |
3259 | |
      if (Visited.insert(SM).second) {
        Stack.push_back(SM);
3262 | AnyChildren = true; |
3263 | } |
3264 | } |
3265 | |
3266 | // We didn't find any children, so add this module to the list of |
3267 | // modules to link against. |
3268 | if (!AnyChildren) { |
      LinkModules.insert(Mod);
3270 | } |
3271 | } |
3272 | |
3273 | // Add link options for all of the imported modules in reverse topological |
3274 | // order. We don't do anything to try to order import link flags with respect |
3275 | // to linker options inserted by things like #pragma comment(). |
3276 | SmallVector<llvm::MDNode *, 16> MetadataArgs; |
3277 | Visited.clear(); |
3278 | for (Module *M : LinkModules) |
    if (Visited.insert(M).second)
      addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
  std::reverse(MetadataArgs.begin(), MetadataArgs.end());
  LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
3283 | |
3284 | // Add the linker options metadata flag. |
3285 | if (!LinkerOptionsMetadata.empty()) { |
    auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
    for (auto *MD : LinkerOptionsMetadata)
      NMD->addOperand(MD);
3289 | } |
3290 | } |
3291 | |
3292 | void CodeGenModule::EmitDeferred() { |
3293 | // Emit deferred declare target declarations. |
3294 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) |
3295 | getOpenMPRuntime().emitDeferredTargetDecls(); |
3296 | |
3297 | // Emit code for any potentially referenced deferred decls. Since a |
3298 | // previously unused static decl may become used during the generation of code |
3299 | // for a static function, iterate until no changes are made. |
3300 | |
3301 | if (!DeferredVTables.empty()) { |
3302 | EmitDeferredVTables(); |
3303 | |
3304 | // Emitting a vtable doesn't directly cause more vtables to |
3305 | // become deferred, although it can cause functions to be |
3306 | // emitted that then need those vtables. |
3307 | assert(DeferredVTables.empty()); |
3308 | } |
3309 | |
3310 | // Emit CUDA/HIP static device variables referenced by host code only. |
3311 | // Note we should not clear CUDADeviceVarODRUsedByHost since it is still |
3312 | // needed for further handling. |
3313 | if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) |
3314 | llvm::append_range(DeferredDeclsToEmit, |
3315 | getContext().CUDADeviceVarODRUsedByHost); |
3316 | |
3317 | // Stop if we're out of both deferred vtables and deferred declarations. |
3318 | if (DeferredDeclsToEmit.empty()) |
3319 | return; |
3320 | |
3321 | // Grab the list of decls to emit. If EmitGlobalDefinition schedules more |
3322 | // work, it will not interfere with this. |
3323 | std::vector<GlobalDecl> CurDeclsToEmit; |
  CurDeclsToEmit.swap(DeferredDeclsToEmit);
3325 | |
3326 | for (GlobalDecl &D : CurDeclsToEmit) { |
3327 | // Functions declared with the sycl_kernel_entry_point attribute are |
3328 | // emitted normally during host compilation. During device compilation, |
3329 | // a SYCL kernel caller offload entry point function is generated and |
3330 | // emitted in place of each of these functions. |
3331 | if (const auto *FD = D.getDecl()->getAsFunction()) { |
3332 | if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>() && |
3333 | FD->isDefined()) { |
3334 | // Functions with an invalid sycl_kernel_entry_point attribute are |
3335 | // ignored during device compilation. |
3336 | if (!FD->getAttr<SYCLKernelEntryPointAttr>()->isInvalidAttr()) { |
3337 | // Generate and emit the SYCL kernel caller function. |
          EmitSYCLKernelCaller(FD, getContext());
3339 | // Recurse to emit any symbols directly or indirectly referenced |
3340 | // by the SYCL kernel caller function. |
3341 | EmitDeferred(); |
3342 | } |
3343 | // Do not emit the sycl_kernel_entry_point attributed function. |
3344 | continue; |
3345 | } |
3346 | } |
3347 | |
    // We should call GetAddrOfGlobal with IsForDefinition set to true in order
    // to get a GlobalValue with exactly the type we need, not something that
    // might have been created for another decl with the same mangled name but
    // a different type.
    llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
        GetAddrOfGlobal(D, ForDefinition));
3354 | |
3355 | // In case of different address spaces, we may still get a cast, even with |
3356 | // IsForDefinition equal to true. Query mangled names table to get |
3357 | // GlobalValue. |
3358 | if (!GV) |
      GV = GetGlobalValue(getMangledName(D));
3360 | |
3361 | // Make sure GetGlobalValue returned non-null. |
3362 | assert(GV); |
3363 | |
3364 | // Check to see if we've already emitted this. This is necessary |
3365 | // for a couple of reasons: first, decls can end up in the |
3366 | // deferred-decls queue multiple times, and second, decls can end |
3367 | // up with definitions in unusual ways (e.g. by an extern inline |
3368 | // function acquiring a strong function redefinition). Just |
3369 | // ignore these cases. |
3370 | if (!GV->isDeclaration()) |
3371 | continue; |
3372 | |
3373 | // If this is OpenMP, check if it is legal to emit this global normally. |
    if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(D))
3375 | continue; |
3376 | |
3377 | // Otherwise, emit the definition and move on to the next one. |
3378 | EmitGlobalDefinition(D, GV); |
3379 | |
3380 | // If we found out that we need to emit more decls, do that recursively. |
3381 | // This has the advantage that the decls are emitted in a DFS and related |
3382 | // ones are close together, which is convenient for testing. |
3383 | if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { |
3384 | EmitDeferred(); |
3385 | assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); |
3386 | } |
3387 | } |
3388 | } |
3389 | |
3390 | void CodeGenModule::EmitVTablesOpportunistically() { |
  // Try to emit external vtables as available_externally if they have emitted
  // all inlined virtual functions. This runs after EmitDeferred() and
  // therefore is not allowed to create new references to things that need to
  // be emitted lazily. Note that it also relies on the fact that we eagerly
  // emit RTTI.
3395 | |
3396 | assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables()) |
3397 | && "Only emit opportunistic vtables with optimizations" ); |
3398 | |
3399 | for (const CXXRecordDecl *RD : OpportunisticVTables) { |
3400 | assert(getVTables().isVTableExternal(RD) && |
3401 | "This queue should only contain external vtables" ); |
3402 | if (getCXXABI().canSpeculativelyEmitVTable(RD)) |
3403 | VTables.GenerateClassData(RD); |
3404 | } |
3405 | OpportunisticVTables.clear(); |
3406 | } |
3407 | |
3408 | void CodeGenModule::EmitGlobalAnnotations() { |
3409 | for (const auto& [MangledName, VD] : DeferredAnnotations) { |
    llvm::GlobalValue *GV = GetGlobalValue(MangledName);
    if (GV)
      AddGlobalAnnotations(VD, GV);
3413 | } |
3414 | DeferredAnnotations.clear(); |
3415 | |
3416 | if (Annotations.empty()) |
3417 | return; |
3418 | |
3419 | // Create a new global variable for the ConstantStruct in the Module. |
  llvm::Constant *Array = llvm::ConstantArray::get(
      llvm::ArrayType::get(Annotations[0]->getType(), Annotations.size()),
      Annotations);
  auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
                                      llvm::GlobalValue::AppendingLinkage,
                                      Array, "llvm.global.annotations");
3425 | gv->setSection(AnnotationSection); |
3426 | } |
3427 | |
3428 | llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) { |
3429 | llvm::Constant *&AStr = AnnotationStrings[Str]; |
3430 | if (AStr) |
3431 | return AStr; |
3432 | |
3433 | // Not found yet, create a new global. |
  llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
  auto *gv = new llvm::GlobalVariable(
      getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s,
      ".str", nullptr, llvm::GlobalValue::NotThreadLocal,
3438 | ConstGlobalsPtrTy->getAddressSpace()); |
3439 | gv->setSection(AnnotationSection); |
3440 | gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3441 | AStr = gv; |
3442 | return gv; |
3443 | } |
3444 | |
3445 | llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) { |
3446 | SourceManager &SM = getContext().getSourceManager(); |
3447 | PresumedLoc PLoc = SM.getPresumedLoc(Loc); |
  if (PLoc.isValid())
    return EmitAnnotationString(PLoc.getFilename());
  return EmitAnnotationString(SM.getBufferName(Loc));
3451 | } |
3452 | |
3453 | llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) { |
3454 | SourceManager &SM = getContext().getSourceManager(); |
  PresumedLoc PLoc = SM.getPresumedLoc(L);
  unsigned LineNo =
      PLoc.isValid() ? PLoc.getLine() : SM.getExpansionLineNumber(L);
  return llvm::ConstantInt::get(Int32Ty, LineNo);
3459 | } |
3460 | |
3461 | llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) { |
3462 | ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()}; |
3463 | if (Exprs.empty()) |
    return llvm::ConstantPointerNull::get(ConstGlobalsPtrTy);
3465 | |
3466 | llvm::FoldingSetNodeID ID; |
3467 | for (Expr *E : Exprs) { |
3468 | ID.Add(cast<clang::ConstantExpr>(E)->getAPValueResult()); |
3469 | } |
3470 | llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()]; |
3471 | if (Lookup) |
3472 | return Lookup; |
3473 | |
  llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
  LLVMArgs.reserve(Exprs.size());
  ConstantEmitter Emitter(*this);
  llvm::transform(Exprs, std::back_inserter(LLVMArgs), [&](const Expr *E) {
    const auto *CE = cast<clang::ConstantExpr>(E);
    return Emitter.emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(),
                                CE->getType());
  });
  auto *Struct = llvm::ConstantStruct::getAnon(LLVMArgs);
3483 | auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true, |
3484 | llvm::GlobalValue::PrivateLinkage, Struct, |
3485 | ".args" ); |
3486 | GV->setSection(AnnotationSection); |
3487 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3488 | |
3489 | Lookup = GV; |
3490 | return GV; |
3491 | } |
3492 | |
3493 | llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV, |
3494 | const AnnotateAttr *AA, |
3495 | SourceLocation L) { |
3496 | // Get the globals for file name, annotation, and the line number. |
  llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
                 *UnitGV = EmitAnnotationUnit(L),
                 *LineNoCst = EmitAnnotationLineNo(L),
                 *Args = EmitAnnotationArgs(AA);
3501 | |
3502 | llvm::Constant *GVInGlobalsAS = GV; |
3503 | if (GV->getAddressSpace() != |
3504 | getDataLayout().getDefaultGlobalsAddressSpace()) { |
    GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
        GV,
        llvm::PointerType::get(
            GV->getContext(), getDataLayout().getDefaultGlobalsAddressSpace()));
3509 | } |
3510 | |
3511 | // Create the ConstantStruct for the global annotation. |
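  // Each entry is { annotated global, annotation string, translation unit
  // name, line number, constant arguments }.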
3512 | llvm::Constant *Fields[] = { |
3513 | GVInGlobalsAS, AnnoGV, UnitGV, LineNoCst, Args, |
3514 | }; |
  return llvm::ConstantStruct::getAnon(Fields);
3516 | } |
3517 | |
3518 | void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D, |
3519 | llvm::GlobalValue *GV) { |
3520 | assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute" ); |
3521 | // Get the struct elements for these annotations. |
3522 | for (const auto *I : D->specific_attrs<AnnotateAttr>()) |
3523 | Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation())); |
3524 | } |
3525 | |
3526 | bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn, |
3527 | SourceLocation Loc) const { |
3528 | const auto &NoSanitizeL = getContext().getNoSanitizeList(); |
3529 | // NoSanitize by function name. |
  if (NoSanitizeL.containsFunction(Kind, Fn->getName()))
3531 | return true; |
3532 | // NoSanitize by location. Check "mainfile" prefix. |
3533 | auto &SM = Context.getSourceManager(); |
  FileEntryRef MainFile = *SM.getFileEntryRefForID(SM.getMainFileID());
  if (NoSanitizeL.containsMainFile(Kind, MainFile.getName()))
3536 | return true; |
3537 | |
3538 | // Check "src" prefix. |
3539 | if (Loc.isValid()) |
    return NoSanitizeL.containsLocation(Kind, Loc);
  // If location is unknown, this may be a compiler-generated function. Assume
  // it's located in the main file.
  return NoSanitizeL.containsFile(Kind, MainFile.getName());
3544 | } |
3545 | |
3546 | bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, |
3547 | llvm::GlobalVariable *GV, |
3548 | SourceLocation Loc, QualType Ty, |
3549 | StringRef Category) const { |
3550 | const auto &NoSanitizeL = getContext().getNoSanitizeList(); |
  if (NoSanitizeL.containsGlobal(Kind, GV->getName(), Category))
    return true;
  auto &SM = Context.getSourceManager();
  if (NoSanitizeL.containsMainFile(
          Kind, SM.getFileEntryRefForID(SM.getMainFileID())->getName(),
          Category))
    return true;
  if (NoSanitizeL.containsLocation(Kind, Loc, Category))
3559 | return true; |
3560 | |
3561 | // Check global type. |
3562 | if (!Ty.isNull()) { |
3563 | // Drill down the array types: if global variable of a fixed type is |
3564 | // not sanitized, we also don't instrument arrays of them. |
    while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
      Ty = AT->getElementType();
    Ty = Ty.getCanonicalType().getUnqualifiedType();
    // Only record types (classes, structs etc.) are ignored.
    if (Ty->isRecordType()) {
      std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
      if (NoSanitizeL.containsType(Kind, TypeStr, Category))
3572 | return true; |
3573 | } |
3574 | } |
3575 | return false; |
3576 | } |
3577 | |
3578 | bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, |
3579 | StringRef Category) const { |
3580 | const auto &XRayFilter = getContext().getXRayFilter(); |
3581 | using ImbueAttr = XRayFunctionFilter::ImbueAttribute; |
3582 | auto Attr = ImbueAttr::NONE; |
3583 | if (Loc.isValid()) |
3584 | Attr = XRayFilter.shouldImbueLocation(Loc, Category); |
3585 | if (Attr == ImbueAttr::NONE) |
    Attr = XRayFilter.shouldImbueFunction(Fn->getName());
3587 | switch (Attr) { |
3588 | case ImbueAttr::NONE: |
3589 | return false; |
3590 | case ImbueAttr::ALWAYS: |
3591 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-always" ); |
3592 | break; |
3593 | case ImbueAttr::ALWAYS_ARG1: |
3594 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-always" ); |
3595 | Fn->addFnAttr(Kind: "xray-log-args" , Val: "1" ); |
3596 | break; |
3597 | case ImbueAttr::NEVER: |
3598 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-never" ); |
3599 | break; |
3600 | } |
3601 | return true; |
3602 | } |
3603 | |
3604 | ProfileList::ExclusionType |
3605 | CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn, |
3606 | SourceLocation Loc) const { |
3607 | const auto &ProfileList = getContext().getProfileList(); |
3608 | // If the profile list is empty, then instrument everything. |
3609 | if (ProfileList.isEmpty()) |
3610 | return ProfileList::Allow; |
3611 | CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr(); |
3612 | // First, check the function name. |
  if (auto V = ProfileList.isFunctionExcluded(Fn->getName(), Kind))
3614 | return *V; |
3615 | // Next, check the source location. |
3616 | if (Loc.isValid()) |
3617 | if (auto V = ProfileList.isLocationExcluded(Loc, Kind)) |
3618 | return *V; |
3619 | // If location is unknown, this may be a compiler-generated function. Assume |
3620 | // it's located in the main file. |
3621 | auto &SM = Context.getSourceManager(); |
  if (auto MainFile = SM.getFileEntryRefForID(SM.getMainFileID()))
    if (auto V = ProfileList.isFileExcluded(MainFile->getName(), Kind))
3624 | return *V; |
3625 | return ProfileList.getDefault(Kind); |
3626 | } |
3627 | |
3628 | ProfileList::ExclusionType |
3629 | CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn, |
3630 | SourceLocation Loc) const { |
3631 | auto V = isFunctionBlockedByProfileList(Fn, Loc); |
3632 | if (V != ProfileList::Allow) |
3633 | return V; |
3634 | |
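  // When profiling is split into function groups (ProfileTotalFunctionGroups
  // > 1), functions are partitioned into groups by a CRC32 hash of their name
  // and only the selected group is instrumented.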
3635 | auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups; |
3636 | if (NumGroups > 1) { |
    auto Group = llvm::crc32(arrayRefFromStringRef(Fn->getName())) % NumGroups;
3638 | if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup) |
3639 | return ProfileList::Skip; |
3640 | } |
3641 | return ProfileList::Allow; |
3642 | } |
3643 | |
3644 | bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) { |
3645 | // Never defer when EmitAllDecls is specified. |
3646 | if (LangOpts.EmitAllDecls) |
3647 | return true; |
3648 | |
  const auto *VD = dyn_cast<VarDecl>(Global);
3650 | if (VD && |
3651 | ((CodeGenOpts.KeepPersistentStorageVariables && |
3652 | (VD->getStorageDuration() == SD_Static || |
3653 | VD->getStorageDuration() == SD_Thread)) || |
3654 | (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && |
3655 | VD->getType().isConstQualified()))) |
3656 | return true; |
3657 | |
3658 | return getContext().DeclMustBeEmitted(Global); |
3659 | } |
3660 | |
3661 | bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { |
  // In OpenMP 5.0, variables and functions may be marked as
  // device_type(host/nohost), and we should not emit them eagerly unless we
  // are sure that they must be emitted on the host/device. To be sure, we
  // need to have seen a declare target with an explicit mention of the
  // function; we know we have if the level of the declare target attribute is
  // -1. Note that we check somewhere else if we should emit this at all.
3668 | if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) { |
3669 | std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = |
3670 | OMPDeclareTargetDeclAttr::getActiveAttr(Global); |
3671 | if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1) |
3672 | return false; |
3673 | } |
3674 | |
  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
3676 | if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) |
3677 | // Implicit template instantiations may change linkage if they are later |
3678 | // explicitly instantiated, so they should not be emitted eagerly. |
3679 | return false; |
3680 | // Defer until all versions have been semantically checked. |
3681 | if (FD->hasAttr<TargetVersionAttr>() && !FD->isMultiVersion()) |
3682 | return false; |
3683 | // Defer emission of SYCL kernel entry point functions during device |
3684 | // compilation. |
3685 | if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>()) |
3686 | return false; |
3687 | } |
  if (const auto *VD = dyn_cast<VarDecl>(Global)) {
3689 | if (Context.getInlineVariableDefinitionKind(VD) == |
3690 | ASTContext::InlineVariableDefinitionKind::WeakUnknown) |
3691 | // A definition of an inline constexpr static data member may change |
3692 | // linkage later if it's redeclared outside the class. |
3693 | return false; |
3694 | if (CXX20ModuleInits && VD->getOwningModule() && |
3695 | !VD->getOwningModule()->isModuleMapModule()) { |
3696 | // For CXX20, module-owned initializers need to be deferred, since it is |
3697 | // not known at this point if they will be run for the current module or |
3698 | // as part of the initializer for an imported one. |
3699 | return false; |
3700 | } |
3701 | } |
3702 | // If OpenMP is enabled and threadprivates must be generated like TLS, delay |
3703 | // codegen for global variables, because they may be marked as threadprivate. |
3704 | if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS && |
3705 | getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) && |
3706 | !Global->getType().isConstantStorage(getContext(), false, false) && |
3707 | !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global)) |
3708 | return false; |
3709 | |
3710 | return true; |
3711 | } |
3712 | |
3713 | ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) { |
3714 | StringRef Name = getMangledName(GD); |
3715 | |
3716 | // The UUID descriptor should be pointer aligned. |
  CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
3718 | |
3719 | // Look for an existing global. |
3720 | if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name)) |
3721 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3722 | |
3723 | ConstantEmitter Emitter(*this); |
3724 | llvm::Constant *Init; |
3725 | |
3726 | APValue &V = GD->getAsAPValue(); |
3727 | if (!V.isAbsent()) { |
3728 | // If possible, emit the APValue version of the initializer. In particular, |
3729 | // this gets the type of the constant right. |
    Init = Emitter.emitForInitializer(
        GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
3732 | } else { |
3733 | // As a fallback, directly construct the constant. |
3734 | // FIXME: This may get padding wrong under esoteric struct layout rules. |
3735 | // MSVC appears to create a complete type 'struct __s_GUID' that it |
3736 | // presumably uses to represent these constants. |
3737 | MSGuidDecl::Parts Parts = GD->getParts(); |
    llvm::Constant *Fields[4] = {
        llvm::ConstantInt::get(Int32Ty, Parts.Part1),
        llvm::ConstantInt::get(Int16Ty, Parts.Part2),
        llvm::ConstantInt::get(Int16Ty, Parts.Part3),
        llvm::ConstantDataArray::getRaw(
            StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
            Int8Ty)};
    Init = llvm::ConstantStruct::getAnon(Fields);
3746 | } |
3747 | |
3748 | auto *GV = new llvm::GlobalVariable( |
3749 | getModule(), Init->getType(), |
3750 | /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name); |
3751 | if (supportsCOMDAT()) |
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
3753 | setDSOLocal(GV); |
3754 | |
3755 | if (!V.isAbsent()) { |
    Emitter.finalize(GV);
3757 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3758 | } |
3759 | |
  llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
3761 | return ConstantAddress(GV, Ty, Alignment); |
3762 | } |
3763 | |
3764 | ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl( |
3765 | const UnnamedGlobalConstantDecl *GCD) { |
3766 | CharUnits Alignment = getContext().getTypeAlignInChars(GCD->getType()); |
3767 | |
3768 | llvm::GlobalVariable **Entry = nullptr; |
3769 | Entry = &UnnamedGlobalConstantDeclMap[GCD]; |
3770 | if (*Entry) |
3771 | return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment); |
3772 | |
3773 | ConstantEmitter Emitter(*this); |
3774 | llvm::Constant *Init; |
3775 | |
3776 | const APValue &V = GCD->getValue(); |
3777 | |
3778 | assert(!V.isAbsent()); |
  Init = Emitter.emitForInitializer(V, GCD->getType().getAddressSpace(),
                                    GCD->getType());
3781 | |
3782 | auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(), |
3783 | /*isConstant=*/true, |
3784 | llvm::GlobalValue::PrivateLinkage, Init, |
3785 | ".constant" ); |
3786 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3787 | GV->setAlignment(Alignment.getAsAlign()); |
3788 | |
3789 | Emitter.finalize(global: GV); |
3790 | |
3791 | *Entry = GV; |
3792 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3793 | } |
3794 | |
3795 | ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject( |
3796 | const TemplateParamObjectDecl *TPO) { |
3797 | StringRef Name = getMangledName(TPO); |
  CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());

  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return ConstantAddress(GV, GV->getValueType(), Alignment);

  ConstantEmitter Emitter(*this);
  llvm::Constant *Init = Emitter.emitForInitializer(
      TPO->getValue(), TPO->getType().getAddressSpace(), TPO->getType());
3806 | |
3807 | if (!Init) { |
3808 | ErrorUnsupported(TPO, "template parameter object" ); |
3809 | return ConstantAddress::invalid(); |
3810 | } |
3811 | |
3812 | llvm::GlobalValue::LinkageTypes Linkage = |
3813 | isExternallyVisible(TPO->getLinkageAndVisibility().getLinkage()) |
3814 | ? llvm::GlobalValue::LinkOnceODRLinkage |
3815 | : llvm::GlobalValue::InternalLinkage; |
3816 | auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(), |
3817 | /*isConstant=*/true, Linkage, Init, Name); |
3818 | setGVProperties(GV, TPO); |
3819 | if (supportsCOMDAT() && Linkage == llvm::GlobalValue::LinkOnceODRLinkage) |
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
  Emitter.finalize(GV);
3822 | |
3823 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3824 | } |
3825 | |
3826 | ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) { |
3827 | const AliasAttr *AA = VD->getAttr<AliasAttr>(); |
3828 | assert(AA && "No alias?" ); |
3829 | |
3830 | CharUnits Alignment = getContext().getDeclAlign(VD); |
  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());

  // See if there is already something with the target's name in the module.
  llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
3835 | if (Entry) |
3836 | return ConstantAddress(Entry, DeclTy, Alignment); |
3837 | |
3838 | llvm::Constant *Aliasee; |
  if (isa<llvm::FunctionType>(DeclTy))
    Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
                                      GlobalDecl(cast<FunctionDecl>(VD)),
                                      /*ForVTable=*/false);
  else
    Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
                                    /*D=*/nullptr);

  auto *F = cast<llvm::GlobalValue>(Aliasee);
  F->setLinkage(llvm::Function::ExternalWeakLinkage);
  WeakRefReferences.insert(F);
3850 | |
3851 | return ConstantAddress(Aliasee, DeclTy, Alignment); |
3852 | } |
3853 | |
3854 | template <typename AttrT> static bool hasImplicitAttr(const ValueDecl *D) { |
3855 | if (!D) |
3856 | return false; |
3857 | if (auto *A = D->getAttr<AttrT>()) |
3858 | return A->isImplicit(); |
3859 | return D->isImplicit(); |
3860 | } |
3861 | |
3862 | bool CodeGenModule::shouldEmitCUDAGlobalVar(const VarDecl *Global) const { |
3863 | assert(LangOpts.CUDA && "Should not be called by non-CUDA languages" ); |
3864 | // We need to emit host-side 'shadows' for all global |
3865 | // device-side variables because the CUDA runtime needs their |
3866 | // size and host-side address in order to provide access to |
3867 | // their device-side incarnations. |
3868 | return !LangOpts.CUDAIsDevice || Global->hasAttr<CUDADeviceAttr>() || |
3869 | Global->hasAttr<CUDAConstantAttr>() || |
3870 | Global->hasAttr<CUDASharedAttr>() || |
3871 | Global->getType()->isCUDADeviceBuiltinSurfaceType() || |
3872 | Global->getType()->isCUDADeviceBuiltinTextureType(); |
3873 | } |
3874 | |
3875 | void CodeGenModule::EmitGlobal(GlobalDecl GD) { |
  const auto *Global = cast<ValueDecl>(GD.getDecl());
3877 | |
3878 | // Weak references don't produce any output by themselves. |
3879 | if (Global->hasAttr<WeakRefAttr>()) |
3880 | return; |
3881 | |
3882 | // If this is an alias definition (which otherwise looks like a declaration) |
3883 | // emit it now. |
3884 | if (Global->hasAttr<AliasAttr>()) |
3885 | return EmitAliasDefinition(GD); |
3886 | |
  // An ifunc is like an alias, except its value is resolved at runtime by
  // calling a resolver function.
3888 | if (Global->hasAttr<IFuncAttr>()) |
3889 | return emitIFuncDefinition(GD); |
3890 | |
3891 | // If this is a cpu_dispatch multiversion function, emit the resolver. |
3892 | if (Global->hasAttr<CPUDispatchAttr>()) |
3893 | return emitCPUDispatchDefinition(GD); |
3894 | |
3895 | // If this is CUDA, be selective about which declarations we emit. |
3896 | // Non-constexpr non-lambda implicit host device functions are not emitted |
3897 | // unless they are used on device side. |
3898 | if (LangOpts.CUDA) { |
3899 | assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) && |
3900 | "Expected Variable or Function" ); |
    if (const auto *VD = dyn_cast<VarDecl>(Global)) {
      if (!shouldEmitCUDAGlobalVar(VD))
        return;
    } else if (LangOpts.CUDAIsDevice) {
      const auto *FD = dyn_cast<FunctionDecl>(Global);
3906 | if ((!Global->hasAttr<CUDADeviceAttr>() || |
3907 | (LangOpts.OffloadImplicitHostDeviceTemplates && |
3908 | hasImplicitAttr<CUDAHostAttr>(FD) && |
3909 | hasImplicitAttr<CUDADeviceAttr>(FD) && !FD->isConstexpr() && |
3910 | !isLambdaCallOperator(FD) && |
3911 | !getContext().CUDAImplicitHostDeviceFunUsedByDevice.count(FD))) && |
3912 | !Global->hasAttr<CUDAGlobalAttr>() && |
3913 | !(LangOpts.HIPStdPar && isa<FunctionDecl>(Global) && |
3914 | !Global->hasAttr<CUDAHostAttr>())) |
3915 | return; |
3916 | // Device-only functions are the only things we skip. |
3917 | } else if (!Global->hasAttr<CUDAHostAttr>() && |
3918 | Global->hasAttr<CUDADeviceAttr>()) |
3919 | return; |
3920 | } |
3921 | |
3922 | if (LangOpts.OpenMP) { |
3923 | // If this is OpenMP, check if it is legal to emit this global normally. |
3924 | if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD)) |
3925 | return; |
    if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareReduction(DRD);
      return;
    }
    if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareMapper(DMD);
3934 | return; |
3935 | } |
3936 | } |
3937 | |
3938 | // Ignore declarations, they will be emitted on their first use. |
  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
    if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()) &&
        FD->doesThisDeclarationHaveABody())
      addDeferredDeclToEmit(GlobalDecl(FD, KernelReferenceKind::Stub));

    // Update deferred annotations with the latest declaration if the function
    // was already used or defined.
    if (FD->hasAttr<AnnotateAttr>()) {
      StringRef MangledName = getMangledName(GD);
      if (GetGlobalValue(MangledName))
3949 | DeferredAnnotations[MangledName] = FD; |
3950 | } |
3951 | |
3952 | // Forward declarations are emitted lazily on first use. |
3953 | if (!FD->doesThisDeclarationHaveABody()) { |
3954 | if (!FD->doesDeclarationForceExternallyVisibleDefinition() && |
3955 | (!FD->isMultiVersion() || !getTarget().getTriple().isAArch64())) |
3956 | return; |
3957 | |
3958 | StringRef MangledName = getMangledName(GD); |
3959 | |
3960 | // Compute the function info and LLVM type. |
3961 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
      llvm::Type *Ty = getTypes().GetFunctionType(FI);

      GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
3965 | /*DontDefer=*/false); |
3966 | return; |
3967 | } |
3968 | } else { |
    const auto *VD = cast<VarDecl>(Global);
3970 | assert(VD->isFileVarDecl() && "Cannot emit local var decl as global." ); |
3971 | if (VD->isThisDeclarationADefinition() != VarDecl::Definition && |
3972 | !Context.isMSStaticDataMemberInlineDefinition(VD)) { |
3973 | if (LangOpts.OpenMP) { |
3974 | // Emit declaration of the must-be-emitted declare target variable. |
3975 | if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
3976 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) { |
3977 | |
3978 | // If this variable has external storage and doesn't require special |
3979 | // link handling we defer to its canonical definition. |
3980 | if (VD->hasExternalStorage() && |
3981 | Res != OMPDeclareTargetDeclAttr::MT_Link) |
3982 | return; |
3983 | |
3984 | bool UnifiedMemoryEnabled = |
3985 | getOpenMPRuntime().hasRequiresUnifiedSharedMemory(); |
3986 | if ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
3987 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
3988 | !UnifiedMemoryEnabled) { |
          (void)GetAddrOfGlobalVar(VD);
3990 | } else { |
3991 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
3992 | ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
3993 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
3994 | UnifiedMemoryEnabled)) && |
3995 | "Link clause or to clause with unified memory expected." ); |
3996 | (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
3997 | } |
3998 | |
3999 | return; |
4000 | } |
4001 | } |
4002 | // If this declaration may have caused an inline variable definition to |
4003 | // change linkage, make sure that it's emitted. |
4004 | if (Context.getInlineVariableDefinitionKind(VD) == |
4005 | ASTContext::InlineVariableDefinitionKind::Strong) |
        GetAddrOfGlobalVar(VD);
4007 | return; |
4008 | } |
4009 | } |
4010 | |
4011 | // Defer code generation to first use when possible, e.g. if this is an inline |
4012 | // function. If the global must always be emitted, do it eagerly if possible |
4013 | // to benefit from cache locality. |
4014 | if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) { |
4015 | // Emit the definition if it can't be deferred. |
    EmitGlobalDefinition(GD);
4017 | addEmittedDeferredDecl(GD); |
4018 | return; |
4019 | } |
4020 | |
4021 | // If we're deferring emission of a C++ variable with an |
4022 | // initializer, remember the order in which it appeared in the file. |
  if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
      cast<VarDecl>(Global)->hasInit()) {
    DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
    CXXGlobalInits.push_back(nullptr);
  }

  StringRef MangledName = getMangledName(GD);
  if (GetGlobalValue(MangledName) != nullptr) {
4031 | // The value has already been used and should therefore be emitted. |
4032 | addDeferredDeclToEmit(GD); |
4033 | } else if (MustBeEmitted(Global)) { |
4034 | // The value must be emitted, but cannot be emitted eagerly. |
4035 | assert(!MayBeEmittedEagerly(Global)); |
4036 | addDeferredDeclToEmit(GD); |
4037 | } else { |
4038 | // Otherwise, remember that we saw a deferred decl with this name. The |
4039 | // first use of the mangled name will cause it to move into |
4040 | // DeferredDeclsToEmit. |
4041 | DeferredDecls[MangledName] = GD; |
4042 | } |
4043 | } |
4044 | |
4045 | // Check if T is a class type with a destructor that's not dllimport. |
4046 | static bool HasNonDllImportDtor(QualType T) { |
4047 | if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>()) |
    if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
4049 | if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>()) |
4050 | return true; |
4051 | |
4052 | return false; |
4053 | } |
4054 | |
4055 | namespace { |
4056 | struct FunctionIsDirectlyRecursive |
4057 | : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> { |
4058 | const StringRef Name; |
4059 | const Builtin::Context &BI; |
4060 | FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) |
4061 | : Name(N), BI(C) {} |
4062 | |
4063 | bool VisitCallExpr(const CallExpr *E) { |
4064 | const FunctionDecl *FD = E->getDirectCallee(); |
4065 | if (!FD) |
4066 | return false; |
4067 | AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>(); |
4068 | if (Attr && Name == Attr->getLabel()) |
4069 | return true; |
4070 | unsigned BuiltinID = FD->getBuiltinID(); |
    if (!BuiltinID || !BI.isLibFunction(BuiltinID))
      return false;
    std::string BuiltinNameStr = BI.getName(BuiltinID);
    StringRef BuiltinName = BuiltinNameStr;
    return BuiltinName.consume_front("__builtin_") && Name == BuiltinName;
4076 | } |
4077 | |
4078 | bool VisitStmt(const Stmt *S) { |
4079 | for (const Stmt *Child : S->children()) |
4080 | if (Child && this->Visit(Child)) |
4081 | return true; |
4082 | return false; |
4083 | } |
4084 | }; |
4085 | |
4086 | // Make sure we're not referencing non-imported vars or functions. |
4087 | struct DLLImportFunctionVisitor |
4088 | : public RecursiveASTVisitor<DLLImportFunctionVisitor> { |
4089 | bool SafeToInline = true; |
4090 | |
4091 | bool shouldVisitImplicitCode() const { return true; } |
4092 | |
4093 | bool VisitVarDecl(VarDecl *VD) { |
4094 | if (VD->getTLSKind()) { |
4095 | // A thread-local variable cannot be imported. |
4096 | SafeToInline = false; |
4097 | return SafeToInline; |
4098 | } |
4099 | |
4100 | // A variable definition might imply a destructor call. |
4101 | if (VD->isThisDeclarationADefinition()) |
4102 | SafeToInline = !HasNonDllImportDtor(VD->getType()); |
4103 | |
4104 | return SafeToInline; |
4105 | } |
4106 | |
4107 | bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { |
4108 | if (const auto *D = E->getTemporary()->getDestructor()) |
4109 | SafeToInline = D->hasAttr<DLLImportAttr>(); |
4110 | return SafeToInline; |
4111 | } |
4112 | |
4113 | bool VisitDeclRefExpr(DeclRefExpr *E) { |
4114 | ValueDecl *VD = E->getDecl(); |
4115 | if (isa<FunctionDecl>(VD)) |
4116 | SafeToInline = VD->hasAttr<DLLImportAttr>(); |
4117 | else if (VarDecl *V = dyn_cast<VarDecl>(VD)) |
4118 | SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>(); |
4119 | return SafeToInline; |
4120 | } |
4121 | |
4122 | bool VisitCXXConstructExpr(CXXConstructExpr *E) { |
4123 | SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>(); |
4124 | return SafeToInline; |
4125 | } |
4126 | |
4127 | bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { |
4128 | CXXMethodDecl *M = E->getMethodDecl(); |
4129 | if (!M) { |
4130 | // Call through a pointer to member function. This is safe to inline. |
4131 | SafeToInline = true; |
4132 | } else { |
4133 | SafeToInline = M->hasAttr<DLLImportAttr>(); |
4134 | } |
4135 | return SafeToInline; |
4136 | } |
4137 | |
4138 | bool VisitCXXDeleteExpr(CXXDeleteExpr *E) { |
4139 | SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>(); |
4140 | return SafeToInline; |
4141 | } |
4142 | |
4143 | bool VisitCXXNewExpr(CXXNewExpr *E) { |
4144 | SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>(); |
4145 | return SafeToInline; |
4146 | } |
4147 | }; |
4148 | } |
4149 | |
4150 | // isTriviallyRecursive - Check if this function calls another |
4151 | // decl that, because of the asm attribute or the other decl being a builtin, |
4152 | // ends up pointing to itself. |
4153 | bool |
4154 | CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) { |
4155 | StringRef Name; |
4156 | if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) { |
4157 | // asm labels are a special kind of mangling we have to support. |
4158 | AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>(); |
4159 | if (!Attr) |
4160 | return false; |
4161 | Name = Attr->getLabel(); |
4162 | } else { |
4163 | Name = FD->getName(); |
4164 | } |
4165 | |
4166 | FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo); |
4167 | const Stmt *Body = FD->getBody(); |
4168 | return Body ? Walker.Visit(Body) : false; |
4169 | } |
4170 | |
4171 | bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) { |
4172 | if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage) |
4173 | return true; |
4174 | |
  const auto *F = cast<FunctionDecl>(GD.getDecl());
  // Inline builtin declarations must be emitted; they are often fortified
  // functions.
4178 | if (F->isInlineBuiltinDeclaration()) |
4179 | return true; |
4180 | |
4181 | if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>()) |
4182 | return false; |
4183 | |
4184 | // We don't import function bodies from other named module units since that |
4185 | // behavior may break ABI compatibility of the current unit. |
4186 | if (const Module *M = F->getOwningModule(); |
4187 | M && M->getTopLevelModule()->isNamedModule() && |
4188 | getContext().getCurrentNamedModule() != M->getTopLevelModule()) { |
    // In practice, a template member function is sometimes marked
    // always-inline while the enclosing template is declared as an extern
    // explicit instantiation that does not provide the member function's
    // definition. In that case we still have to emit the always-inline
    // function from the explicit instantiation.
4193 | // |
4194 | // See https://github.com/llvm/llvm-project/issues/86893 for details. |
4195 | // |
4196 | // TODO: Maybe it is better to give it a warning if we call a non-inline |
4197 | // function from other module units which is marked as always-inline. |
4198 | if (!F->isTemplateInstantiation() || !F->hasAttr<AlwaysInlineAttr>()) { |
4199 | return false; |
4200 | } |
4201 | } |
4202 | |
4203 | if (F->hasAttr<NoInlineAttr>()) |
4204 | return false; |
4205 | |
4206 | if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) { |
4207 | // Check whether it would be safe to inline this dllimport function. |
4208 | DLLImportFunctionVisitor Visitor; |
4209 | Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F)); |
4210 | if (!Visitor.SafeToInline) |
4211 | return false; |
4212 | |
    if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
4214 | // Implicit destructor invocations aren't captured in the AST, so the |
4215 | // check above can't see them. Check for them manually here. |
4216 | for (const Decl *Member : Dtor->getParent()->decls()) |
4217 | if (isa<FieldDecl>(Member)) |
4218 | if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType())) |
4219 | return false; |
4220 | for (const CXXBaseSpecifier &B : Dtor->getParent()->bases()) |
4221 | if (HasNonDllImportDtor(B.getType())) |
4222 | return false; |
4223 | } |
4224 | } |
4225 | |
4226 | // PR9614. Avoid cases where the source code is lying to us. An available |
4227 | // externally function should have an equivalent function somewhere else, |
4228 | // but a function that calls itself through asm label/`__builtin_` trickery is |
4229 | // clearly not equivalent to the real implementation. |
4230 | // This happens in glibc's btowc and in some configure checks. |
  return !isTriviallyRecursive(F);
4232 | } |
4233 | |
4234 | bool CodeGenModule::shouldOpportunisticallyEmitVTables() { |
4235 | return CodeGenOpts.OptimizationLevel > 0; |
4236 | } |
4237 | |
4238 | void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD, |
4239 | llvm::GlobalValue *GV) { |
  const auto *FD = cast<FunctionDecl>(GD.getDecl());

  if (FD->isCPUSpecificMultiVersion()) {
    auto *Spec = FD->getAttr<CPUSpecificAttr>();
    for (unsigned I = 0; I < Spec->cpus_size(); ++I)
      EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
  } else if (auto *TC = FD->getAttr<TargetClonesAttr>()) {
    for (unsigned I = 0; I < TC->featuresStrs_size(); ++I)
      if (TC->isFirstOfVersion(I))
        EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
4250 | } else |
4251 | EmitGlobalFunctionDefinition(GD, GV); |
4252 | |
4253 | // Ensure that the resolver function is also emitted. |
4254 | if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) { |
4255 | // On AArch64 defer the resolver emission until the entire TU is processed. |
4256 | if (getTarget().getTriple().isAArch64()) |
4257 | AddDeferredMultiVersionResolverToEmit(GD); |
4258 | else |
4259 | GetOrCreateMultiVersionResolver(GD); |
4260 | } |
4261 | } |
4262 | |
4263 | void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) { |
  const auto *D = cast<ValueDecl>(GD.getDecl());

  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
                                 Context.getSourceManager(),
                                 "Generating code for declaration");

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
4271 | // At -O0, don't generate IR for functions with available_externally |
4272 | // linkage. |
4273 | if (!shouldEmitFunction(GD)) |
4274 | return; |
4275 | |
4276 | llvm::TimeTraceScope TimeScope("CodeGen Function" , [&]() { |
4277 | std::string Name; |
4278 | llvm::raw_string_ostream OS(Name); |
      FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
4280 | /*Qualified=*/true); |
4281 | return Name; |
4282 | }); |
4283 | |
    if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
      // Make sure to emit the definition(s) before we emit the thunks.
      // This is necessary for the generation of certain thunks.
      if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
4288 | ABI->emitCXXStructor(GD); |
4289 | else if (FD->isMultiVersion()) |
4290 | EmitMultiVersionFunctionDefinition(GD, GV); |
4291 | else |
4292 | EmitGlobalFunctionDefinition(GD, GV); |
4293 | |
4294 | if (Method->isVirtual()) |
4295 | getVTables().EmitThunks(GD); |
4296 | |
4297 | return; |
4298 | } |
4299 | |
4300 | if (FD->isMultiVersion()) |
4301 | return EmitMultiVersionFunctionDefinition(GD, GV); |
4302 | return EmitGlobalFunctionDefinition(GD, GV); |
4303 | } |
4304 | |
  if (const auto *VD = dyn_cast<VarDecl>(D))
    return EmitGlobalVarDefinition(VD, !VD->hasDefinition());
4307 | |
4308 | llvm_unreachable("Invalid argument to EmitGlobalDefinition()" ); |
4309 | } |
4310 | |
4311 | static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, |
4312 | llvm::Function *NewFn); |
4313 | |
4314 | static uint64_t getFMVPriority(const TargetInfo &TI, |
4315 | const CodeGenFunction::FMVResolverOption &RO) { |
4316 | llvm::SmallVector<StringRef, 8> Features{RO.Features}; |
4317 | if (RO.Architecture) |
    Features.push_back(*RO.Architecture);
4319 | return TI.getFMVPriority(Features); |
4320 | } |
4321 | |
// Multiversion functions should be at most 'WeakODRLinkage' so that a
// different TU can forward declare the function without causing problems;
// CPUDispatch in particular runs into issues otherwise. This also makes sure
// we work with internal-linkage functions, so that the same function name can
// be used with internal linkage in multiple TUs.
static llvm::GlobalValue::LinkageTypes
getMultiversionLinkage(CodeGenModule &CGM, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
4330 | if (FD->getFormalLinkage() == Linkage::Internal) |
4331 | return llvm::GlobalValue::InternalLinkage; |
4332 | return llvm::GlobalValue::WeakODRLinkage; |
4333 | } |
4334 | |
4335 | void CodeGenModule::emitMultiVersionFunctions() { |
4336 | std::vector<GlobalDecl> MVFuncsToEmit; |
  MultiVersionFuncs.swap(MVFuncsToEmit);
  for (GlobalDecl GD : MVFuncsToEmit) {
    const auto *FD = cast<FunctionDecl>(GD.getDecl());
4340 | assert(FD && "Expected a FunctionDecl" ); |
4341 | |
4342 | auto createFunction = [&](const FunctionDecl *Decl, unsigned MVIdx = 0) { |
      GlobalDecl CurGD{Decl->isDefined() ? Decl->getDefinition() : Decl, MVIdx};
      StringRef MangledName = getMangledName(CurGD);
      llvm::Constant *Func = GetGlobalValue(MangledName);
      if (!Func) {
        if (Decl->isDefined()) {
          EmitGlobalFunctionDefinition(CurGD, nullptr);
          Func = GetGlobalValue(MangledName);
        } else {
          const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(CurGD);
          llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
          Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
                                   /*DontDefer=*/false, ForDefinition);
        }
        assert(Func && "This should have just been created");
      }
      return cast<llvm::Function>(Func);
4359 | }; |
4360 | |
    // For AArch64, a resolver is only emitted if a function marked with
    // target_version("default") or target_clones("default") is defined
    // in this TU. For other architectures it is always emitted.
4364 | bool ShouldEmitResolver = !getTarget().getTriple().isAArch64(); |
4365 | SmallVector<CodeGenFunction::FMVResolverOption, 10> Options; |
4366 | |
4367 | getContext().forEachMultiversionedFunctionVersion( |
        FD, [&](const FunctionDecl *CurFD) {
4369 | llvm::SmallVector<StringRef, 8> Feats; |
4370 | bool IsDefined = CurFD->getDefinition() != nullptr; |
4371 | |
4372 | if (const auto *TA = CurFD->getAttr<TargetAttr>()) { |
4373 | assert(getTarget().getTriple().isX86() && "Unsupported target" ); |
4374 | TA->getX86AddedFeatures(Feats); |
4375 | llvm::Function *Func = createFunction(CurFD); |
4376 | Options.emplace_back(Func, Feats, TA->getX86Architecture()); |
4377 | } else if (const auto *TVA = CurFD->getAttr<TargetVersionAttr>()) { |
4378 | if (TVA->isDefaultVersion() && IsDefined) |
4379 | ShouldEmitResolver = true; |
4380 | llvm::Function *Func = createFunction(CurFD); |
4381 | char Delim = getTarget().getTriple().isAArch64() ? '+' : ','; |
4382 | TVA->getFeatures(Feats, Delim); |
            Options.emplace_back(Func, Feats);
4384 | } else if (const auto *TC = CurFD->getAttr<TargetClonesAttr>()) { |
4385 | for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) { |
4386 | if (!TC->isFirstOfVersion(I)) |
4387 | continue; |
4388 | if (TC->isDefaultVersion(I) && IsDefined) |
4389 | ShouldEmitResolver = true; |
4390 | llvm::Function *Func = createFunction(CurFD, I); |
4391 | Feats.clear(); |
4392 | if (getTarget().getTriple().isX86()) { |
4393 | TC->getX86Feature(Feats, I); |
4394 | Options.emplace_back(Func, Feats, TC->getX86Architecture(I)); |
4395 | } else { |
4396 | char Delim = getTarget().getTriple().isAArch64() ? '+' : ','; |
4397 | TC->getFeatures(Feats, I, Delim); |
                Options.emplace_back(Func, Feats);
4399 | } |
4400 | } |
4401 | } else |
4402 | llvm_unreachable("unexpected MultiVersionKind" ); |
4403 | }); |
4404 | |
4405 | if (!ShouldEmitResolver) |
4406 | continue; |
4407 | |
4408 | llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD); |
    if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(ResolverConstant)) {
4410 | ResolverConstant = IFunc->getResolver(); |
4411 | if (FD->isTargetClonesMultiVersion() && |
4412 | !getTarget().getTriple().isAArch64()) { |
4413 | std::string MangledName = getMangledNameImpl( |
4414 | *this, GD, FD, /*OmitMultiVersionMangling=*/true); |
        if (!GetGlobalValue(MangledName + ".ifunc")) {
          const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
          llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
4418 | // In prior versions of Clang, the mangling for ifuncs incorrectly |
4419 | // included an .ifunc suffix. This alias is generated for backward |
4420 | // compatibility. It is deprecated, and may be removed in the future. |
          auto *Alias = llvm::GlobalAlias::create(
              DeclTy, 0, getMultiversionLinkage(*this, GD),
              MangledName + ".ifunc", IFunc, &getModule());
          SetCommonAttributes(FD, Alias);
4425 | } |
4426 | } |
4427 | } |
    llvm::Function *ResolverFunc = cast<llvm::Function>(ResolverConstant);

    ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
4431 | |
4432 | if (!ResolverFunc->hasLocalLinkage() && supportsCOMDAT()) |
4433 | ResolverFunc->setComdat( |
          getModule().getOrInsertComdat(ResolverFunc->getName()));
4435 | |
4436 | const TargetInfo &TI = getTarget(); |
    llvm::stable_sort(
        Options, [&TI](const CodeGenFunction::FMVResolverOption &LHS,
                       const CodeGenFunction::FMVResolverOption &RHS) {
          return getFMVPriority(TI, LHS) > getFMVPriority(TI, RHS);
        });
    CodeGenFunction CGF(*this);
    CGF.EmitMultiVersionResolver(ResolverFunc, Options);
4444 | } |
4445 | |
4446 | // Ensure that any additions to the deferred decls list caused by emitting a |
4447 | // variant are emitted. This can happen when the variant itself is inline and |
4448 | // calls a function without linkage. |
4449 | if (!MVFuncsToEmit.empty()) |
4450 | EmitDeferred(); |
4451 | |
4452 | // Ensure that any additions to the multiversion funcs list from either the |
4453 | // deferred decls or the multiversion functions themselves are emitted. |
4454 | if (!MultiVersionFuncs.empty()) |
4455 | emitMultiVersionFunctions(); |
4456 | } |
4457 | |
4458 | static void replaceDeclarationWith(llvm::GlobalValue *Old, |
4459 | llvm::Constant *New) { |
4460 | assert(cast<llvm::Function>(Old)->isDeclaration() && "Not a declaration" ); |
  New->takeName(Old);
  Old->replaceAllUsesWith(New);
4463 | Old->eraseFromParent(); |
4464 | } |
4465 | |
4466 | void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) { |
  const auto *FD = cast<FunctionDecl>(GD.getDecl());
4468 | assert(FD && "Not a FunctionDecl?" ); |
4469 | assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?" ); |
4470 | const auto *DD = FD->getAttr<CPUDispatchAttr>(); |
4471 | assert(DD && "Not a cpu_dispatch Function?" ); |
4472 | |
4473 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
4475 | |
4476 | StringRef ResolverName = getMangledName(GD); |
  UpdateMultiVersionNames(GD, FD, ResolverName);
4478 | |
4479 | llvm::Type *ResolverType; |
4480 | GlobalDecl ResolverGD; |
4481 | if (getTarget().supportsIFunc()) { |
4482 | ResolverType = llvm::FunctionType::get( |
4483 | llvm::PointerType::get(getLLVMContext(), |
                               getTypes().getTargetAddressSpace(FD->getType())),
4485 | false); |
4486 | } |
4487 | else { |
4488 | ResolverType = DeclTy; |
4489 | ResolverGD = GD; |
4490 | } |
4491 | |
  auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
      ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
  ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
  if (supportsCOMDAT())
    ResolverFunc->setComdat(
        getModule().getOrInsertComdat(ResolverFunc->getName()));
4498 | |
4499 | SmallVector<CodeGenFunction::FMVResolverOption, 10> Options; |
4500 | const TargetInfo &Target = getTarget(); |
4501 | unsigned Index = 0; |
4502 | for (const IdentifierInfo *II : DD->cpus()) { |
4503 | // Get the name of the target function so we can look it up/create it. |
4504 | std::string MangledName = getMangledNameImpl(*this, GD, FD, true) + |
4505 | getCPUSpecificMangling(*this, II->getName()); |
4506 | |
4507 | llvm::Constant *Func = GetGlobalValue(MangledName); |
4508 | |
4509 | if (!Func) { |
4510 | GlobalDecl ExistingDecl = Manglings.lookup(MangledName); |
4511 | if (ExistingDecl.getDecl() && |
4512 | ExistingDecl.getDecl()->getAsFunction()->isDefined()) { |
4513 | EmitGlobalFunctionDefinition(ExistingDecl, nullptr); |
4514 | Func = GetGlobalValue(MangledName); |
4515 | } else { |
4516 | if (!ExistingDecl.getDecl()) |
4517 | ExistingDecl = GD.getWithMultiVersionIndex(Index); |
4518 | |
4519 | Func = GetOrCreateLLVMFunction( |
4520 | MangledName, DeclTy, ExistingDecl, |
4521 | /*ForVTable=*/false, /*DontDefer=*/true, |
4522 | /*IsThunk=*/false, llvm::AttributeList(), ForDefinition); |
4523 | } |
4524 | } |
4525 | |
4526 | llvm::SmallVector<StringRef, 32> Features; |
4527 | Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features); |
4528 | llvm::transform(Features, Features.begin(), |
4529 | [](StringRef Str) { return Str.substr(1); }); |
4530 | llvm::erase_if(Features, [&Target](StringRef Feat) { |
4531 | return !Target.validateCpuSupports(Feat); |
4532 | }); |
4533 | Options.emplace_back(cast<llvm::Function>(Func), Features); |
4534 | ++Index; |
4535 | } |
4536 | |
  llvm::stable_sort(Options, [](const CodeGenFunction::FMVResolverOption &LHS,
                                const CodeGenFunction::FMVResolverOption &RHS) {
    return llvm::X86::getCpuSupportsMask(LHS.Features) >
           llvm::X86::getCpuSupportsMask(RHS.Features);
4541 | }); |
4542 | |
4543 | // If the list contains multiple 'default' versions, such as when it contains |
4544 | // 'pentium' and 'generic', don't emit the call to the generic one (since we |
4545 | // always run on at least a 'pentium'). We do this by deleting the 'least |
4546 | // advanced' (read, lowest mangling letter). |
  while (Options.size() > 1 &&
         llvm::all_of(
             llvm::X86::getCpuSupportsMask((Options.end() - 2)->Features),
             [](auto X) { return X == 0; })) {
    StringRef LHSName = (Options.end() - 2)->Function->getName();
    StringRef RHSName = (Options.end() - 1)->Function->getName();
    if (LHSName.compare(RHSName) < 0)
      Options.erase(Options.end() - 2);
    else
      Options.erase(Options.end() - 1);
4556 | } |
4557 | |
4558 | CodeGenFunction CGF(*this); |
  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
4560 | |
4561 | if (getTarget().supportsIFunc()) { |
    llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(*this, GD);
    auto *IFunc = cast<llvm::GlobalValue>(GetOrCreateMultiVersionResolver(GD));
    unsigned AS = IFunc->getType()->getPointerAddressSpace();

    // Fix up function declarations that were created for cpu_specific before
    // the cpu_dispatch declaration was known.
    if (!isa<llvm::GlobalIFunc>(IFunc)) {
      auto *GI = llvm::GlobalIFunc::create(DeclTy, AS, Linkage, "",
                                           ResolverFunc, &getModule());
      replaceDeclarationWith(IFunc, GI);
4572 | IFunc = GI; |
4573 | } |
4574 | |
4575 | std::string AliasName = getMangledNameImpl( |
4576 | *this, GD, FD, /*OmitMultiVersionMangling=*/true); |
    llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
    if (!AliasFunc) {
      auto *GA = llvm::GlobalAlias::create(DeclTy, AS, Linkage, AliasName,
                                           IFunc, &getModule());
      SetCommonAttributes(GD, GA);
4582 | } |
4583 | } |
4584 | } |
4585 | |
4586 | /// Adds a declaration to the list of multi version functions if not present. |
4587 | void CodeGenModule::AddDeferredMultiVersionResolverToEmit(GlobalDecl GD) { |
  const auto *FD = cast<FunctionDecl>(GD.getDecl());
4589 | assert(FD && "Not a FunctionDecl?" ); |
4590 | |
4591 | if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) { |
4592 | std::string MangledName = |
4593 | getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true); |
    if (!DeferredResolversToEmit.insert(MangledName).second)
      return;
  }
  MultiVersionFuncs.push_back(GD);
4598 | } |
4599 | |
4600 | /// If a dispatcher for the specified mangled name is not in the module, create |
4601 | /// and return it. The dispatcher is either an llvm Function with the specified |
4602 | /// type, or a global ifunc. |
4603 | llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) { |
  const auto *FD = cast<FunctionDecl>(GD.getDecl());
4605 | assert(FD && "Not a FunctionDecl?" ); |
4606 | |
4607 | std::string MangledName = |
4608 | getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true); |
4609 | |
4610 | // Holds the name of the resolver, in ifunc mode this is the ifunc (which has |
4611 | // a separate resolver). |
4612 | std::string ResolverName = MangledName; |
4613 | if (getTarget().supportsIFunc()) { |
4614 | switch (FD->getMultiVersionKind()) { |
4615 | case MultiVersionKind::None: |
4616 | llvm_unreachable("unexpected MultiVersionKind::None for resolver" ); |
4617 | case MultiVersionKind::Target: |
4618 | case MultiVersionKind::CPUSpecific: |
4619 | case MultiVersionKind::CPUDispatch: |
4620 | ResolverName += ".ifunc" ; |
4621 | break; |
4622 | case MultiVersionKind::TargetClones: |
4623 | case MultiVersionKind::TargetVersion: |
4624 | break; |
4625 | } |
4626 | } else if (FD->isTargetMultiVersion()) { |
4627 | ResolverName += ".resolver" ; |
4628 | } |
4629 | |
4630 | bool ShouldReturnIFunc = |
4631 | getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion(); |
4632 | |
4633 | // If the resolver has already been created, just return it. This lookup may |
4634 | // yield a function declaration instead of a resolver on AArch64. That is |
4635 | // because we didn't know whether a resolver will be generated when we first |
4636 | // encountered a use of the symbol named after this resolver. Therefore, |
4637 | // targets which support ifuncs should not return here unless we actually |
4638 | // found an ifunc. |
  llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName);
4640 | if (ResolverGV && (isa<llvm::GlobalIFunc>(Val: ResolverGV) || !ShouldReturnIFunc)) |
4641 | return ResolverGV; |
4642 | |
  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
4645 | |
4646 | // The resolver needs to be created. For target and target_clones, defer |
4647 | // creation until the end of the TU. |
4648 | if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion()) |
4649 | AddDeferredMultiVersionResolverToEmit(GD); |
4650 | |
4651 | // For cpu_specific, don't create an ifunc yet because we don't know if the |
4652 | // cpu_dispatch will be emitted in this translation unit. |
  if (ShouldReturnIFunc) {
    unsigned AS = getTypes().getTargetAddressSpace(FD->getType());
    llvm::Type *ResolverType =
        llvm::FunctionType::get(llvm::PointerType::get(getLLVMContext(), AS),
                                /*isVarArg=*/false);
    llvm::Constant *Resolver = GetOrCreateLLVMFunction(
        MangledName + ".resolver", ResolverType, GlobalDecl{},
        /*ForVTable=*/false);
    llvm::GlobalIFunc *GIF =
        llvm::GlobalIFunc::create(DeclTy, AS, getMultiversionLinkage(*this, GD),
                                  "", Resolver, &getModule());
    GIF->setName(ResolverName);
    SetCommonAttributes(FD, GIF);
    if (ResolverGV)
      replaceDeclarationWith(ResolverGV, GIF);
4667 | return GIF; |
4668 | } |
4669 | |
  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
      ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
  assert(isa<llvm::GlobalValue>(Resolver) && !ResolverGV &&
         "Resolver should be created for the first time");
  SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
4675 | return Resolver; |
4676 | } |
4677 | |
4678 | bool CodeGenModule::shouldDropDLLAttribute(const Decl *D, |
4679 | const llvm::GlobalValue *GV) const { |
4680 | auto SC = GV->getDLLStorageClass(); |
4681 | if (SC == llvm::GlobalValue::DefaultStorageClass) |
4682 | return false; |
4683 | const Decl *MRD = D->getMostRecentDecl(); |
4684 | return (((SC == llvm::GlobalValue::DLLImportStorageClass && |
4685 | !MRD->hasAttr<DLLImportAttr>()) || |
4686 | (SC == llvm::GlobalValue::DLLExportStorageClass && |
4687 | !MRD->hasAttr<DLLExportAttr>())) && |
4688 | !shouldMapVisibilityToDLLExport(cast<NamedDecl>(MRD))); |
4689 | } |
4690 | |
4691 | /// GetOrCreateLLVMFunction - If the specified mangled name is not in the |
4692 | /// module, create and return an llvm Function with the specified type. If there |
4693 | /// is something in the module with the specified name, return it potentially |
4694 | /// bitcasted to the right type. |
4695 | /// |
/// If D is non-null, it specifies a decl that corresponds to this. This is used
4697 | /// to set the attributes on the function when it is first created. |
4698 | llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction( |
4699 | StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable, |
    bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
4701 | ForDefinition_t IsForDefinition) { |
4702 | const Decl *D = GD.getDecl(); |
4703 | |
4704 | std::string NameWithoutMultiVersionMangling; |
  if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
    // For the device, mark the function as one that should be emitted.
4707 | if (getLangOpts().OpenMPIsTargetDevice && OpenMPRuntime && |
4708 | !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() && |
4709 | !DontDefer && !IsForDefinition) { |
4710 | if (const FunctionDecl *FDDef = FD->getDefinition()) { |
4711 | GlobalDecl GDDef; |
        if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
          GDDef = GlobalDecl(CD, GD.getCtorType());
        else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
          GDDef = GlobalDecl(DD, GD.getDtorType());
        else
          GDDef = GlobalDecl(FDDef);
        EmitGlobal(GDDef);
4719 | } |
4720 | } |
4721 | |
4722 | // Any attempts to use a MultiVersion function should result in retrieving |
4723 | // the iFunc instead. Name Mangling will handle the rest of the changes. |
4724 | if (FD->isMultiVersion()) { |
      UpdateMultiVersionNames(GD, FD, MangledName);
4726 | if (!IsForDefinition) { |
        // On AArch64 we do not immediately emit an ifunc resolver when a
4728 | // function is used. Instead we defer the emission until we see a |
4729 | // default definition. In the meantime we just reference the symbol |
4730 | // without FMV mangling (it may or may not be replaced later). |
4731 | if (getTarget().getTriple().isAArch64()) { |
4732 | AddDeferredMultiVersionResolverToEmit(GD); |
4733 | NameWithoutMultiVersionMangling = getMangledNameImpl( |
4734 | *this, GD, FD, /*OmitMultiVersionMangling=*/true); |
4735 | } else |
4736 | return GetOrCreateMultiVersionResolver(GD); |
4737 | } |
4738 | } |
4739 | } |
4740 | |
4741 | if (!NameWithoutMultiVersionMangling.empty()) |
4742 | MangledName = NameWithoutMultiVersionMangling; |
4743 | |
4744 | // Lookup the entry, lazily creating it if necessary. |
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry) {
    if (WeakRefReferences.erase(Entry)) {
      const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
      if (FD && !FD->hasAttr<WeakAttr>())
4750 | Entry->setLinkage(llvm::Function::ExternalLinkage); |
4751 | } |
4752 | |
4753 | // Handle dropped DLL attributes. |
    if (D && shouldDropDLLAttribute(D, Entry)) {
4755 | Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
4756 | setDSOLocal(Entry); |
4757 | } |
4758 | |
4759 | // If there are two attempts to define the same mangled name, issue an |
4760 | // error. |
4761 | if (IsForDefinition && !Entry->isDeclaration()) { |
4762 | GlobalDecl OtherGD; |
      // Checking that GD is not yet in DiagnosedConflictingDefinitions is
      // required to make sure that we issue an error only once.
      if (lookupRepresentativeDecl(MangledName, OtherGD) &&
          (GD.getCanonicalDecl().getDecl() !=
           OtherGD.getCanonicalDecl().getDecl()) &&
          DiagnosedConflictingDefinitions.insert(GD).second) {
4769 | getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name) |
4770 | << MangledName; |
4771 | getDiags().Report(OtherGD.getDecl()->getLocation(), |
4772 | diag::note_previous_definition); |
4773 | } |
4774 | } |
4775 | |
    if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
4777 | (Entry->getValueType() == Ty)) { |
4778 | return Entry; |
4779 | } |
4780 | |
4781 | // Make sure the result is of the correct type. |
    // (If the function is requested for a definition, we always need to create
    // a new function, not just return a bitcast.)
4784 | if (!IsForDefinition) |
4785 | return Entry; |
4786 | } |
4787 | |
4788 | // This function doesn't have a complete type (for example, the return |
4789 | // type is an incomplete struct). Use a fake type instead, and make |
4790 | // sure not to try to set attributes. |
4791 | bool IsIncompleteFunction = false; |
4792 | |
4793 | llvm::FunctionType *FTy; |
  if (isa<llvm::FunctionType>(Ty)) {
    FTy = cast<llvm::FunctionType>(Ty);
  } else {
    FTy = llvm::FunctionType::get(VoidTy, false);
4798 | IsIncompleteFunction = true; |
4799 | } |
4800 | |
  llvm::Function *F =
      llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
                             Entry ? StringRef() : MangledName, &getModule());
4804 | |
4805 | // Store the declaration associated with this function so it is potentially |
4806 | // updated by further declarations or definitions and emitted at the end. |
4807 | if (D && D->hasAttr<AnnotateAttr>()) |
    DeferredAnnotations[MangledName] = cast<ValueDecl>(D);
4809 | |
4810 | // If we already created a function with the same mangled name (but different |
4811 | // type) before, take its name and add it to the list of functions to be |
4812 | // replaced with F at the end of CodeGen. |
4813 | // |
4814 | // This happens if there is a prototype for a function (e.g. "int f()") and |
4815 | // then a definition of a different type (e.g. "int f(int x)"). |
4816 | if (Entry) { |
4817 | F->takeName(V: Entry); |
4818 | |
4819 | // This might be an implementation of a function without a prototype, in |
4820 | // which case, try to do special replacement of calls which match the new |
4821 | // prototype. The really key thing here is that we also potentially drop |
4822 | // arguments from the call site so as to make a direct call, which makes the |
4823 | // inliner happier and suppresses a number of optimizer warnings (!) about |
4824 | // dropping arguments. |
4825 | if (!Entry->use_empty()) { |
      ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
4827 | Entry->removeDeadConstantUsers(); |
4828 | } |
4829 | |
    addGlobalValReplacement(Entry, F);
4831 | } |
4832 | |
4833 | assert(F->getName() == MangledName && "name was uniqued!" ); |
4834 | if (D) |
4835 | SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk); |
4836 | if (ExtraAttrs.hasFnAttrs()) { |
4837 | llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs()); |
    F->addFnAttrs(B);
4839 | } |
4840 | |
4841 | if (!DontDefer) { |
4842 | // All MSVC dtors other than the base dtor are linkonce_odr and delegate to |
4843 | // each other bottoming out with the base dtor. Therefore we emit non-base |
4844 | // dtors on usage, even if there is no dtor definition in the TU. |
    if (isa_and_nonnull<CXXDestructorDecl>(D) &&
        getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
                                           GD.getDtorType()))
4848 | addDeferredDeclToEmit(GD); |
4849 | |
4850 | // This is the first use or definition of a mangled name. If there is a |
4851 | // deferred decl with this name, remember that we need to emit it at the end |
4852 | // of the file. |
    auto DDI = DeferredDecls.find(MangledName);
4854 | if (DDI != DeferredDecls.end()) { |
4855 | // Move the potentially referenced deferred decl to the |
4856 | // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we |
4857 | // don't need it anymore). |
      addDeferredDeclToEmit(DDI->second);
      DeferredDecls.erase(DDI);
4860 | |
4861 | // Otherwise, there are cases we have to worry about where we're |
4862 | // using a declaration for which we must emit a definition but where |
4863 | // we might not find a top-level definition: |
4864 | // - member functions defined inline in their classes |
4865 | // - friend functions defined inline in some class |
4866 | // - special member functions with implicit definitions |
4867 | // If we ever change our AST traversal to walk into class methods, |
4868 | // this will be unnecessary. |
4869 | // |
4870 | // We also don't emit a definition for a function if it's going to be an |
4871 | // entry in a vtable, unless it's already marked as used. |
4872 | } else if (getLangOpts().CPlusPlus && D) { |
4873 | // Look for a declaration that's lexically in a record. |
      for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
4875 | FD = FD->getPreviousDecl()) { |
4876 | if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) { |
4877 | if (FD->doesThisDeclarationHaveABody()) { |
            addDeferredDeclToEmit(GD.getWithDecl(FD));
4879 | break; |
4880 | } |
4881 | } |
4882 | } |
4883 | } |
4884 | } |
4885 | |
4886 | // Make sure the result is of the requested type. |
4887 | if (!IsIncompleteFunction) { |
4888 | assert(F->getFunctionType() == Ty); |
4889 | return F; |
4890 | } |
4891 | |
4892 | return F; |
4893 | } |
4894 | |
4895 | /// GetAddrOfFunction - Return the address of the given function. If Ty is |
4896 | /// non-null, then this function will use the specified type if it has to |
4897 | /// create it (this occurs when we see a definition of the function). |
4898 | llvm::Constant * |
4899 | CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable, |
4900 | bool DontDefer, |
4901 | ForDefinition_t IsForDefinition) { |
4902 | // If there was no specific requested type, just convert it now. |
4903 | if (!Ty) { |
    const auto *FD = cast<FunctionDecl>(GD.getDecl());
    Ty = getTypes().ConvertType(FD->getType());
    if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()) &&
        GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
      Ty = getTypes().GetFunctionType(FI);
4910 | } |
4911 | } |
4912 | |
4913 | // Devirtualized destructor calls may come through here instead of via |
4914 | // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead |
4915 | // of the complete destructor when necessary. |
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
4917 | if (getTarget().getCXXABI().isMicrosoft() && |
4918 | GD.getDtorType() == Dtor_Complete && |
4919 | DD->getParent()->getNumVBases() == 0) |
4920 | GD = GlobalDecl(DD, Dtor_Base); |
4921 | } |
4922 | |
4923 | StringRef MangledName = getMangledName(GD); |
4924 | auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer, |
                                    /*IsThunk=*/false, llvm::AttributeList(),
4926 | IsForDefinition); |
  // Return the kernel handle for a HIP kernel stub function.
4928 | if (LangOpts.CUDA && !LangOpts.CUDAIsDevice && |
4929 | cast<FunctionDecl>(GD.getDecl())->hasAttr<CUDAGlobalAttr>()) { |
    auto *Handle = getCUDARuntime().getKernelHandle(
        cast<llvm::Function>(F->stripPointerCasts()), GD);
4932 | if (IsForDefinition) |
4933 | return F; |
4934 | return Handle; |
4935 | } |
4936 | return F; |
4937 | } |
4938 | |
4939 | llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) { |
4940 | llvm::GlobalValue *F = |
      cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());

  return llvm::NoCFIValue::get(F);
4944 | } |
4945 | |
4946 | static const FunctionDecl * |
4947 | GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) { |
4948 | TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl(); |
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
4950 | |
4951 | IdentifierInfo &CII = C.Idents.get(Name); |
  for (const auto *Result : DC->lookup(&CII))
    if (const auto *FD = dyn_cast<FunctionDecl>(Result))
4954 | return FD; |
4955 | |
4956 | if (!C.getLangOpts().CPlusPlus) |
4957 | return nullptr; |
4958 | |
4959 | // Demangle the premangled name from getTerminateFn() |
4960 | IdentifierInfo &CXXII = |
4961 | (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ" ) |
          ? C.Idents.get("terminate")
4963 | : C.Idents.get(Name); |
4964 | |
4965 | for (const auto &N : {"__cxxabiv1" , "std" }) { |
    IdentifierInfo &NS = C.Idents.get(N);
    for (const auto *Result : DC->lookup(&NS)) {
      const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
4969 | if (auto *LSD = dyn_cast<LinkageSpecDecl>(Result)) |
4970 | for (const auto *Result : LSD->lookup(&NS)) |
4971 | if ((ND = dyn_cast<NamespaceDecl>(Result))) |
4972 | break; |
4973 | |
4974 | if (ND) |
4975 | for (const auto *Result : ND->lookup(&CXXII)) |
4976 | if (const auto *FD = dyn_cast<FunctionDecl>(Result)) |
4977 | return FD; |
4978 | } |
4979 | } |
4980 | |
4981 | return nullptr; |
4982 | } |
4983 | |
4984 | static void setWindowsItaniumDLLImport(CodeGenModule &CGM, bool Local, |
4985 | llvm::Function *F, StringRef Name) { |
4986 | // In Windows Itanium environments, try to mark runtime functions |
4987 | // dllimport. For Mingw and MSVC, don't. We don't really know if the user |
4988 | // will link their standard library statically or dynamically. Marking |
4989 | // functions imported when they are not imported can cause linker errors |
4990 | // and warnings. |
4991 | if (!Local && CGM.getTriple().isWindowsItaniumEnvironment() && |
4992 | !CGM.getCodeGenOpts().LTOVisibilityPublicStd) { |
    const FunctionDecl *FD = GetRuntimeFunctionDecl(CGM.getContext(), Name);
4994 | if (!FD || FD->hasAttr<DLLImportAttr>()) { |
4995 | F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); |
4996 | F->setLinkage(llvm::GlobalValue::ExternalLinkage); |
4997 | } |
4998 | } |
4999 | } |
5000 | |
5001 | llvm::FunctionCallee CodeGenModule::CreateRuntimeFunction( |
5002 | QualType ReturnTy, ArrayRef<QualType> ArgTys, StringRef Name, |
    llvm::AttributeList ExtraAttrs, bool Local, bool AssumeConvergent) {
5004 | if (AssumeConvergent) { |
5005 | ExtraAttrs = |
5006 | ExtraAttrs.addFnAttribute(VMContext, llvm::Attribute::Convergent); |
5007 | } |
5008 | |
  QualType FTy = Context.getFunctionType(ReturnTy, ArgTys,
                                         FunctionProtoType::ExtProtoInfo());
  const CGFunctionInfo &Info = getTypes().arrangeFreeFunctionType(
      Context.getCanonicalType(FTy).castAs<FunctionProtoType>());
  auto *ConvTy = getTypes().GetFunctionType(Info);
  llvm::Constant *C = GetOrCreateLLVMFunction(
      Name, ConvTy, GlobalDecl(), /*ForVTable=*/false,
      /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
5017 | |
5018 | if (auto *F = dyn_cast<llvm::Function>(C)) { |
5019 | if (F->empty()) { |
      SetLLVMFunctionAttributes(GlobalDecl(), Info, F, /*IsThunk*/ false);
5021 | // FIXME: Set calling-conv properly in ExtProtoInfo |
5022 | F->setCallingConv(getRuntimeCC()); |
5023 | setWindowsItaniumDLLImport(*this, Local, F, Name); |
5024 | setDSOLocal(F); |
5025 | } |
5026 | } |
5027 | return {ConvTy, C}; |
5028 | } |
5029 | |
5030 | /// CreateRuntimeFunction - Create a new runtime function with the specified |
5031 | /// type and name. |
5032 | llvm::FunctionCallee |
5033 | CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name, |
                                     llvm::AttributeList ExtraAttrs, bool Local,
5035 | bool AssumeConvergent) { |
5036 | if (AssumeConvergent) { |
5037 | ExtraAttrs = |
5038 | ExtraAttrs.addFnAttribute(VMContext, llvm::Attribute::Convergent); |
5039 | } |
5040 | |
5041 | llvm::Constant *C = |
      GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
                              /*DontDefer=*/false, /*IsThunk=*/false,
                              ExtraAttrs);
5045 | |
5046 | if (auto *F = dyn_cast<llvm::Function>(Val: C)) { |
5047 | if (F->empty()) { |
5048 | F->setCallingConv(getRuntimeCC()); |
5049 | setWindowsItaniumDLLImport(CGM&: *this, Local, F, Name); |
5050 | setDSOLocal(F); |
5051 | // FIXME: We should use CodeGenModule::SetLLVMFunctionAttributes() instead |
5052 | // of trying to approximate the attributes using the LLVM function |
5053 | // signature. The other overload of CreateRuntimeFunction does this; it |
5054 | // should be used for new code. |
5055 | markRegisterParameterAttributes(F); |
5056 | } |
5057 | } |
5058 | |
5059 | return {FTy, C}; |
5060 | } |
5061 | |
5062 | /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module, |
5063 | /// create and return an llvm GlobalVariable with the specified type and address |
5064 | /// space. If there is something in the module with the specified name, return |
5065 | /// it potentially bitcasted to the right type. |
5066 | /// |
/// If D is non-null, it specifies a decl that corresponds to this. This is
/// used to set the attributes on the global when it is first created.
5069 | /// |
5070 | /// If IsForDefinition is true, it is guaranteed that an actual global with |
5071 | /// type Ty will be returned, not conversion of a variable with the same |
5072 | /// mangled name but some other type. |
5073 | llvm::Constant * |
5074 | CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty, |
5075 | LangAS AddrSpace, const VarDecl *D, |
5076 | ForDefinition_t IsForDefinition) { |
5077 | // Lookup the entry, lazily creating it if necessary. |
5078 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
5079 | unsigned TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace); |
5080 | if (Entry) { |
5081 | if (WeakRefReferences.erase(Ptr: Entry)) { |
5082 | if (D && !D->hasAttr<WeakAttr>()) |
5083 | Entry->setLinkage(llvm::Function::ExternalLinkage); |
5084 | } |
5085 | |
5086 | // Handle dropped DLL attributes. |
5087 | if (D && shouldDropDLLAttribute(D, Entry)) |
5088 | Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
5089 | |
5090 | if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D) |
5091 | getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: Entry); |
5092 | |
5093 | if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS) |
5094 | return Entry; |
5095 | |
5096 | // If there are two attempts to define the same mangled name, issue an |
5097 | // error. |
5098 | if (IsForDefinition && !Entry->isDeclaration()) { |
5099 | GlobalDecl OtherGD; |
5100 | const VarDecl *OtherD; |
5101 | |
      // Checking that D is not yet in DiagnosedConflictingDefinitions is
      // required to make sure that we issue an error only once.
5104 | if (D && lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
5105 | (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) && |
5106 | (OtherD = dyn_cast<VarDecl>(Val: OtherGD.getDecl())) && |
5107 | OtherD->hasInit() && |
5108 | DiagnosedConflictingDefinitions.insert(V: D).second) { |
5109 | getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name) |
5110 | << MangledName; |
5111 | getDiags().Report(OtherGD.getDecl()->getLocation(), |
5112 | diag::note_previous_definition); |
5113 | } |
5114 | } |
5115 | |
5116 | // Make sure the result is of the correct type. |
5117 | if (Entry->getType()->getAddressSpace() != TargetAS) |
5118 | return llvm::ConstantExpr::getAddrSpaceCast( |
5119 | C: Entry, Ty: llvm::PointerType::get(C&: Ty->getContext(), AddressSpace: TargetAS)); |
5120 | |
5121 | // (If global is requested for a definition, we always need to create a new |
5122 | // global, not just return a bitcast.) |
5123 | if (!IsForDefinition) |
5124 | return Entry; |
5125 | } |
5126 | |
5127 | auto DAddrSpace = GetGlobalVarAddressSpace(D); |
5128 | |
5129 | auto *GV = new llvm::GlobalVariable( |
5130 | getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr, |
5131 | MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal, |
5132 | getContext().getTargetAddressSpace(AS: DAddrSpace)); |
5133 | |
5134 | // If we already created a global with the same mangled name (but different |
5135 | // type) before, take its name and remove it from its parent. |
5136 | if (Entry) { |
5137 | GV->takeName(V: Entry); |
5138 | |
5139 | if (!Entry->use_empty()) { |
5140 | Entry->replaceAllUsesWith(V: GV); |
5141 | } |
5142 | |
5143 | Entry->eraseFromParent(); |
5144 | } |
5145 | |
5146 | // This is the first use or definition of a mangled name. If there is a |
5147 | // deferred decl with this name, remember that we need to emit it at the end |
5148 | // of the file. |
5149 | auto DDI = DeferredDecls.find(Val: MangledName); |
5150 | if (DDI != DeferredDecls.end()) { |
5151 | // Move the potentially referenced deferred decl to the DeferredDeclsToEmit |
5152 | // list, and remove it from DeferredDecls (since we don't need it anymore). |
5153 | addDeferredDeclToEmit(GD: DDI->second); |
5154 | DeferredDecls.erase(I: DDI); |
5155 | } |
5156 | |
5157 | // Handle things which are present even on external declarations. |
5158 | if (D) { |
5159 | if (LangOpts.OpenMP && !LangOpts.OpenMPSimd) |
5160 | getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: GV); |
5161 | |
5162 | // FIXME: This code is overly simple and should be merged with other global |
5163 | // handling. |
5164 | GV->setConstant(D->getType().isConstantStorage(getContext(), false, false)); |
5165 | |
5166 | GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); |
5167 | |
5168 | setLinkageForGV(GV, D); |
5169 | |
5170 | if (D->getTLSKind()) { |
5171 | if (D->getTLSKind() == VarDecl::TLS_Dynamic) |
5172 | CXXThreadLocals.push_back(x: D); |
5173 | setTLSMode(GV, D: *D); |
5174 | } |
5175 | |
5176 | setGVProperties(GV, GD: D); |
5177 | |
5178 | // If required by the ABI, treat declarations of static data members with |
5179 | // inline initializers as definitions. |
5180 | if (getContext().isMSStaticDataMemberInlineDefinition(VD: D)) { |
5181 | EmitGlobalVarDefinition(D); |
5182 | } |
5183 | |
5184 | // Emit section information for extern variables. |
5185 | if (D->hasExternalStorage()) { |
5186 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) |
5187 | GV->setSection(SA->getName()); |
5188 | } |
5189 | |
5190 | // Handle XCore specific ABI requirements. |
5191 | if (getTriple().getArch() == llvm::Triple::xcore && |
5192 | D->getLanguageLinkage() == CLanguageLinkage && |
5193 | D->getType().isConstant(Context) && |
5194 | isExternallyVisible(D->getLinkageAndVisibility().getLinkage())) |
5195 | GV->setSection(".cp.rodata" ); |
5196 | |
5197 | // Handle code model attribute |
5198 | if (const auto *CMA = D->getAttr<CodeModelAttr>()) |
5199 | GV->setCodeModel(CMA->getModel()); |
5200 | |
    // If we have a const declaration with an initializer, we may be able to
    // emit it as available_externally to expose its value to the optimizer.
5204 | if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() && |
5205 | D->getType().isConstQualified() && !GV->hasInitializer() && |
5206 | !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) { |
5207 | const auto *Record = |
5208 | Context.getBaseElementType(D->getType())->getAsCXXRecordDecl(); |
5209 | bool HasMutableFields = Record && Record->hasMutableFields(); |
5210 | if (!HasMutableFields) { |
5211 | const VarDecl *InitDecl; |
5212 | const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl); |
5213 | if (InitExpr) { |
5214 | ConstantEmitter emitter(*this); |
5215 | llvm::Constant *Init = emitter.tryEmitForInitializer(D: *InitDecl); |
5216 | if (Init) { |
5217 | auto *InitType = Init->getType(); |
5218 | if (GV->getValueType() != InitType) { |
5219 | // The type of the initializer does not match the definition. |
5220 | // This happens when an initializer has a different type from |
5221 | // the type of the global (because of padding at the end of a |
5222 | // structure for instance). |
5223 | GV->setName(StringRef()); |
5224 | // Make a new global with the correct type, this is now guaranteed |
5225 | // to work. |
5226 | auto *NewGV = cast<llvm::GlobalVariable>( |
5227 | Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition) |
5228 | ->stripPointerCasts()); |
5229 | |
5230 | // Erase the old global, since it is no longer used. |
5231 | GV->eraseFromParent(); |
5232 | GV = NewGV; |
5233 | } else { |
5234 | GV->setInitializer(Init); |
5235 | GV->setConstant(true); |
5236 | GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); |
5237 | } |
5238 | emitter.finalize(global: GV); |
5239 | } |
5240 | } |
5241 | } |
5242 | } |
5243 | } |
5244 | |
5245 | if (D && |
5246 | D->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly) { |
5247 | getTargetCodeGenInfo().setTargetAttributes(D, GV, *this); |
    // External HIP managed variables need to be recorded for transformation
    // in both device and host compilations.
5250 | if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() && |
5251 | D->hasExternalStorage()) |
5252 | getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV); |
5253 | } |
5254 | |
5255 | if (D) |
5256 | SanitizerMD->reportGlobal(GV, D: *D); |
5257 | |
5258 | LangAS ExpectedAS = |
5259 | D ? D->getType().getAddressSpace() |
5260 | : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default); |
5261 | assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS); |
5262 | if (DAddrSpace != ExpectedAS) { |
5263 | return getTargetCodeGenInfo().performAddrSpaceCast( |
5264 | CGM&: *this, V: GV, SrcAddr: DAddrSpace, |
5265 | DestTy: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: TargetAS)); |
5266 | } |
5267 | |
5268 | return GV; |
5269 | } |
5270 | |
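/// Return the address of the llvm entity (function, constructor/destructor
/// variant, or variable) corresponding to the given GlobalDecl, creating a
/// declaration if one does not exist yet.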
5271 | llvm::Constant * |
5272 | CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { |
5273 | const Decl *D = GD.getDecl(); |
5274 | |
5275 | if (isa<CXXConstructorDecl>(Val: D) || isa<CXXDestructorDecl>(Val: D)) |
5276 | return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr, |
5277 | /*DontDefer=*/false, IsForDefinition); |
5278 | |
5279 | if (isa<CXXMethodDecl>(Val: D)) { |
5280 | auto FInfo = |
5281 | &getTypes().arrangeCXXMethodDeclaration(MD: cast<CXXMethodDecl>(Val: D)); |
5282 | auto Ty = getTypes().GetFunctionType(Info: *FInfo); |
5283 | return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, |
5284 | IsForDefinition); |
5285 | } |
5286 | |
5287 | if (isa<FunctionDecl>(Val: D)) { |
5288 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
5289 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
5290 | return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, |
5291 | IsForDefinition); |
5292 | } |
5293 | |
5294 | return GetAddrOfGlobalVar(D: cast<VarDecl>(Val: D), /*Ty=*/nullptr, IsForDefinition); |
5295 | } |
5296 | |
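/// Get or create a global used by the C++ ABI code (e.g. for vtables or RTTI)
/// with the given name, type, linkage and alignment, replacing any previous
/// declaration that had a different type.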
5297 | llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable( |
5298 | StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage, |
5299 | llvm::Align Alignment) { |
5300 | llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name); |
5301 | llvm::GlobalVariable *OldGV = nullptr; |
5302 | |
5303 | if (GV) { |
5304 | // Check if the variable has the right type. |
5305 | if (GV->getValueType() == Ty) |
5306 | return GV; |
5307 | |
    // Because of C++ name mangling, the only way we can end up with an
    // already existing global with the same name is if it has been declared
    // extern "C".
5310 | assert(GV->isDeclaration() && "Declaration has wrong type!" ); |
5311 | OldGV = GV; |
5312 | } |
5313 | |
5314 | // Create a new variable. |
5315 | GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true, |
5316 | Linkage, nullptr, Name); |
5317 | |
5318 | if (OldGV) { |
5319 | // Replace occurrences of the old variable if needed. |
5320 | GV->takeName(V: OldGV); |
5321 | |
5322 | if (!OldGV->use_empty()) { |
5323 | OldGV->replaceAllUsesWith(V: GV); |
5324 | } |
5325 | |
5326 | OldGV->eraseFromParent(); |
5327 | } |
5328 | |
5329 | if (supportsCOMDAT() && GV->isWeakForLinker() && |
5330 | !GV->hasAvailableExternallyLinkage()) |
5331 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
5332 | |
5333 | GV->setAlignment(Alignment); |
5334 | |
5335 | return GV; |
5336 | } |
5337 | |
5338 | /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the |
5339 | /// given global variable. If Ty is non-null and if the global doesn't exist, |
5340 | /// then it will be created with the specified type instead of whatever the |
5341 | /// normal requested type would be. If IsForDefinition is true, it is guaranteed |
5342 | /// that an actual global with type Ty will be returned, not conversion of a |
5343 | /// variable with the same mangled name but some other type. |
5344 | llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D, |
5345 | llvm::Type *Ty, |
5346 | ForDefinition_t IsForDefinition) { |
5347 | assert(D->hasGlobalStorage() && "Not a global variable" ); |
5348 | QualType ASTTy = D->getType(); |
5349 | if (!Ty) |
5350 | Ty = getTypes().ConvertTypeForMem(T: ASTTy); |
5351 | |
5352 | StringRef MangledName = getMangledName(GD: D); |
5353 | return GetOrCreateLLVMGlobal(MangledName, Ty, AddrSpace: ASTTy.getAddressSpace(), D, |
5354 | IsForDefinition); |
5355 | } |
5356 | |
5357 | /// CreateRuntimeVariable - Create a new runtime global variable with the |
5358 | /// specified type and name. |
5359 | llvm::Constant * |
5360 | CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty, |
5361 | StringRef Name) { |
5362 | LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global |
5363 | : LangAS::Default; |
5364 | auto *Ret = GetOrCreateLLVMGlobal(MangledName: Name, Ty, AddrSpace, D: nullptr); |
5365 | setDSOLocal(cast<llvm::GlobalValue>(Val: Ret->stripPointerCasts())); |
5366 | return Ret; |
5367 | } |
5368 | |
5369 | void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) { |
5370 | assert(!D->getInit() && "Cannot emit definite definitions here!" ); |
5371 | |
5372 | StringRef MangledName = getMangledName(GD: D); |
5373 | llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName); |
5374 | |
  // We already have a definition, not a declaration, with the same mangled
  // name. Emitting the declaration is not required (and would actually
  // overwrite the emitted definition).
5378 | if (GV && !GV->isDeclaration()) |
5379 | return; |
5380 | |
5381 | // If we have not seen a reference to this variable yet, place it into the |
5382 | // deferred declarations table to be emitted if needed later. |
5383 | if (!MustBeEmitted(D) && !GV) { |
5384 | DeferredDecls[MangledName] = D; |
5385 | return; |
5386 | } |
5387 | |
5388 | // The tentative definition is the only definition. |
5389 | EmitGlobalVarDefinition(D); |
5390 | } |
5391 | |
5392 | // Return a GlobalDecl. Use the base variants for destructors and constructors. |
5393 | static GlobalDecl getBaseVariantGlobalDecl(const NamedDecl *D) { |
5394 | if (auto const *CD = dyn_cast<const CXXConstructorDecl>(Val: D)) |
5395 | return GlobalDecl(CD, CXXCtorType::Ctor_Base); |
5396 | else if (auto const *DD = dyn_cast<const CXXDestructorDecl>(Val: D)) |
5397 | return GlobalDecl(DD, CXXDtorType::Dtor_Base); |
5398 | return GlobalDecl(D); |
5399 | } |
5400 | |
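/// Emit debug information for a variable or function that is referenced from
/// this translation unit but defined elsewhere; no code or storage is emitted.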
5401 | void CodeGenModule::EmitExternalDeclaration(const DeclaratorDecl *D) { |
5402 | CGDebugInfo *DI = getModuleDebugInfo(); |
5403 | if (!DI || !getCodeGenOpts().hasReducedDebugInfo()) |
5404 | return; |
5405 | |
5406 | GlobalDecl GD = getBaseVariantGlobalDecl(D); |
5407 | if (!GD) |
5408 | return; |
5409 | |
5410 | llvm::Constant *Addr = GetAddrOfGlobal(GD)->stripPointerCasts(); |
5411 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) { |
5412 | DI->EmitExternalVariable( |
5413 | GV: cast<llvm::GlobalVariable>(Val: Addr->stripPointerCasts()), Decl: VD); |
5414 | } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
5415 | llvm::Function *Fn = cast<llvm::Function>(Val: Addr); |
5416 | if (!Fn->getSubprogram()) |
5417 | DI->EmitFunctionDecl(GD, Loc: FD->getLocation(), FnType: FD->getType(), Fn); |
5418 | } |
5419 | } |
5420 | |
5421 | CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const { |
5422 | return Context.toCharUnitsFromBits( |
5423 | BitSize: getDataLayout().getTypeStoreSizeInBits(Ty)); |
5424 | } |
5425 | |
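/// Return the language address space a global variable declaration should be
/// emitted in, applying OpenCL, SYCL, CUDA/HIP and OpenMP rules before
/// falling back to the target's default for the declaration.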
5426 | LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) { |
5427 | if (LangOpts.OpenCL) { |
5428 | LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global; |
5429 | assert(AS == LangAS::opencl_global || |
5430 | AS == LangAS::opencl_global_device || |
5431 | AS == LangAS::opencl_global_host || |
5432 | AS == LangAS::opencl_constant || |
5433 | AS == LangAS::opencl_local || |
5434 | AS >= LangAS::FirstTargetAddressSpace); |
5435 | return AS; |
5436 | } |
5437 | |
5438 | if (LangOpts.SYCLIsDevice && |
5439 | (!D || D->getType().getAddressSpace() == LangAS::Default)) |
5440 | return LangAS::sycl_global; |
5441 | |
5442 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) { |
5443 | if (D) { |
5444 | if (D->hasAttr<CUDAConstantAttr>()) |
5445 | return LangAS::cuda_constant; |
5446 | if (D->hasAttr<CUDASharedAttr>()) |
5447 | return LangAS::cuda_shared; |
5448 | if (D->hasAttr<CUDADeviceAttr>()) |
5449 | return LangAS::cuda_device; |
5450 | if (D->getType().isConstQualified()) |
5451 | return LangAS::cuda_constant; |
5452 | } |
5453 | return LangAS::cuda_device; |
5454 | } |
5455 | |
5456 | if (LangOpts.OpenMP) { |
5457 | LangAS AS; |
5458 | if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(VD: D, AS)) |
5459 | return AS; |
5460 | } |
5461 | return getTargetCodeGenInfo().getGlobalVarAddressSpace(CGM&: *this, D); |
5462 | } |
5463 | |
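/// Address space in which constant data such as string literals should be
/// emitted for the current language and target.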
5464 | LangAS CodeGenModule::GetGlobalConstantAddressSpace() const { |
5465 | // OpenCL v1.2 s6.5.3: a string literal is in the constant address space. |
5466 | if (LangOpts.OpenCL) |
5467 | return LangAS::opencl_constant; |
5468 | if (LangOpts.SYCLIsDevice) |
5469 | return LangAS::sycl_global; |
5470 | if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV()) |
5471 | // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V) |
5472 | // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up |
5473 | // with OpVariable instructions with Generic storage class which is not |
5474 | // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V |
    // UniformConstant storage class is not viable as pointers to it may not
    // be cast to Generic pointers, which are used to model HIP's "flat"
    // pointers.
5477 | return LangAS::cuda_device; |
5478 | if (auto AS = getTarget().getConstantAddressSpace()) |
5479 | return *AS; |
5480 | return LangAS::Default; |
5481 | } |
5482 | |
5483 | // In address space agnostic languages, string literals are in default address |
5484 | // space in AST. However, certain targets (e.g. amdgcn) request them to be |
5485 | // emitted in constant address space in LLVM IR. To be consistent with other |
// parts of the AST, string literal global variables in the constant address
// space need to be cast to the default address space before being put into
// the address map and referenced by other parts of CodeGen.
5489 | // In OpenCL, string literals are in constant address space in AST, therefore |
5490 | // they should not be casted to default address space. |
5491 | static llvm::Constant * |
5492 | castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM, |
5493 | llvm::GlobalVariable *GV) { |
5494 | llvm::Constant *Cast = GV; |
5495 | if (!CGM.getLangOpts().OpenCL) { |
5496 | auto AS = CGM.GetGlobalConstantAddressSpace(); |
5497 | if (AS != LangAS::Default) |
5498 | Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast( |
5499 | CGM, V: GV, SrcAddr: AS, |
5500 | DestTy: llvm::PointerType::get( |
5501 | C&: CGM.getLLVMContext(), |
5502 | AddressSpace: CGM.getContext().getTargetAddressSpace(AS: LangAS::Default))); |
5503 | } |
5504 | return Cast; |
5505 | } |
5506 | |
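/// If D is an internal-linkage entity declared in an extern "C" context and
/// marked 'used', record it so it can later be given the unmangled name,
/// unless another such entity already claims that name.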
5507 | template<typename SomeDecl> |
5508 | void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D, |
5509 | llvm::GlobalValue *GV) { |
5510 | if (!getLangOpts().CPlusPlus) |
5511 | return; |
5512 | |
5513 | // Must have 'used' attribute, or else inline assembly can't rely on |
5514 | // the name existing. |
5515 | if (!D->template hasAttr<UsedAttr>()) |
5516 | return; |
5517 | |
5518 | // Must have internal linkage and an ordinary name. |
5519 | if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal) |
5520 | return; |
5521 | |
5522 | // Must be in an extern "C" context. Entities declared directly within |
5523 | // a record are not extern "C" even if the record is in such a context. |
5524 | const SomeDecl *First = D->getFirstDecl(); |
5525 | if (First->getDeclContext()->isRecord() || !First->isInExternCContext()) |
5526 | return; |
5527 | |
5528 | // OK, this is an internal linkage entity inside an extern "C" linkage |
5529 | // specification. Make a note of that so we can give it the "expected" |
5530 | // mangled name if nothing else is using that name. |
5531 | std::pair<StaticExternCMap::iterator, bool> R = |
5532 | StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV)); |
5533 | |
5534 | // If we have multiple internal linkage entities with the same name |
5535 | // in extern "C" regions, none of them gets that name. |
5536 | if (!R.second) |
5537 | R.first->second = nullptr; |
5538 | } |
5539 | |
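// Whether a definition needs to be placed in a COMDAT so the linker can
// deduplicate the copies emitted by multiple translation units.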
5540 | static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) { |
5541 | if (!CGM.supportsCOMDAT()) |
5542 | return false; |
5543 | |
5544 | if (D.hasAttr<SelectAnyAttr>()) |
5545 | return true; |
5546 | |
5547 | GVALinkage Linkage; |
5548 | if (auto *VD = dyn_cast<VarDecl>(Val: &D)) |
5549 | Linkage = CGM.getContext().GetGVALinkageForVariable(VD); |
5550 | else |
5551 | Linkage = CGM.getContext().GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: &D)); |
5552 | |
5553 | switch (Linkage) { |
5554 | case GVA_Internal: |
5555 | case GVA_AvailableExternally: |
5556 | case GVA_StrongExternal: |
5557 | return false; |
5558 | case GVA_DiscardableODR: |
5559 | case GVA_StrongODR: |
5560 | return true; |
5561 | } |
5562 | llvm_unreachable("No such linkage" ); |
5563 | } |
5564 | |
5565 | bool CodeGenModule::supportsCOMDAT() const { |
5566 | return getTriple().supportsCOMDAT(); |
5567 | } |
5568 | |
5569 | void CodeGenModule::maybeSetTrivialComdat(const Decl &D, |
5570 | llvm::GlobalObject &GO) { |
5571 | if (!shouldBeInCOMDAT(CGM&: *this, D)) |
5572 | return; |
5573 | GO.setComdat(TheModule.getOrInsertComdat(Name: GO.getName())); |
5574 | } |
5575 | |
5576 | const ABIInfo &CodeGenModule::getABIInfo() { |
5577 | return getTargetCodeGenInfo().getABIInfo(); |
5578 | } |
5579 | |
5580 | /// Pass IsTentative as true if you want to create a tentative definition. |
5581 | void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D, |
5582 | bool IsTentative) { |
  // OpenCL global variables of sampler type are translated to function calls,
  // therefore there is no need to emit them as globals.
5585 | QualType ASTTy = D->getType(); |
5586 | if (getLangOpts().OpenCL && ASTTy->isSamplerT()) |
5587 | return; |
5588 | |
5589 | // HLSL default buffer constants will be emitted during HLSLBufferDecl codegen |
5590 | if (getLangOpts().HLSL && |
5591 | D->getType().getAddressSpace() == LangAS::hlsl_constant) |
5592 | return; |
5593 | |
  // If this is an OpenMP device, check if it is legal to emit this global
5595 | // normally. |
5596 | if (LangOpts.OpenMPIsTargetDevice && OpenMPRuntime && |
5597 | OpenMPRuntime->emitTargetGlobalVariable(GD: D)) |
5598 | return; |
5599 | |
5600 | llvm::TrackingVH<llvm::Constant> Init; |
5601 | bool NeedsGlobalCtor = false; |
5602 | // Whether the definition of the variable is available externally. |
  // If yes, we shouldn't emit the GlobalCtor and GlobalDtor for the variable
  // since that is the job of its original source.
5605 | bool IsDefinitionAvailableExternally = |
5606 | getContext().GetGVALinkageForVariable(VD: D) == GVA_AvailableExternally; |
5607 | bool NeedsGlobalDtor = |
5608 | !IsDefinitionAvailableExternally && |
5609 | D->needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor; |
5610 | |
  // It is pointless to emit the definition for an available_externally
  // variable which can't be marked as const.
  // We don't need to check whether it needs a global ctor or dtor. See the
  // above comment for the reasoning.
5615 | if (IsDefinitionAvailableExternally && |
5616 | (!D->hasConstantInitialization() || |
5617 | // TODO: Update this when we have interface to check constexpr |
5618 | // destructor. |
5619 | D->needsDestruction(Ctx: getContext()) || |
5620 | !D->getType().isConstantStorage(getContext(), true, true))) |
5621 | return; |
5622 | |
5623 | const VarDecl *InitDecl; |
5624 | const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl); |
5625 | |
5626 | std::optional<ConstantEmitter> emitter; |
5627 | |
5628 | // CUDA E.2.4.1 "__shared__ variables cannot have an initialization |
5629 | // as part of their declaration." Sema has already checked for |
5630 | // error cases, so we just need to set Init to UndefValue. |
5631 | bool IsCUDASharedVar = |
5632 | getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>(); |
5633 | // Shadows of initialized device-side global variables are also left |
5634 | // undefined. |
5635 | // Managed Variables should be initialized on both host side and device side. |
5636 | bool IsCUDAShadowVar = |
5637 | !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() && |
5638 | (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() || |
5639 | D->hasAttr<CUDASharedAttr>()); |
5640 | bool IsCUDADeviceShadowVar = |
5641 | getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() && |
5642 | (D->getType()->isCUDADeviceBuiltinSurfaceType() || |
5643 | D->getType()->isCUDADeviceBuiltinTextureType()); |
5644 | if (getLangOpts().CUDA && |
5645 | (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar)) |
5646 | Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy)); |
5647 | else if (D->hasAttr<LoaderUninitializedAttr>()) |
5648 | Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy)); |
5649 | else if (!InitExpr) { |
5650 | // This is a tentative definition; tentative definitions are |
5651 | // implicitly initialized with { 0 }. |
5652 | // |
5653 | // Note that tentative definitions are only emitted at the end of |
5654 | // a translation unit, so they should never have incomplete |
5655 | // type. In addition, EmitTentativeDefinition makes sure that we |
5656 | // never attempt to emit a tentative definition if a real one |
    // exists. A use may still exist, however, so we still may need
5658 | // to do a RAUW. |
5659 | assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type" ); |
5660 | Init = EmitNullConstant(T: D->getType()); |
5661 | } else { |
5662 | initializedGlobalDecl = GlobalDecl(D); |
5663 | emitter.emplace(args&: *this); |
5664 | llvm::Constant *Initializer = emitter->tryEmitForInitializer(D: *InitDecl); |
5665 | if (!Initializer) { |
5666 | QualType T = InitExpr->getType(); |
5667 | if (D->getType()->isReferenceType()) |
5668 | T = D->getType(); |
5669 | |
5670 | if (getLangOpts().HLSL && |
5671 | D->getType().getTypePtr()->isHLSLResourceRecord()) { |
5672 | Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T: ASTTy)); |
5673 | NeedsGlobalCtor = true; |
5674 | } else if (getLangOpts().CPlusPlus) { |
5675 | Init = EmitNullConstant(T); |
5676 | if (!IsDefinitionAvailableExternally) |
5677 | NeedsGlobalCtor = true; |
5678 | if (InitDecl->hasFlexibleArrayInit(Ctx: getContext())) { |
5679 | ErrorUnsupported(D, "flexible array initializer" ); |
5680 | // We cannot create ctor for flexible array initializer |
5681 | NeedsGlobalCtor = false; |
5682 | } |
5683 | } else { |
5684 | ErrorUnsupported(D, "static initializer" ); |
5685 | Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T)); |
5686 | } |
5687 | } else { |
5688 | Init = Initializer; |
5689 | // We don't need an initializer, so remove the entry for the delayed |
5690 | // initializer position (just in case this entry was delayed) if we |
5691 | // also don't need to register a destructor. |
5692 | if (getLangOpts().CPlusPlus && !NeedsGlobalDtor) |
5693 | DelayedCXXInitPosition.erase(D); |
5694 | |
5695 | #ifndef NDEBUG |
5696 | CharUnits VarSize = getContext().getTypeSizeInChars(T: ASTTy) + |
5697 | InitDecl->getFlexibleArrayInitChars(Ctx: getContext()); |
5698 | CharUnits CstSize = CharUnits::fromQuantity( |
5699 | Quantity: getDataLayout().getTypeAllocSize(Ty: Init->getType())); |
5700 | assert(VarSize == CstSize && "Emitted constant has unexpected size" ); |
5701 | #endif |
5702 | } |
5703 | } |
5704 | |
5705 | llvm::Type* InitType = Init->getType(); |
5706 | llvm::Constant *Entry = |
5707 | GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative)); |
5708 | |
5709 | // Strip off pointer casts if we got them. |
5710 | Entry = Entry->stripPointerCasts(); |
5711 | |
5712 | // Entry is now either a Function or GlobalVariable. |
5713 | auto *GV = dyn_cast<llvm::GlobalVariable>(Val: Entry); |
5714 | |
5715 | // We have a definition after a declaration with the wrong type. |
5716 | // We must make a new GlobalVariable* and update everything that used OldGV |
5717 | // (a declaration or tentative definition) with the new GlobalVariable* |
5718 | // (which will be a definition). |
5719 | // |
5720 | // This happens if there is a prototype for a global (e.g. |
5721 | // "extern int x[];") and then a definition of a different type (e.g. |
5722 | // "int x[10];"). This also happens when an initializer has a different type |
5723 | // from the type of the global (this happens with unions). |
5724 | if (!GV || GV->getValueType() != InitType || |
5725 | GV->getType()->getAddressSpace() != |
5726 | getContext().getTargetAddressSpace(AS: GetGlobalVarAddressSpace(D))) { |
5727 | |
5728 | // Move the old entry aside so that we'll create a new one. |
5729 | Entry->setName(StringRef()); |
5730 | |
5731 | // Make a new global with the correct type, this is now guaranteed to work. |
5732 | GV = cast<llvm::GlobalVariable>( |
5733 | Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative)) |
5734 | ->stripPointerCasts()); |
5735 | |
5736 | // Replace all uses of the old global with the new global |
5737 | llvm::Constant *NewPtrForOldDecl = |
5738 | llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(C: GV, |
5739 | Ty: Entry->getType()); |
5740 | Entry->replaceAllUsesWith(V: NewPtrForOldDecl); |
5741 | |
5742 | // Erase the old global, since it is no longer used. |
5743 | cast<llvm::GlobalValue>(Val: Entry)->eraseFromParent(); |
5744 | } |
5745 | |
5746 | MaybeHandleStaticInExternC(D, GV); |
5747 | |
5748 | if (D->hasAttr<AnnotateAttr>()) |
5749 | AddGlobalAnnotations(D, GV); |
5750 | |
5751 | // Set the llvm linkage type as appropriate. |
5752 | llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD: D); |
5753 | |
5754 | // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on |
5755 | // the device. [...]" |
5756 | // CUDA B.2.2 "The __constant__ qualifier, optionally used together with |
5757 | // __device__, declares a variable that: [...] |
5758 | // Is accessible from all the threads within the grid and from the host |
5759 | // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize() |
5760 | // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())." |
5761 | if (LangOpts.CUDA) { |
5762 | if (LangOpts.CUDAIsDevice) { |
5763 | if (Linkage != llvm::GlobalValue::InternalLinkage && |
5764 | (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || |
5765 | D->getType()->isCUDADeviceBuiltinSurfaceType() || |
5766 | D->getType()->isCUDADeviceBuiltinTextureType())) |
5767 | GV->setExternallyInitialized(true); |
5768 | } else { |
5769 | getCUDARuntime().internalizeDeviceSideVar(D, Linkage); |
5770 | } |
5771 | getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV); |
5772 | } |
5773 | |
5774 | if (LangOpts.HLSL && GetGlobalVarAddressSpace(D) == LangAS::hlsl_input) { |
5775 | // HLSL Input variables are considered to be set by the driver/pipeline, but |
5776 | // only visible to a single thread/wave. |
5777 | GV->setExternallyInitialized(true); |
5778 | } else { |
5779 | GV->setInitializer(Init); |
5780 | } |
5781 | |
5782 | if (LangOpts.HLSL) |
5783 | getHLSLRuntime().handleGlobalVarDefinition(VD: D, Var: GV); |
5784 | |
5785 | if (emitter) |
5786 | emitter->finalize(global: GV); |
5787 | |
5788 | // If it is safe to mark the global 'constant', do so now. |
5789 | GV->setConstant((D->hasAttr<CUDAConstantAttr>() && LangOpts.CUDAIsDevice) || |
5790 | (!NeedsGlobalCtor && !NeedsGlobalDtor && |
5791 | D->getType().isConstantStorage(getContext(), true, true))); |
5792 | |
5793 | // If it is in a read-only section, mark it 'constant'. |
5794 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) { |
5795 | const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()]; |
5796 | if ((SI.SectionFlags & ASTContext::PSF_Write) == 0) |
5797 | GV->setConstant(true); |
5798 | } |
5799 | |
5800 | CharUnits AlignVal = getContext().getDeclAlign(D); |
  // Check for alignment specified in an 'omp allocate' directive.
5802 | if (std::optional<CharUnits> AlignValFromAllocate = |
5803 | getOMPAllocateAlignment(VD: D)) |
5804 | AlignVal = *AlignValFromAllocate; |
5805 | GV->setAlignment(AlignVal.getAsAlign()); |
5806 | |
5807 | // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper |
5808 | // function is only defined alongside the variable, not also alongside |
  // callers. Normally, all accesses to a thread_local go through the
  // thread-wrapper in order to ensure initialization has occurred, so the
  // underlying variable will never be used other than via the thread-wrapper
  // and can be converted to internal linkage.
5813 | // |
5814 | // However, if the variable has the 'constinit' attribute, it _can_ be |
5815 | // referenced directly, without calling the thread-wrapper, so the linkage |
5816 | // must not be changed. |
5817 | // |
5818 | // Additionally, if the variable isn't plain external linkage, e.g. if it's |
5819 | // weak or linkonce, the de-duplication semantics are important to preserve, |
5820 | // so we don't change the linkage. |
5821 | if (D->getTLSKind() == VarDecl::TLS_Dynamic && |
5822 | Linkage == llvm::GlobalValue::ExternalLinkage && |
5823 | Context.getTargetInfo().getTriple().isOSDarwin() && |
5824 | !D->hasAttr<ConstInitAttr>()) |
5825 | Linkage = llvm::GlobalValue::InternalLinkage; |
5826 | |
  // HLSL variables in the input address space behave like memory-mapped
  // variables. Even if they are 'static', they are externally initialized and
  // read/written by the hardware/driver/pipeline.
5830 | if (LangOpts.HLSL && GetGlobalVarAddressSpace(D) == LangAS::hlsl_input) |
5831 | Linkage = llvm::GlobalValue::ExternalLinkage; |
5832 | |
5833 | GV->setLinkage(Linkage); |
5834 | if (D->hasAttr<DLLImportAttr>()) |
5835 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); |
5836 | else if (D->hasAttr<DLLExportAttr>()) |
5837 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); |
5838 | else |
5839 | GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); |
5840 | |
5841 | if (Linkage == llvm::GlobalVariable::CommonLinkage) { |
5842 | // common vars aren't constant even if declared const. |
5843 | GV->setConstant(false); |
    // Tentative definitions of global variables may be initialized with
    // non-zero null pointers. In this case they should have weak linkage
    // since common linkage requires a zero initializer and no explicit
    // section, and therefore cannot have a non-zero initial value.
5848 | if (!GV->getInitializer()->isNullValue()) |
5849 | GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage); |
5850 | } |
5851 | |
5852 | setNonAliasAttributes(GD: D, GO: GV); |
5853 | |
5854 | if (D->getTLSKind() && !GV->isThreadLocal()) { |
5855 | if (D->getTLSKind() == VarDecl::TLS_Dynamic) |
5856 | CXXThreadLocals.push_back(x: D); |
5857 | setTLSMode(GV, D: *D); |
5858 | } |
5859 | |
5860 | maybeSetTrivialComdat(*D, *GV); |
5861 | |
5862 | // Emit the initializer function if necessary. |
5863 | if (NeedsGlobalCtor || NeedsGlobalDtor) |
5864 | EmitCXXGlobalVarDeclInitFunc(D, Addr: GV, PerformInit: NeedsGlobalCtor); |
5865 | |
5866 | SanitizerMD->reportGlobal(GV, D: *D, IsDynInit: NeedsGlobalCtor); |
5867 | |
5868 | // Emit global variable debug information. |
5869 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
5870 | if (getCodeGenOpts().hasReducedDebugInfo()) |
5871 | DI->EmitGlobalVariable(GV, Decl: D); |
5872 | } |
5873 | |
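// Returns true if the variable must be emitted as a strong definition rather
// than a common symbol, e.g. because of -fno-common, an initializer, an
// explicit or pragma-specified section, TLS, weak_import, a COMDAT, or MSVC
// alignment restrictions.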
5874 | static bool isVarDeclStrongDefinition(const ASTContext &Context, |
5875 | CodeGenModule &CGM, const VarDecl *D, |
5876 | bool NoCommon) { |
5877 | // Don't give variables common linkage if -fno-common was specified unless it |
5878 | // was overridden by a NoCommon attribute. |
5879 | if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>()) |
5880 | return true; |
5881 | |
5882 | // C11 6.9.2/2: |
5883 | // A declaration of an identifier for an object that has file scope without |
5884 | // an initializer, and without a storage-class specifier or with the |
5885 | // storage-class specifier static, constitutes a tentative definition. |
5886 | if (D->getInit() || D->hasExternalStorage()) |
5887 | return true; |
5888 | |
5889 | // A variable cannot be both common and exist in a section. |
5890 | if (D->hasAttr<SectionAttr>()) |
5891 | return true; |
5892 | |
  // Likewise, a variable placed in a section by a section #pragma cannot be
  // common. We don't try to determine which is the right section in the
  // front-end; if no specialized section name is applicable, it will resort
  // to the default.
5896 | if (D->hasAttr<PragmaClangBSSSectionAttr>() || |
5897 | D->hasAttr<PragmaClangDataSectionAttr>() || |
5898 | D->hasAttr<PragmaClangRelroSectionAttr>() || |
5899 | D->hasAttr<PragmaClangRodataSectionAttr>()) |
5900 | return true; |
5901 | |
5902 | // Thread local vars aren't considered common linkage. |
5903 | if (D->getTLSKind()) |
5904 | return true; |
5905 | |
5906 | // Tentative definitions marked with WeakImportAttr are true definitions. |
5907 | if (D->hasAttr<WeakImportAttr>()) |
5908 | return true; |
5909 | |
5910 | // A variable cannot be both common and exist in a comdat. |
5911 | if (shouldBeInCOMDAT(CGM, *D)) |
5912 | return true; |
5913 | |
5914 | // Declarations with a required alignment do not have common linkage in MSVC |
5915 | // mode. |
5916 | if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { |
5917 | if (D->hasAttr<AlignedAttr>()) |
5918 | return true; |
5919 | QualType VarType = D->getType(); |
5920 | if (Context.isAlignmentRequired(T: VarType)) |
5921 | return true; |
5922 | |
5923 | if (const auto *RT = VarType->getAs<RecordType>()) { |
5924 | const RecordDecl *RD = RT->getDecl(); |
5925 | for (const FieldDecl *FD : RD->fields()) { |
5926 | if (FD->isBitField()) |
5927 | continue; |
5928 | if (FD->hasAttr<AlignedAttr>()) |
5929 | return true; |
5930 | if (Context.isAlignmentRequired(FD->getType())) |
5931 | return true; |
5932 | } |
5933 | } |
5934 | } |
5935 | |
5936 | // Microsoft's link.exe doesn't support alignments greater than 32 bytes for |
5937 | // common symbols, so symbols with greater alignment requirements cannot be |
5938 | // common. |
5939 | // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two |
5940 | // alignments for common symbols via the aligncomm directive, so this |
5941 | // restriction only applies to MSVC environments. |
5942 | if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() && |
5943 | Context.getTypeAlignIfKnown(T: D->getType()) > |
5944 | Context.toBits(CharSize: CharUnits::fromQuantity(Quantity: 32))) |
5945 | return true; |
5946 | |
5947 | return false; |
5948 | } |
5949 | |
5950 | llvm::GlobalValue::LinkageTypes |
5951 | CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D, |
5952 | GVALinkage Linkage) { |
5953 | if (Linkage == GVA_Internal) |
5954 | return llvm::Function::InternalLinkage; |
5955 | |
5956 | if (D->hasAttr<WeakAttr>()) |
5957 | return llvm::GlobalVariable::WeakAnyLinkage; |
5958 | |
5959 | if (const auto *FD = D->getAsFunction()) |
5960 | if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally) |
5961 | return llvm::GlobalVariable::LinkOnceAnyLinkage; |
5962 | |
5963 | // We are guaranteed to have a strong definition somewhere else, |
5964 | // so we can use available_externally linkage. |
5965 | if (Linkage == GVA_AvailableExternally) |
5966 | return llvm::GlobalValue::AvailableExternallyLinkage; |
5967 | |
5968 | // Note that Apple's kernel linker doesn't support symbol |
5969 | // coalescing, so we need to avoid linkonce and weak linkages there. |
5970 | // Normally, this means we just map to internal, but for explicit |
5971 | // instantiations we'll map to external. |
5972 | |
5973 | // In C++, the compiler has to emit a definition in every translation unit |
5974 | // that references the function. We should use linkonce_odr because |
5975 | // a) if all references in this translation unit are optimized away, we |
5976 | // don't need to codegen it. b) if the function persists, it needs to be |
5977 | // merged with other definitions. c) C++ has the ODR, so we know the |
5978 | // definition is dependable. |
5979 | if (Linkage == GVA_DiscardableODR) |
5980 | return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage |
5981 | : llvm::Function::InternalLinkage; |
5982 | |
5983 | // An explicit instantiation of a template has weak linkage, since |
5984 | // explicit instantiations can occur in multiple translation units |
5985 | // and must all be equivalent. However, we are not allowed to |
5986 | // throw away these explicit instantiations. |
5987 | // |
5988 | // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU, |
5989 | // so say that CUDA templates are either external (for kernels) or internal. |
5990 | // This lets llvm perform aggressive inter-procedural optimizations. For |
5991 | // -fgpu-rdc case, device function calls across multiple TU's are allowed, |
5992 | // therefore we need to follow the normal linkage paradigm. |
5993 | if (Linkage == GVA_StrongODR) { |
5994 | if (getLangOpts().AppleKext) |
5995 | return llvm::Function::ExternalLinkage; |
5996 | if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice && |
5997 | !getLangOpts().GPURelocatableDeviceCode) |
5998 | return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage |
5999 | : llvm::Function::InternalLinkage; |
6000 | return llvm::Function::WeakODRLinkage; |
6001 | } |
6002 | |
6003 | // C++ doesn't have tentative definitions and thus cannot have common |
6004 | // linkage. |
6005 | if (!getLangOpts().CPlusPlus && isa<VarDecl>(Val: D) && |
6006 | !isVarDeclStrongDefinition(Context, CGM&: *this, D: cast<VarDecl>(Val: D), |
6007 | NoCommon: CodeGenOpts.NoCommon)) |
6008 | return llvm::GlobalVariable::CommonLinkage; |
6009 | |
6010 | // selectany symbols are externally visible, so use weak instead of |
6011 | // linkonce. MSVC optimizes away references to const selectany globals, so |
6012 | // all definitions should be the same and ODR linkage should be used. |
6013 | // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx |
6014 | if (D->hasAttr<SelectAnyAttr>()) |
6015 | return llvm::GlobalVariable::WeakODRLinkage; |
6016 | |
6017 | // Otherwise, we have strong external linkage. |
6018 | assert(Linkage == GVA_StrongExternal); |
6019 | return llvm::GlobalVariable::ExternalLinkage; |
6020 | } |
6021 | |
6022 | llvm::GlobalValue::LinkageTypes |
6023 | CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) { |
6024 | GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD); |
6025 | return getLLVMLinkageForDeclarator(VD, Linkage); |
6026 | } |
6027 | |
6028 | /// Replace the uses of a function that was declared with a non-proto type. |
/// We want to silently drop extra arguments from call sites.
6030 | static void replaceUsesOfNonProtoConstant(llvm::Constant *old, |
6031 | llvm::Function *newFn) { |
6032 | // Fast path. |
6033 | if (old->use_empty()) |
6034 | return; |
6035 | |
6036 | llvm::Type *newRetTy = newFn->getReturnType(); |
6037 | SmallVector<llvm::Value *, 4> newArgs; |
6038 | |
6039 | SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent; |
6040 | |
6041 | for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end(); |
6042 | ui != ue; ui++) { |
6043 | llvm::User *user = ui->getUser(); |
6044 | |
6045 | // Recognize and replace uses of bitcasts. Most calls to |
6046 | // unprototyped functions will use bitcasts. |
6047 | if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(Val: user)) { |
6048 | if (bitcast->getOpcode() == llvm::Instruction::BitCast) |
6049 | replaceUsesOfNonProtoConstant(old: bitcast, newFn); |
6050 | continue; |
6051 | } |
6052 | |
6053 | // Recognize calls to the function. |
6054 | llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(Val: user); |
6055 | if (!callSite) |
6056 | continue; |
6057 | if (!callSite->isCallee(U: &*ui)) |
6058 | continue; |
6059 | |
6060 | // If the return types don't match exactly, then we can't |
6061 | // transform this call unless it's dead. |
6062 | if (callSite->getType() != newRetTy && !callSite->use_empty()) |
6063 | continue; |
6064 | |
6065 | // Get the call site's attribute list. |
6066 | SmallVector<llvm::AttributeSet, 8> newArgAttrs; |
6067 | llvm::AttributeList oldAttrs = callSite->getAttributes(); |
6068 | |
6069 | // If the function was passed too few arguments, don't transform. |
6070 | unsigned newNumArgs = newFn->arg_size(); |
6071 | if (callSite->arg_size() < newNumArgs) |
6072 | continue; |
6073 | |
6074 | // If extra arguments were passed, we silently drop them. |
6075 | // If any of the types mismatch, we don't transform. |
6076 | unsigned argNo = 0; |
6077 | bool dontTransform = false; |
6078 | for (llvm::Argument &A : newFn->args()) { |
6079 | if (callSite->getArgOperand(i: argNo)->getType() != A.getType()) { |
6080 | dontTransform = true; |
6081 | break; |
6082 | } |
6083 | |
6084 | // Add any parameter attributes. |
6085 | newArgAttrs.push_back(Elt: oldAttrs.getParamAttrs(ArgNo: argNo)); |
6086 | argNo++; |
6087 | } |
6088 | if (dontTransform) |
6089 | continue; |
6090 | |
6091 | // Okay, we can transform this. Create the new call instruction and copy |
6092 | // over the required information. |
6093 | newArgs.append(in_start: callSite->arg_begin(), in_end: callSite->arg_begin() + argNo); |
6094 | |
6095 | // Copy over any operand bundles. |
6096 | SmallVector<llvm::OperandBundleDef, 1> newBundles; |
6097 | callSite->getOperandBundlesAsDefs(Defs&: newBundles); |
6098 | |
6099 | llvm::CallBase *newCall; |
6100 | if (isa<llvm::CallInst>(Val: callSite)) { |
6101 | newCall = llvm::CallInst::Create(Func: newFn, Args: newArgs, Bundles: newBundles, NameStr: "" , |
6102 | InsertBefore: callSite->getIterator()); |
6103 | } else { |
6104 | auto *oldInvoke = cast<llvm::InvokeInst>(Val: callSite); |
6105 | newCall = llvm::InvokeInst::Create( |
6106 | Func: newFn, IfNormal: oldInvoke->getNormalDest(), IfException: oldInvoke->getUnwindDest(), |
6107 | Args: newArgs, Bundles: newBundles, NameStr: "" , InsertBefore: callSite->getIterator()); |
6108 | } |
6109 | newArgs.clear(); // for the next iteration |
6110 | |
6111 | if (!newCall->getType()->isVoidTy()) |
6112 | newCall->takeName(V: callSite); |
6113 | newCall->setAttributes( |
6114 | llvm::AttributeList::get(C&: newFn->getContext(), FnAttrs: oldAttrs.getFnAttrs(), |
6115 | RetAttrs: oldAttrs.getRetAttrs(), ArgAttrs: newArgAttrs)); |
6116 | newCall->setCallingConv(callSite->getCallingConv()); |
6117 | |
6118 | // Finally, remove the old call, replacing any uses with the new one. |
6119 | if (!callSite->use_empty()) |
6120 | callSite->replaceAllUsesWith(V: newCall); |
6121 | |
6122 | // Copy debug location attached to CI. |
6123 | if (callSite->getDebugLoc()) |
6124 | newCall->setDebugLoc(callSite->getDebugLoc()); |
6125 | |
6126 | callSitesToBeRemovedFromParent.push_back(Elt: callSite); |
6127 | } |
6128 | |
6129 | for (auto *callSite : callSitesToBeRemovedFromParent) { |
6130 | callSite->eraseFromParent(); |
6131 | } |
6132 | } |
6133 | |
6134 | /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we |
6135 | /// implement a function with no prototype, e.g. "int foo() {}". If there are |
6136 | /// existing call uses of the old function in the module, this adjusts them to |
6137 | /// call the new function directly. |
6138 | /// |
6139 | /// This is not just a cleanup: the always_inline pass requires direct calls to |
6140 | /// functions to be able to inline them. If there is a bitcast in the way, it |
6141 | /// won't inline them. Instcombine normally deletes these calls, but it isn't |
6142 | /// run at -O0. |
6143 | static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, |
6144 | llvm::Function *NewFn) { |
6145 | // If we're redefining a global as a function, don't transform it. |
6146 | if (!isa<llvm::Function>(Val: Old)) return; |
6147 | |
6148 | replaceUsesOfNonProtoConstant(old: Old, newFn: NewFn); |
6149 | } |
6150 | |
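/// Handle the instantiation of a C++ static data member: emit it (or defer
/// it) unless dllimport or CUDA emission rules say it must not be emitted in
/// this compilation.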
6151 | void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { |
6152 | auto DK = VD->isThisDeclarationADefinition(); |
6153 | if ((DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>()) || |
6154 | (LangOpts.CUDA && !shouldEmitCUDAGlobalVar(VD))) |
6155 | return; |
6156 | |
6157 | TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind(); |
6158 | // If we have a definition, this might be a deferred decl. If the |
6159 | // instantiation is explicit, make sure we emit it at the end. |
6160 | if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition) |
6161 | GetAddrOfGlobalVar(D: VD); |
6162 | |
6163 | EmitTopLevelDecl(VD); |
6164 | } |
6165 | |
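/// Emit the definition of a function: get or create the llvm::Function with
/// the right type, set its linkage, visibility and attributes, generate the
/// body, and register constructor/destructor and OpenMP handling as needed.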
6166 | void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD, |
6167 | llvm::GlobalValue *GV) { |
6168 | const auto *D = cast<FunctionDecl>(Val: GD.getDecl()); |
6169 | |
6170 | // Compute the function info and LLVM type. |
6171 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
6172 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
6173 | |
6174 | // Get or create the prototype for the function. |
6175 | if (!GV || (GV->getValueType() != Ty)) |
6176 | GV = cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, |
6177 | /*DontDefer=*/true, |
6178 | IsForDefinition: ForDefinition)); |
6179 | |
6180 | // Already emitted. |
6181 | if (!GV->isDeclaration()) |
6182 | return; |
6183 | |
6184 | // We need to set linkage and visibility on the function before |
6185 | // generating code for it because various parts of IR generation |
6186 | // want to propagate this information down (e.g. to local static |
6187 | // declarations). |
6188 | auto *Fn = cast<llvm::Function>(Val: GV); |
6189 | setFunctionLinkage(GD, F: Fn); |
6190 | |
6191 | // FIXME: this is redundant with part of setFunctionDefinitionAttributes |
6192 | setGVProperties(GV: Fn, GD); |
6193 | |
6194 | MaybeHandleStaticInExternC(D, GV: Fn); |
6195 | |
6196 | maybeSetTrivialComdat(*D, *Fn); |
6197 | |
6198 | CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo: FI); |
6199 | |
6200 | setNonAliasAttributes(GD, GO: Fn); |
6201 | |
6202 | bool ShouldAddOptNone = !CodeGenOpts.DisableO0ImplyOptNone && |
6203 | (CodeGenOpts.OptimizationLevel == 0) && |
6204 | !D->hasAttr<MinSizeAttr>(); |
6205 | |
6206 | if (DeviceKernelAttr::isOpenCLSpelling(D->getAttr<DeviceKernelAttr>())) { |
6207 | if (GD.getKernelReferenceKind() == KernelReferenceKind::Stub && |
6208 | !D->hasAttr<NoInlineAttr>() && |
6209 | !Fn->hasFnAttribute(llvm::Attribute::NoInline) && |
6210 | !D->hasAttr<OptimizeNoneAttr>() && |
6211 | !Fn->hasFnAttribute(llvm::Attribute::OptimizeNone) && |
6212 | !ShouldAddOptNone) { |
6213 | Fn->addFnAttr(llvm::Attribute::AlwaysInline); |
6214 | } |
6215 | } |
6216 | |
6217 | SetLLVMFunctionAttributesForDefinition(D, Fn); |
6218 | |
6219 | if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>()) |
6220 | AddGlobalCtor(Ctor: Fn, Priority: CA->getPriority()); |
6221 | if (const DestructorAttr *DA = D->getAttr<DestructorAttr>()) |
6222 | AddGlobalDtor(Dtor: Fn, Priority: DA->getPriority(), IsDtorAttrFunc: true); |
6223 | if (getLangOpts().OpenMP && D->hasAttr<OMPDeclareTargetDeclAttr>()) |
6224 | getOpenMPRuntime().emitDeclareTargetFunction(FD: D, GV); |
6225 | } |
6226 | |
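/// Emit an llvm::GlobalAlias for a declaration carrying the alias attribute,
/// replacing any existing declaration with the same mangled name and
/// diagnosing cyclic aliases.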
6227 | void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) { |
6228 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
6229 | const AliasAttr *AA = D->getAttr<AliasAttr>(); |
6230 | assert(AA && "Not an alias?" ); |
6231 | |
6232 | StringRef MangledName = getMangledName(GD); |
6233 | |
6234 | if (AA->getAliasee() == MangledName) { |
6235 | Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0; |
6236 | return; |
6237 | } |
6238 | |
6239 | // If there is a definition in the module, then it wins over the alias. |
  // This is dubious, but allow it for safety: just ignore the alias.
6241 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
6242 | if (Entry && !Entry->isDeclaration()) |
6243 | return; |
6244 | |
6245 | Aliases.push_back(x: GD); |
6246 | |
6247 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType()); |
6248 | |
6249 | // Create a reference to the named value. This ensures that it is emitted |
  // if it is a deferred decl.
6251 | llvm::Constant *Aliasee; |
6252 | llvm::GlobalValue::LinkageTypes LT; |
6253 | if (isa<llvm::FunctionType>(Val: DeclTy)) { |
6254 | Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy, GD, |
6255 | /*ForVTable=*/false); |
6256 | LT = getFunctionLinkage(GD); |
6257 | } else { |
6258 | Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default, |
6259 | /*D=*/nullptr); |
6260 | if (const auto *VD = dyn_cast<VarDecl>(Val: GD.getDecl())) |
6261 | LT = getLLVMLinkageVarDefinition(VD); |
6262 | else |
6263 | LT = getFunctionLinkage(GD); |
6264 | } |
6265 | |
6266 | // Create the new alias itself, but don't set a name yet. |
6267 | unsigned AS = Aliasee->getType()->getPointerAddressSpace(); |
6268 | auto *GA = |
6269 | llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage: LT, Name: "" , Aliasee, Parent: &getModule()); |
6270 | |
6271 | if (Entry) { |
6272 | if (GA->getAliasee() == Entry) { |
6273 | Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0; |
6274 | return; |
6275 | } |
6276 | |
6277 | assert(Entry->isDeclaration()); |
6278 | |
6279 | // If there is a declaration in the module, then we had an extern followed |
6280 | // by the alias, as in: |
6281 | // extern int test6(); |
6282 | // ... |
6283 | // int test6() __attribute__((alias("test7"))); |
6284 | // |
6285 | // Remove it and replace uses of it with the alias. |
6286 | GA->takeName(V: Entry); |
6287 | |
6288 | Entry->replaceAllUsesWith(V: GA); |
6289 | Entry->eraseFromParent(); |
6290 | } else { |
6291 | GA->setName(MangledName); |
6292 | } |
6293 | |
6294 | // Set attributes which are particular to an alias; this is a |
6295 | // specialization of the attributes which may be set on a global |
6296 | // variable/function. |
6297 | if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() || |
6298 | D->isWeakImported()) { |
6299 | GA->setLinkage(llvm::Function::WeakAnyLinkage); |
6300 | } |
6301 | |
6302 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) |
6303 | if (VD->getTLSKind()) |
6304 | setTLSMode(GV: GA, D: *VD); |
6305 | |
6306 | SetCommonAttributes(GD, GV: GA); |
6307 | |
6308 | // Emit global alias debug information. |
6309 | if (isa<VarDecl>(Val: D)) |
6310 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6311 | DI->EmitGlobalAlias(GV: cast<llvm::GlobalValue>(Val: GA->getAliasee()->stripPointerCasts()), Decl: GD); |
6312 | } |
6313 | |
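/// Emit an llvm::GlobalIFunc for a declaration carrying the ifunc attribute,
/// diagnosing cyclic resolvers and conflicting definitions.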
6314 | void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) { |
6315 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
6316 | const IFuncAttr *IFA = D->getAttr<IFuncAttr>(); |
6317 | assert(IFA && "Not an ifunc?" ); |
6318 | |
6319 | StringRef MangledName = getMangledName(GD); |
6320 | |
6321 | if (IFA->getResolver() == MangledName) { |
6322 | Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1; |
6323 | return; |
6324 | } |
6325 | |
6326 | // Report an error if some definition overrides the ifunc. |
6327 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
6328 | if (Entry && !Entry->isDeclaration()) { |
6329 | GlobalDecl OtherGD; |
6330 | if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
6331 | DiagnosedConflictingDefinitions.insert(V: GD).second) { |
6332 | Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name) |
6333 | << MangledName; |
6334 | Diags.Report(OtherGD.getDecl()->getLocation(), |
6335 | diag::note_previous_definition); |
6336 | } |
6337 | return; |
6338 | } |
6339 | |
6340 | Aliases.push_back(x: GD); |
6341 | |
6342 | // The resolver might not be visited yet. Specify a dummy non-function type to |
6343 | // indicate IsIncompleteFunction. Either the type is ignored (if the resolver |
6344 | // was emitted) or the whole function will be replaced (if the resolver has |
6345 | // not been emitted). |
6346 | llvm::Constant *Resolver = |
6347 | GetOrCreateLLVMFunction(MangledName: IFA->getResolver(), Ty: VoidTy, GD: {}, |
6348 | /*ForVTable=*/false); |
6349 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType()); |
6350 | unsigned AS = getTypes().getTargetAddressSpace(T: D->getType()); |
6351 | llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create( |
6352 | Ty: DeclTy, AddressSpace: AS, Linkage: llvm::Function::ExternalLinkage, Name: "" , Resolver, Parent: &getModule()); |
6353 | if (Entry) { |
6354 | if (GIF->getResolver() == Entry) { |
6355 | Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1; |
6356 | return; |
6357 | } |
6358 | assert(Entry->isDeclaration()); |
6359 | |
6360 | // If there is a declaration in the module, then we had an extern followed |
6361 | // by the ifunc, as in: |
6362 | // extern int test(); |
6363 | // ... |
6364 | // int test() __attribute__((ifunc("resolver"))); |
6365 | // |
6366 | // Remove it and replace uses of it with the ifunc. |
6367 | GIF->takeName(V: Entry); |
6368 | |
6369 | Entry->replaceAllUsesWith(V: GIF); |
6370 | Entry->eraseFromParent(); |
6371 | } else |
6372 | GIF->setName(MangledName); |
6373 | SetCommonAttributes(GD, GV: GIF); |
6374 | } |
6375 | |
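| /// Return the declaration of the LLVM intrinsic with the given ID, inserting |
| /// it into the module if necessary. Tys supplies the types of an overloaded |
| /// intrinsic, if any. |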
6376 | llvm::Function *CodeGenModule::getIntrinsic(unsigned IID, |
6377 | ArrayRef<llvm::Type*> Tys) { |
6378 | return llvm::Intrinsic::getOrInsertDeclaration(M: &getModule(), |
6379 | id: (llvm::Intrinsic::ID)IID, Tys); |
6380 | } |
6381 | |
6382 | static llvm::StringMapEntry<llvm::GlobalVariable *> & |
6383 | GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map, |
6384 | const StringLiteral *Literal, bool TargetIsLSB, |
6385 | bool &IsUTF16, unsigned &StringLength) { |
6386 | StringRef String = Literal->getString(); |
6387 | unsigned NumBytes = String.size(); |
6388 | |
6389 | // Check for simple case. |
6390 | if (!Literal->containsNonAsciiOrNull()) { |
6391 | StringLength = NumBytes; |
6392 | return *Map.insert(KV: std::make_pair(x&: String, y: nullptr)).first; |
6393 | } |
6394 | |
6395 | // Otherwise, convert the UTF-8 literal into a string of UTF-16 code units. |
6396 | IsUTF16 = true; |
6397 | |
6398 | SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls. |
6399 | const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); |
6400 | llvm::UTF16 *ToPtr = &ToBuf[0]; |
6401 | |
6402 | (void)llvm::ConvertUTF8toUTF16(sourceStart: &FromPtr, sourceEnd: FromPtr + NumBytes, targetStart: &ToPtr, |
6403 | targetEnd: ToPtr + NumBytes, flags: llvm::strictConversion); |
6404 | |
6405 | // ConvertUTF8toUTF16 advances ToPtr, so its final position gives the length. |
6406 | StringLength = ToPtr - &ToBuf[0]; |
6407 | |
6408 | // Add an explicit null. |
6409 | *ToPtr = 0; |
6410 | return *Map.insert(KV: std::make_pair( |
6411 | x: StringRef(reinterpret_cast<const char *>(ToBuf.data()), |
6412 | (StringLength + 1) * 2), |
6413 | y: nullptr)).first; |
6414 | } |
6415 | |
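| /// Return the address of a constant CFString object (a struct of class |
| /// pointer, flags, string data, and length) for the given string literal. |
| /// Identical literals are uniqued through CFConstantStringMap. |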
6416 | ConstantAddress |
6417 | CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) { |
6418 | unsigned StringLength = 0; |
6419 | bool isUTF16 = false; |
6420 | llvm::StringMapEntry<llvm::GlobalVariable *> &Entry = |
6421 | GetConstantCFStringEntry(Map&: CFConstantStringMap, Literal, |
6422 | TargetIsLSB: getDataLayout().isLittleEndian(), IsUTF16&: isUTF16, |
6423 | StringLength); |
6424 | |
6425 | if (auto *C = Entry.second) |
6426 | return ConstantAddress( |
6427 | C, C->getValueType(), CharUnits::fromQuantity(Quantity: C->getAlignment())); |
6428 | |
6429 | const ASTContext &Context = getContext(); |
6430 | const llvm::Triple &Triple = getTriple(); |
6431 | |
6432 | const auto CFRuntime = getLangOpts().CFRuntime; |
6433 | const bool IsSwiftABI = |
6434 | static_cast<unsigned>(CFRuntime) >= |
6435 | static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift); |
6436 | const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1; |
6437 | |
6438 | // If we don't already have it, get __CFConstantStringClassReference. |
6439 | if (!CFConstantStringClassRef) { |
6440 | const char *CFConstantStringClassName = "__CFConstantStringClassReference" ; |
6441 | llvm::Type *Ty = getTypes().ConvertType(T: getContext().IntTy); |
6442 | Ty = llvm::ArrayType::get(ElementType: Ty, NumElements: 0); |
6443 | |
6444 | switch (CFRuntime) { |
6445 | default: break; |
6446 | case LangOptions::CoreFoundationABI::Swift: [[fallthrough]]; |
6447 | case LangOptions::CoreFoundationABI::Swift5_0: |
6448 | CFConstantStringClassName = |
6449 | Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN" |
6450 | : "$s10Foundation19_NSCFConstantStringCN" ; |
6451 | Ty = IntPtrTy; |
6452 | break; |
6453 | case LangOptions::CoreFoundationABI::Swift4_2: |
6454 | CFConstantStringClassName = |
6455 | Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN" |
6456 | : "$S10Foundation19_NSCFConstantStringCN" ; |
6457 | Ty = IntPtrTy; |
6458 | break; |
6459 | case LangOptions::CoreFoundationABI::Swift4_1: |
6460 | CFConstantStringClassName = |
6461 | Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN" |
6462 | : "__T010Foundation19_NSCFConstantStringCN" ; |
6463 | Ty = IntPtrTy; |
6464 | break; |
6465 | } |
6466 | |
6467 | llvm::Constant *C = CreateRuntimeVariable(Ty, Name: CFConstantStringClassName); |
6468 | |
6469 | if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) { |
6470 | llvm::GlobalValue *GV = nullptr; |
6471 | |
6472 | if ((GV = dyn_cast<llvm::GlobalValue>(Val: C))) { |
6473 | IdentifierInfo &II = Context.Idents.get(Name: GV->getName()); |
6474 | TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl(); |
6475 | DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl); |
6476 | |
6477 | const VarDecl *VD = nullptr; |
6478 | for (const auto *Result : DC->lookup(Name: &II)) |
6479 | if ((VD = dyn_cast<VarDecl>(Val: Result))) |
6480 | break; |
6481 | |
6482 | if (Triple.isOSBinFormatELF()) { |
6483 | if (!VD) |
6484 | GV->setLinkage(llvm::GlobalValue::ExternalLinkage); |
6485 | } else { |
6486 | GV->setLinkage(llvm::GlobalValue::ExternalLinkage); |
6487 | if (!VD || !VD->hasAttr<DLLExportAttr>()) |
6488 | GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); |
6489 | else |
6490 | GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); |
6491 | } |
6492 | |
6493 | setDSOLocal(GV); |
6494 | } |
6495 | } |
6496 | |
6497 | // Decay array -> ptr |
6498 | CFConstantStringClassRef = |
6499 | IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty) : C; |
6500 | } |
6501 | |
6502 | QualType CFTy = Context.getCFConstantStringType(); |
6503 | |
6504 | auto *STy = cast<llvm::StructType>(Val: getTypes().ConvertType(T: CFTy)); |
6505 | |
6506 | ConstantInitBuilder Builder(*this); |
6507 | auto Fields = Builder.beginStruct(structTy: STy); |
6508 | |
6509 | // Class pointer. |
6510 | Fields.add(value: cast<llvm::Constant>(Val&: CFConstantStringClassRef)); |
6511 | |
6512 | // Flags. |
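| // The flag value differs only in whether the backing store is 8-bit (0x07c8) |
| // or UTF-16 (0x07d0); the Swift ABI carries an extra pointer-sized flag word. |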
6513 | if (IsSwiftABI) { |
6514 | Fields.addInt(intTy: IntPtrTy, value: IsSwift4_1 ? 0x05 : 0x01); |
6515 | Fields.addInt(intTy: Int64Ty, value: isUTF16 ? 0x07d0 : 0x07c8); |
6516 | } else { |
6517 | Fields.addInt(intTy: IntTy, value: isUTF16 ? 0x07d0 : 0x07C8); |
6518 | } |
6519 | |
6520 | // String pointer. |
6521 | llvm::Constant *C = nullptr; |
6522 | if (isUTF16) { |
6523 | auto Arr = llvm::ArrayRef( |
6524 | reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())), |
6525 | Entry.first().size() / 2); |
6526 | C = llvm::ConstantDataArray::get(Context&: VMContext, Elts: Arr); |
6527 | } else { |
6528 | C = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Entry.first()); |
6529 | } |
6530 | |
6531 | // Note: -fwritable-strings doesn't make the backing store strings of |
6532 | // CFStrings writable. |
6533 | auto *GV = |
6534 | new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true, |
6535 | llvm::GlobalValue::PrivateLinkage, C, ".str" ); |
6536 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
6537 | // Don't enforce the target's minimum global alignment, since the only use |
6538 | // of the string is via this class initializer. |
6539 | CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy) |
6540 | : Context.getTypeAlignInChars(Context.CharTy); |
6541 | GV->setAlignment(Align.getAsAlign()); |
6542 | |
6543 | // FIXME: We set the section explicitly to avoid a bug in ld64 224.1. |
6544 | // Without it LLVM can merge the string with a non unnamed_addr one during |
6545 | // LTO. Doing that changes the section it ends in, which surprises ld64. |
6546 | if (Triple.isOSBinFormatMachO()) |
6547 | GV->setSection(isUTF16 ? "__TEXT,__ustring" |
6548 | : "__TEXT,__cstring,cstring_literals" ); |
6549 | // Make sure the literal ends up in .rodata to allow for safe ICF and for |
6550 | // the static linker to adjust permissions to read-only later on. |
6551 | else if (Triple.isOSBinFormatELF()) |
6552 | GV->setSection(".rodata" ); |
6553 | |
6554 | // String. |
6555 | Fields.add(value: GV); |
6556 | |
6557 | // String length. |
6558 | llvm::IntegerType *LengthTy = |
6559 | llvm::IntegerType::get(C&: getModule().getContext(), |
6560 | NumBits: Context.getTargetInfo().getLongWidth()); |
6561 | if (IsSwiftABI) { |
6562 | if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || |
6563 | CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) |
6564 | LengthTy = Int32Ty; |
6565 | else |
6566 | LengthTy = IntPtrTy; |
6567 | } |
6568 | Fields.addInt(intTy: LengthTy, value: StringLength); |
6569 | |
6570 | // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is |
6571 | // properly aligned on 32-bit platforms. |
6572 | CharUnits Alignment = |
6573 | IsSwiftABI ? Context.toCharUnitsFromBits(BitSize: 64) : getPointerAlign(); |
6574 | |
6575 | // The struct. |
6576 | GV = Fields.finishAndCreateGlobal(args: "_unnamed_cfstring_" , args&: Alignment, |
6577 | /*isConstant=*/args: false, |
6578 | args: llvm::GlobalVariable::PrivateLinkage); |
6579 | GV->addAttribute(Kind: "objc_arc_inert" ); |
6580 | switch (Triple.getObjectFormat()) { |
6581 | case llvm::Triple::UnknownObjectFormat: |
6582 | llvm_unreachable("unknown file format" ); |
6583 | case llvm::Triple::DXContainer: |
6584 | case llvm::Triple::GOFF: |
6585 | case llvm::Triple::SPIRV: |
6586 | case llvm::Triple::XCOFF: |
6587 | llvm_unreachable("unimplemented" ); |
6588 | case llvm::Triple::COFF: |
6589 | case llvm::Triple::ELF: |
6590 | case llvm::Triple::Wasm: |
6591 | GV->setSection("cfstring" ); |
6592 | break; |
6593 | case llvm::Triple::MachO: |
6594 | GV->setSection("__DATA,__cfstring" ); |
6595 | break; |
6596 | } |
6597 | Entry.second = GV; |
6598 | |
6599 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
6600 | } |
6601 | |
6602 | bool CodeGenModule::getExpressionLocationsEnabled() const { |
6603 | return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo; |
6604 | } |
6605 | |
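| /// Lazily build the __objcFastEnumerationState record used by Objective-C |
| /// fast enumeration (for..in) loops: an unsigned long state, an id *itemsPtr, |
| /// an unsigned long *mutationsPtr, and an unsigned long extra[5] buffer. |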
6606 | QualType CodeGenModule::getObjCFastEnumerationStateType() { |
6607 | if (ObjCFastEnumerationStateType.isNull()) { |
6608 | RecordDecl *D = Context.buildImplicitRecord(Name: "__objcFastEnumerationState" ); |
6609 | D->startDefinition(); |
6610 | |
6611 | QualType FieldTypes[] = { |
6612 | Context.UnsignedLongTy, Context.getPointerType(T: Context.getObjCIdType()), |
6613 | Context.getPointerType(Context.UnsignedLongTy), |
6614 | Context.getConstantArrayType(EltTy: Context.UnsignedLongTy, ArySize: llvm::APInt(32, 5), |
6615 | SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0)}; |
6616 | |
6617 | for (size_t i = 0; i < 4; ++i) { |
6618 | FieldDecl *Field = FieldDecl::Create(C: Context, |
6619 | DC: D, |
6620 | StartLoc: SourceLocation(), |
6621 | IdLoc: SourceLocation(), Id: nullptr, |
6622 | T: FieldTypes[i], /*TInfo=*/nullptr, |
6623 | /*BitWidth=*/BW: nullptr, |
6624 | /*Mutable=*/false, |
6625 | InitStyle: ICIS_NoInit); |
6626 | Field->setAccess(AS_public); |
6627 | D->addDecl(Field); |
6628 | } |
6629 | |
6630 | D->completeDefinition(); |
6631 | ObjCFastEnumerationStateType = Context.getTagDeclType(D); |
6632 | } |
6633 | |
6634 | return ObjCFastEnumerationStateType; |
6635 | } |
6636 | |
6637 | llvm::Constant * |
6638 | CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) { |
6639 | assert(!E->getType()->isPointerType() && "Strings are always arrays" ); |
6640 | |
6641 | // Don't emit it as the address of the string, emit the string data itself |
6642 | // as an inline array. |
6643 | if (E->getCharByteWidth() == 1) { |
6644 | SmallString<64> Str(E->getString()); |
6645 | |
6646 | // Resize the string to the right size, which is indicated by its type. |
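| // e.g. for char x[8] = "hi", the type is char[8], so the initializer is |
| // padded with trailing NULs out to 8 bytes. |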
6647 | const ConstantArrayType *CAT = Context.getAsConstantArrayType(T: E->getType()); |
6648 | assert(CAT && "String literal not of constant array type!" ); |
6649 | Str.resize(N: CAT->getZExtSize()); |
6650 | return llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Str, AddNull: false); |
6651 | } |
6652 | |
6653 | auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(T: E->getType())); |
6654 | llvm::Type *ElemTy = AType->getElementType(); |
6655 | unsigned NumElements = AType->getNumElements(); |
6656 | |
6657 | // Wide strings have either 2-byte or 4-byte elements. |
6658 | if (ElemTy->getPrimitiveSizeInBits() == 16) { |
6659 | SmallVector<uint16_t, 32> Elements; |
6660 | Elements.reserve(N: NumElements); |
6661 | |
6662 | for(unsigned i = 0, e = E->getLength(); i != e; ++i) |
6663 | Elements.push_back(Elt: E->getCodeUnit(i)); |
6664 | Elements.resize(N: NumElements); |
6665 | return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements); |
6666 | } |
6667 | |
6668 | assert(ElemTy->getPrimitiveSizeInBits() == 32); |
6669 | SmallVector<uint32_t, 32> Elements; |
6670 | Elements.reserve(N: NumElements); |
6671 | |
6672 | for(unsigned i = 0, e = E->getLength(); i != e; ++i) |
6673 | Elements.push_back(Elt: E->getCodeUnit(i)); |
6674 | Elements.resize(N: NumElements); |
6675 | return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements); |
6676 | } |
6677 | |
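| /// Create a global variable in the target's global-constant address space |
| /// holding the given string data; it is marked constant unless |
| /// -fwritable-strings is in effect. |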
6678 | static llvm::GlobalVariable * |
6679 | GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT, |
6680 | CodeGenModule &CGM, StringRef GlobalName, |
6681 | CharUnits Alignment) { |
6682 | unsigned AddrSpace = CGM.getContext().getTargetAddressSpace( |
6683 | AS: CGM.GetGlobalConstantAddressSpace()); |
6684 | |
6685 | llvm::Module &M = CGM.getModule(); |
6686 | // Create a global variable for this string |
6687 | auto *GV = new llvm::GlobalVariable( |
6688 | M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName, |
6689 | nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace); |
6690 | GV->setAlignment(Alignment.getAsAlign()); |
6691 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
6692 | if (GV->isWeakForLinker()) { |
6693 | assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals" ); |
6694 | GV->setComdat(M.getOrInsertComdat(Name: GV->getName())); |
6695 | } |
6696 | CGM.setDSOLocal(GV); |
6697 | |
6698 | return GV; |
6699 | } |
6700 | |
6701 | /// GetAddrOfConstantStringFromLiteral - Return a pointer to a |
6702 | /// constant array for the given string literal. |
6703 | ConstantAddress |
6704 | CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S, |
6705 | StringRef Name) { |
6706 | CharUnits Alignment = |
6707 | getContext().getAlignOfGlobalVarInChars(T: S->getType(), /*VD=*/nullptr); |
6708 | |
6709 | llvm::Constant *C = GetConstantArrayFromStringLiteral(E: S); |
6710 | llvm::GlobalVariable **Entry = nullptr; |
6711 | if (!LangOpts.WritableStrings) { |
6712 | Entry = &ConstantStringMap[C]; |
6713 | if (auto GV = *Entry) { |
6714 | if (uint64_t(Alignment.getQuantity()) > GV->getAlignment()) |
6715 | GV->setAlignment(Alignment.getAsAlign()); |
6716 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6717 | GV->getValueType(), Alignment); |
6718 | } |
6719 | } |
6720 | |
6721 | SmallString<256> MangledNameBuffer; |
6722 | StringRef GlobalVariableName; |
6723 | llvm::GlobalValue::LinkageTypes LT; |
6724 | |
6725 | // Mangle the string literal if that's how the ABI merges duplicate strings. |
6726 | // Don't do it if they are writable, since we don't want writes in one TU to |
6727 | // affect strings in another. |
6728 | if (getCXXABI().getMangleContext().shouldMangleStringLiteral(SL: S) && |
6729 | !LangOpts.WritableStrings) { |
6730 | llvm::raw_svector_ostream Out(MangledNameBuffer); |
6731 | getCXXABI().getMangleContext().mangleStringLiteral(SL: S, Out); |
6732 | LT = llvm::GlobalValue::LinkOnceODRLinkage; |
6733 | GlobalVariableName = MangledNameBuffer; |
6734 | } else { |
6735 | LT = llvm::GlobalValue::PrivateLinkage; |
6736 | GlobalVariableName = Name; |
6737 | } |
6738 | |
6739 | auto GV = GenerateStringLiteral(C, LT, CGM&: *this, GlobalName: GlobalVariableName, Alignment); |
6740 | |
6741 | CGDebugInfo *DI = getModuleDebugInfo(); |
6742 | if (DI && getCodeGenOpts().hasReducedDebugInfo()) |
6743 | DI->AddStringLiteralDebugInfo(GV: GV, S); |
6744 | |
6745 | if (Entry) |
6746 | *Entry = GV; |
6747 | |
6748 | SanitizerMD->reportGlobal(GV, S->getStrTokenLoc(TokNum: 0), "<string literal>" ); |
6749 | |
6750 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV), |
6751 | GV->getValueType(), Alignment); |
6752 | } |
6753 | |
6754 | /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant |
6755 | /// array for the given ObjCEncodeExpr node. |
6756 | ConstantAddress |
6757 | CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) { |
6758 | std::string Str; |
6759 | getContext().getObjCEncodingForType(T: E->getEncodedType(), S&: Str); |
6760 | |
6761 | return GetAddrOfConstantCString(Str); |
6762 | } |
6763 | |
6764 | /// GetAddrOfConstantCString - Returns a pointer to a character array containing |
6765 | /// the literal and a terminating '\0' character. |
6766 | /// The result has pointer to array type. |
6767 | ConstantAddress CodeGenModule::GetAddrOfConstantCString( |
6768 | const std::string &Str, const char *GlobalName) { |
6769 | StringRef StrWithNull(Str.c_str(), Str.size() + 1); |
6770 | CharUnits Alignment = getContext().getAlignOfGlobalVarInChars( |
6771 | T: getContext().CharTy, /*VD=*/nullptr); |
6772 | |
6773 | llvm::Constant *C = |
6774 | llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: StrWithNull, AddNull: false); |
6775 | |
6776 | // Don't share any string literals if strings aren't constant. |
6777 | llvm::GlobalVariable **Entry = nullptr; |
6778 | if (!LangOpts.WritableStrings) { |
6779 | Entry = &ConstantStringMap[C]; |
6780 | if (auto GV = *Entry) { |
6781 | if (uint64_t(Alignment.getQuantity()) > GV->getAlignment()) |
6782 | GV->setAlignment(Alignment.getAsAlign()); |
6783 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6784 | GV->getValueType(), Alignment); |
6785 | } |
6786 | } |
6787 | |
6788 | // Get the default prefix if a name wasn't specified. |
6789 | if (!GlobalName) |
6790 | GlobalName = ".str" ; |
6791 | // Create a global variable for this. |
6792 | auto GV = GenerateStringLiteral(C, LT: llvm::GlobalValue::PrivateLinkage, CGM&: *this, |
6793 | GlobalName, Alignment); |
6794 | if (Entry) |
6795 | *Entry = GV; |
6796 | |
6797 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV), |
6798 | GV->getValueType(), Alignment); |
6799 | } |
6800 | |
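| /// Return the address of the global variable backing a lifetime-extended |
| /// temporary with static or thread storage duration, creating and caching it |
| /// on first use. |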
6801 | ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary( |
6802 | const MaterializeTemporaryExpr *E, const Expr *Init) { |
6803 | assert((E->getStorageDuration() == SD_Static || |
6804 | E->getStorageDuration() == SD_Thread) && "not a global temporary" ); |
6805 | const auto *VD = cast<VarDecl>(Val: E->getExtendingDecl()); |
6806 | |
6807 | // If we're not materializing a subobject of the temporary, keep the |
6808 | // cv-qualifiers from the type of the MaterializeTemporaryExpr. |
6809 | QualType MaterializedType = Init->getType(); |
6810 | if (Init == E->getSubExpr()) |
6811 | MaterializedType = E->getType(); |
6812 | |
6813 | CharUnits Align = getContext().getTypeAlignInChars(T: MaterializedType); |
6814 | |
6815 | auto InsertResult = MaterializedGlobalTemporaryMap.insert({E, nullptr}); |
6816 | if (!InsertResult.second) { |
6817 | // We've seen this before: either we already created it or we're in the |
6818 | // process of doing so. |
6819 | if (!InsertResult.first->second) { |
6820 | // We recursively re-entered this function, probably during emission of |
6821 | // the initializer. Create a placeholder. We'll clean this up in the |
6822 | // outer call, at the end of this function. |
6823 | llvm::Type *Type = getTypes().ConvertTypeForMem(T: MaterializedType); |
6824 | InsertResult.first->second = new llvm::GlobalVariable( |
6825 | getModule(), Type, false, llvm::GlobalVariable::InternalLinkage, |
6826 | nullptr); |
6827 | } |
6828 | return ConstantAddress(InsertResult.first->second, |
6829 | llvm::cast<llvm::GlobalVariable>( |
6830 | InsertResult.first->second->stripPointerCasts()) |
6831 | ->getValueType(), |
6832 | Align); |
6833 | } |
6834 | |
6835 | // FIXME: If an externally-visible declaration extends multiple temporaries, |
6836 | // we need to give each temporary the same name in every translation unit (and |
6837 | // we also need to make the temporaries externally-visible). |
6838 | SmallString<256> Name; |
6839 | llvm::raw_svector_ostream Out(Name); |
6840 | getCXXABI().getMangleContext().mangleReferenceTemporary( |
6841 | D: VD, ManglingNumber: E->getManglingNumber(), Out); |
6842 | |
6843 | APValue *Value = nullptr; |
6844 | if (E->getStorageDuration() == SD_Static && VD->evaluateValue()) { |
6845 | // If the initializer of the extending declaration is a constant |
6846 | // initializer, we should have a cached constant initializer for this |
6847 | // temporary. Note that this might have a different value from the value |
6848 | // computed by evaluating the initializer if the surrounding constant |
6849 | // expression modifies the temporary. |
6850 | Value = E->getOrCreateValue(MayCreate: false); |
6851 | } |
6852 | |
6853 | // Try evaluating it now, it might have a constant initializer. |
6854 | Expr::EvalResult EvalResult; |
6855 | if (!Value && Init->EvaluateAsRValue(Result&: EvalResult, Ctx: getContext()) && |
6856 | !EvalResult.hasSideEffects()) |
6857 | Value = &EvalResult.Val; |
6858 | |
6859 | LangAS AddrSpace = GetGlobalVarAddressSpace(D: VD); |
6860 | |
6861 | std::optional<ConstantEmitter> emitter; |
6862 | llvm::Constant *InitialValue = nullptr; |
6863 | bool Constant = false; |
6864 | llvm::Type *Type; |
6865 | if (Value) { |
6866 | // The temporary has a constant initializer, use it. |
6867 | emitter.emplace(args&: *this); |
6868 | InitialValue = emitter->emitForInitializer(value: *Value, destAddrSpace: AddrSpace, |
6869 | destType: MaterializedType); |
6870 | Constant = |
6871 | MaterializedType.isConstantStorage(Ctx: getContext(), /*ExcludeCtor*/ Value, |
6872 | /*ExcludeDtor*/ false); |
6873 | Type = InitialValue->getType(); |
6874 | } else { |
6875 | // No initializer; the initialization will be provided when we |
6876 | // initialize the declaration which performed the lifetime extension. |
6877 | Type = getTypes().ConvertTypeForMem(T: MaterializedType); |
6878 | } |
6879 | |
6880 | // Create a global variable for this lifetime-extended temporary. |
6881 | llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD); |
6882 | if (Linkage == llvm::GlobalVariable::ExternalLinkage) { |
6883 | const VarDecl *InitVD; |
6884 | if (VD->isStaticDataMember() && VD->getAnyInitializer(D&: InitVD) && |
6885 | isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) { |
6886 | // Temporaries defined inside a class get linkonce_odr linkage because the |
6887 | // class can be defined in multiple translation units. |
6888 | Linkage = llvm::GlobalVariable::LinkOnceODRLinkage; |
6889 | } else { |
6890 | // There is no need for this temporary to have external linkage if the |
6891 | // VarDecl has external linkage. |
6892 | Linkage = llvm::GlobalVariable::InternalLinkage; |
6893 | } |
6894 | } |
6895 | auto TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace); |
6896 | auto *GV = new llvm::GlobalVariable( |
6897 | getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(), |
6898 | /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS); |
6899 | if (emitter) emitter->finalize(global: GV); |
6900 | // Don't assign dllimport or dllexport to local linkage globals. |
6901 | if (!llvm::GlobalValue::isLocalLinkage(Linkage)) { |
6902 | setGVProperties(GV, GD: VD); |
6903 | if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass) |
6904 | // The reference temporary should never be dllexport. |
6905 | GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); |
6906 | } |
6907 | GV->setAlignment(Align.getAsAlign()); |
6908 | if (supportsCOMDAT() && GV->isWeakForLinker()) |
6909 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
6910 | if (VD->getTLSKind()) |
6911 | setTLSMode(GV, D: *VD); |
6912 | llvm::Constant *CV = GV; |
6913 | if (AddrSpace != LangAS::Default) |
6914 | CV = getTargetCodeGenInfo().performAddrSpaceCast( |
6915 | CGM&: *this, V: GV, SrcAddr: AddrSpace, |
6916 | DestTy: llvm::PointerType::get( |
6917 | C&: getLLVMContext(), |
6918 | AddressSpace: getContext().getTargetAddressSpace(AS: LangAS::Default))); |
6919 | |
6920 | // Update the map with the new temporary. If we created a placeholder above, |
6921 | // replace it with the new global now. |
6922 | llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E]; |
6923 | if (Entry) { |
6924 | Entry->replaceAllUsesWith(V: CV); |
6925 | llvm::cast<llvm::GlobalVariable>(Val: Entry)->eraseFromParent(); |
6926 | } |
6927 | Entry = CV; |
6928 | |
6929 | return ConstantAddress(CV, Type, Align); |
6930 | } |
6931 | |
6932 | /// EmitObjCPropertyImplementations - Emit information for synthesized |
6933 | /// properties for an implementation. |
6934 | void CodeGenModule::EmitObjCPropertyImplementations(const |
6935 | ObjCImplementationDecl *D) { |
6936 | for (const auto *PID : D->property_impls()) { |
6937 | // Dynamic is just for type-checking. |
6938 | if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) { |
6939 | ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
6940 | |
6941 | // Determine which methods need to be implemented; some may have |
6942 | // been overridden. Note that ::isPropertyAccessor is not the method |
6943 | // we want, that just indicates if the decl came from a |
6944 | // property. What we want to know is if the method is defined in |
6945 | // this implementation. |
6946 | auto *Getter = PID->getGetterMethodDecl(); |
6947 | if (!Getter || Getter->isSynthesizedAccessorStub()) |
6948 | CodeGenFunction(*this).GenerateObjCGetter( |
6949 | const_cast<ObjCImplementationDecl *>(D), PID); |
6950 | auto *Setter = PID->getSetterMethodDecl(); |
6951 | if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub())) |
6952 | CodeGenFunction(*this).GenerateObjCSetter( |
6953 | const_cast<ObjCImplementationDecl *>(D), PID); |
6954 | } |
6955 | } |
6956 | } |
6957 | |
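| // Returns true if any ivar declared by (or inherited into) the interface has |
| // a type that needs non-trivial destruction, in which case a .cxx_destruct |
| // method must be synthesized. |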
6958 | static bool needsDestructMethod(ObjCImplementationDecl *impl) { |
6959 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
6960 | for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); |
6961 | ivar; ivar = ivar->getNextIvar()) |
6962 | if (ivar->getType().isDestructedType()) |
6963 | return true; |
6964 | |
6965 | return false; |
6966 | } |
6967 | |
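| // Returns true if every ivar initializer in the implementation is trivial, |
| // in which case no .cxx_construct method is needed. |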
6968 | static bool AllTrivialInitializers(CodeGenModule &CGM, |
6969 | ObjCImplementationDecl *D) { |
6970 | CodeGenFunction CGF(CGM); |
6971 | for (ObjCImplementationDecl::init_iterator B = D->init_begin(), |
6972 | E = D->init_end(); B != E; ++B) { |
6973 | CXXCtorInitializer *CtorInitExp = *B; |
6974 | Expr *Init = CtorInitExp->getInit(); |
6975 | if (!CGF.isTrivialInitializer(Init)) |
6976 | return false; |
6977 | } |
6978 | return true; |
6979 | } |
6980 | |
6981 | /// EmitObjCIvarInitializations - Emit information for ivar initialization |
6982 | /// for an implementation. |
6983 | void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) { |
6984 | // We might need a .cxx_destruct even if we don't have any ivar initializers. |
6985 | if (needsDestructMethod(impl: D)) { |
6986 | const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_destruct" ); |
6987 | Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II); |
6988 | ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create( |
6989 | C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector, |
6990 | T: getContext().VoidTy, ReturnTInfo: nullptr, contextDecl: D, |
6991 | /*isInstance=*/true, /*isVariadic=*/false, |
6992 | /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false, |
6993 | /*isImplicitlyDeclared=*/true, |
6994 | /*isDefined=*/false, impControl: ObjCImplementationControl::Required); |
6995 | D->addInstanceMethod(DTORMethod); |
6996 | CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: DTORMethod, ctor: false); |
6997 | D->setHasDestructors(true); |
6998 | } |
6999 | |
7000 | // If the implementation doesn't have any ivar initializers, we don't need |
7001 | // a .cxx_construct. |
7002 | if (D->getNumIvarInitializers() == 0 || |
7003 | AllTrivialInitializers(CGM&: *this, D)) |
7004 | return; |
7005 | |
7006 | const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_construct" ); |
7007 | Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II); |
7008 | // The constructor returns 'self'. |
7009 | ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create( |
7010 | C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector, |
7011 | T: getContext().getObjCIdType(), ReturnTInfo: nullptr, contextDecl: D, /*isInstance=*/true, |
7012 | /*isVariadic=*/false, |
7013 | /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false, |
7014 | /*isImplicitlyDeclared=*/true, |
7015 | /*isDefined=*/false, impControl: ObjCImplementationControl::Required); |
7016 | D->addInstanceMethod(CTORMethod); |
7017 | CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: CTORMethod, ctor: true); |
7018 | D->setHasNonZeroConstructors(true); |
7019 | } |
7020 | |
7021 | // EmitLinkageSpec - Emit all declarations in a linkage spec. |
7022 | void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) { |
7023 | if (LSD->getLanguage() != LinkageSpecLanguageIDs::C && |
7024 | LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) { |
7025 | ErrorUnsupported(LSD, "linkage spec" ); |
7026 | return; |
7027 | } |
7028 | |
7029 | EmitDeclContext(LSD); |
7030 | } |
7031 | |
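| /// Emit a TopLevelStmtDecl: consecutive top-level statements are squashed |
| /// into a synthetic 'void __stmts__N(void)' function that runs as a global |
| /// initializer. |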
7032 | void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) { |
7033 | // Device code should not be at top level. |
7034 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) |
7035 | return; |
7036 | |
7037 | std::unique_ptr<CodeGenFunction> &CurCGF = |
7038 | GlobalTopLevelStmtBlockInFlight.first; |
7039 | |
7040 | // We emitted a top-level stmt but after it there is initialization. |
7041 | // Stop squashing the top-level stmts into a single function. |
7042 | if (CurCGF && CXXGlobalInits.back() != CurCGF->CurFn) { |
7043 | CurCGF->FinishFunction(EndLoc: D->getEndLoc()); |
7044 | CurCGF = nullptr; |
7045 | } |
7046 | |
7047 | if (!CurCGF) { |
7048 | // void __stmts__N(void) |
7049 | // FIXME: Ask the ABI name mangler to pick a name. |
7050 | std::string Name = "__stmts__" + llvm::utostr(X: CXXGlobalInits.size()); |
7051 | FunctionArgList Args; |
7052 | QualType RetTy = getContext().VoidTy; |
7053 | const CGFunctionInfo &FnInfo = |
7054 | getTypes().arrangeBuiltinFunctionDeclaration(resultType: RetTy, args: Args); |
7055 | llvm::FunctionType *FnTy = getTypes().GetFunctionType(Info: FnInfo); |
7056 | llvm::Function *Fn = llvm::Function::Create( |
7057 | Ty: FnTy, Linkage: llvm::GlobalValue::InternalLinkage, N: Name, M: &getModule()); |
7058 | |
7059 | CurCGF.reset(p: new CodeGenFunction(*this)); |
7060 | GlobalTopLevelStmtBlockInFlight.second = D; |
7061 | CurCGF->StartFunction(GD: GlobalDecl(), RetTy, Fn, FnInfo, Args, |
7062 | Loc: D->getBeginLoc(), StartLoc: D->getBeginLoc()); |
7063 | CXXGlobalInits.push_back(x: Fn); |
7064 | } |
7065 | |
7066 | CurCGF->EmitStmt(S: D->getStmt()); |
7067 | } |
7068 | |
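| /// Emit every declaration contained in the given declaration context, e.g. a |
| /// namespace, linkage spec, or export block. |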
7069 | void CodeGenModule::EmitDeclContext(const DeclContext *DC) { |
7070 | for (auto *I : DC->decls()) { |
7071 | // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope |
7072 | // are themselves considered "top-level", so EmitTopLevelDecl on an |
7073 | // ObjCImplDecl does not recursively visit them. We need to do that in |
7074 | // case they're nested inside another construct (LinkageSpecDecl / |
7075 | // ExportDecl) that does stop them from being considered "top-level". |
7076 | if (auto *OID = dyn_cast<ObjCImplDecl>(Val: I)) { |
7077 | for (auto *M : OID->methods()) |
7078 | EmitTopLevelDecl(M); |
7079 | } |
7080 | |
7081 | EmitTopLevelDecl(D: I); |
7082 | } |
7083 | } |
7084 | |
7085 | /// EmitTopLevelDecl - Emit code for a single top level declaration. |
7086 | void CodeGenModule::EmitTopLevelDecl(Decl *D) { |
7087 | // Ignore dependent declarations. |
7088 | if (D->isTemplated()) |
7089 | return; |
7090 | |
7091 | // Consteval functions shouldn't be emitted. |
7092 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D); FD && FD->isImmediateFunction()) |
7093 | return; |
7094 | |
7095 | switch (D->getKind()) { |
7096 | case Decl::CXXConversion: |
7097 | case Decl::CXXMethod: |
7098 | case Decl::Function: |
7099 | EmitGlobal(GD: cast<FunctionDecl>(Val: D)); |
7100 | // Always provide some coverage mapping |
7101 | // even for the functions that aren't emitted. |
7102 | AddDeferredUnusedCoverageMapping(D); |
7103 | break; |
7104 | |
7105 | case Decl::CXXDeductionGuide: |
7106 | // Function-like, but does not result in code emission. |
7107 | break; |
7108 | |
7109 | case Decl::Var: |
7110 | case Decl::Decomposition: |
7111 | case Decl::VarTemplateSpecialization: |
7112 | EmitGlobal(GD: cast<VarDecl>(Val: D)); |
7113 | if (auto *DD = dyn_cast<DecompositionDecl>(Val: D)) |
7114 | for (auto *B : DD->flat_bindings()) |
7115 | if (auto *HD = B->getHoldingVar()) |
7116 | EmitGlobal(GD: HD); |
7117 | |
7118 | break; |
7119 | |
7120 | // Indirect fields from global anonymous structs and unions can be |
7121 | // ignored; only the actual variable requires IR gen support. |
7122 | case Decl::IndirectField: |
7123 | break; |
7124 | |
7125 | // C++ Decls |
7126 | case Decl::Namespace: |
7127 | EmitDeclContext(cast<NamespaceDecl>(Val: D)); |
7128 | break; |
7129 | case Decl::ClassTemplateSpecialization: { |
7130 | const auto *Spec = cast<ClassTemplateSpecializationDecl>(Val: D); |
7131 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7132 | if (Spec->getSpecializationKind() == |
7133 | TSK_ExplicitInstantiationDefinition && |
7134 | Spec->hasDefinition()) |
7135 | DI->completeTemplateDefinition(SD: *Spec); |
7136 | } [[fallthrough]]; |
7137 | case Decl::CXXRecord: { |
7138 | CXXRecordDecl *CRD = cast<CXXRecordDecl>(Val: D); |
7139 | if (CGDebugInfo *DI = getModuleDebugInfo()) { |
7140 | if (CRD->hasDefinition()) |
7141 | DI->EmitAndRetainType(Ty: getContext().getRecordType(Decl: cast<RecordDecl>(Val: D))); |
7142 | if (auto *ES = D->getASTContext().getExternalSource()) |
7143 | if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never) |
7144 | DI->completeUnusedClass(D: *CRD); |
7145 | } |
7146 | // Emit any static data members, they may be definitions. |
7147 | for (auto *I : CRD->decls()) |
7148 | if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I) || isa<EnumDecl>(I)) |
7149 | EmitTopLevelDecl(I); |
7150 | break; |
7151 | } |
7152 | // No code generation needed. |
7153 | case Decl::UsingShadow: |
7154 | case Decl::ClassTemplate: |
7155 | case Decl::VarTemplate: |
7156 | case Decl::Concept: |
7157 | case Decl::VarTemplatePartialSpecialization: |
7158 | case Decl::FunctionTemplate: |
7159 | case Decl::TypeAliasTemplate: |
7160 | case Decl::Block: |
7161 | case Decl::Empty: |
7162 | case Decl::Binding: |
7163 | break; |
7164 | case Decl::Using: // using X; [C++] |
7165 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7166 | DI->EmitUsingDecl(UD: cast<UsingDecl>(Val&: *D)); |
7167 | break; |
7168 | case Decl::UsingEnum: // using enum X; [C++] |
7169 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7170 | DI->EmitUsingEnumDecl(UD: cast<UsingEnumDecl>(Val&: *D)); |
7171 | break; |
7172 | case Decl::NamespaceAlias: |
7173 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7174 | DI->EmitNamespaceAlias(NA: cast<NamespaceAliasDecl>(Val&: *D)); |
7175 | break; |
7176 | case Decl::UsingDirective: // using namespace X; [C++] |
7177 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7178 | DI->EmitUsingDirective(UD: cast<UsingDirectiveDecl>(Val&: *D)); |
7179 | break; |
7180 | case Decl::CXXConstructor: |
7181 | getCXXABI().EmitCXXConstructors(D: cast<CXXConstructorDecl>(Val: D)); |
7182 | break; |
7183 | case Decl::CXXDestructor: |
7184 | getCXXABI().EmitCXXDestructors(D: cast<CXXDestructorDecl>(Val: D)); |
7185 | break; |
7186 | |
7187 | case Decl::StaticAssert: |
7188 | // Nothing to do. |
7189 | break; |
7190 | |
7191 | // Objective-C Decls |
7192 | |
7193 | // Forward declarations, no (immediate) code generation. |
7194 | case Decl::ObjCInterface: |
7195 | case Decl::ObjCCategory: |
7196 | break; |
7197 | |
7198 | case Decl::ObjCProtocol: { |
7199 | auto *Proto = cast<ObjCProtocolDecl>(Val: D); |
7200 | if (Proto->isThisDeclarationADefinition()) |
7201 | ObjCRuntime->GenerateProtocol(OPD: Proto); |
7202 | break; |
7203 | } |
7204 | |
7205 | case Decl::ObjCCategoryImpl: |
7206 | // Categories have properties but don't support synthesize so we |
7207 | // can ignore them here. |
7208 | ObjCRuntime->GenerateCategory(OCD: cast<ObjCCategoryImplDecl>(Val: D)); |
7209 | break; |
7210 | |
7211 | case Decl::ObjCImplementation: { |
7212 | auto *OMD = cast<ObjCImplementationDecl>(Val: D); |
7213 | EmitObjCPropertyImplementations(D: OMD); |
7214 | EmitObjCIvarInitializations(D: OMD); |
7215 | ObjCRuntime->GenerateClass(OID: OMD); |
7216 | // Emit global variable debug information. |
7217 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7218 | if (getCodeGenOpts().hasReducedDebugInfo()) |
7219 | DI->getOrCreateInterfaceType(Ty: getContext().getObjCInterfaceType( |
7220 | Decl: OMD->getClassInterface()), Loc: OMD->getLocation()); |
7221 | break; |
7222 | } |
7223 | case Decl::ObjCMethod: { |
7224 | auto *OMD = cast<ObjCMethodDecl>(Val: D); |
7225 | // If this is not a prototype, emit the body. |
7226 | if (OMD->getBody()) |
7227 | CodeGenFunction(*this).GenerateObjCMethod(OMD); |
7228 | break; |
7229 | } |
7230 | case Decl::ObjCCompatibleAlias: |
7231 | ObjCRuntime->RegisterAlias(OAD: cast<ObjCCompatibleAliasDecl>(Val: D)); |
7232 | break; |
7233 | |
7234 | case Decl::PragmaComment: { |
7235 | const auto *PCD = cast<PragmaCommentDecl>(Val: D); |
7236 | switch (PCD->getCommentKind()) { |
7237 | case PCK_Unknown: |
7238 | llvm_unreachable("unexpected pragma comment kind" ); |
7239 | case PCK_Linker: |
7240 | AppendLinkerOptions(Opts: PCD->getArg()); |
7241 | break; |
7242 | case PCK_Lib: |
7243 | AddDependentLib(Lib: PCD->getArg()); |
7244 | break; |
7245 | case PCK_Compiler: |
7246 | case PCK_ExeStr: |
7247 | case PCK_User: |
7248 | break; // We ignore all of these. |
7249 | } |
7250 | break; |
7251 | } |
7252 | |
7253 | case Decl::PragmaDetectMismatch: { |
7254 | const auto *PDMD = cast<PragmaDetectMismatchDecl>(Val: D); |
7255 | AddDetectMismatch(Name: PDMD->getName(), Value: PDMD->getValue()); |
7256 | break; |
7257 | } |
7258 | |
7259 | case Decl::LinkageSpec: |
7260 | EmitLinkageSpec(LSD: cast<LinkageSpecDecl>(Val: D)); |
7261 | break; |
7262 | |
7263 | case Decl::FileScopeAsm: { |
7264 | // File-scope asm is ignored during device-side CUDA compilation. |
7265 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) |
7266 | break; |
7267 | // File-scope asm is ignored during device-side OpenMP compilation. |
7268 | if (LangOpts.OpenMPIsTargetDevice) |
7269 | break; |
7270 | // File-scope asm is ignored during device-side SYCL compilation. |
7271 | if (LangOpts.SYCLIsDevice) |
7272 | break; |
7273 | auto *AD = cast<FileScopeAsmDecl>(Val: D); |
7274 | getModule().appendModuleInlineAsm(Asm: AD->getAsmString()); |
7275 | break; |
7276 | } |
7277 | |
7278 | case Decl::TopLevelStmt: |
7279 | EmitTopLevelStmt(D: cast<TopLevelStmtDecl>(Val: D)); |
7280 | break; |
7281 | |
7282 | case Decl::Import: { |
7283 | auto *Import = cast<ImportDecl>(Val: D); |
7284 | |
7285 | // If we've already imported this module, we're done. |
7286 | if (!ImportedModules.insert(X: Import->getImportedModule())) |
7287 | break; |
7288 | |
7289 | // Emit debug information for direct imports. |
7290 | if (!Import->getImportedOwningModule()) { |
7291 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7292 | DI->EmitImportDecl(ID: *Import); |
7293 | } |
7294 | |
7295 | // For C++ standard modules we are done - we will call the module |
7296 | // initializer for imported modules, and that will likewise call those for |
7297 | // any imports it has. |
7298 | if (CXX20ModuleInits && Import->getImportedModule() && |
7299 | Import->getImportedModule()->isNamedModule()) |
7300 | break; |
7301 | |
7302 | // For clang C++ module map modules the initializers for sub-modules are |
7303 | // emitted here. |
7304 | |
7305 | // Find all of the submodules and emit the module initializers. |
7306 | llvm::SmallPtrSet<clang::Module *, 16> Visited; |
7307 | SmallVector<clang::Module *, 16> Stack; |
7308 | Visited.insert(Ptr: Import->getImportedModule()); |
7309 | Stack.push_back(Elt: Import->getImportedModule()); |
7310 | |
7311 | while (!Stack.empty()) { |
7312 | clang::Module *Mod = Stack.pop_back_val(); |
7313 | if (!EmittedModuleInitializers.insert(Ptr: Mod).second) |
7314 | continue; |
7315 | |
7316 | for (auto *D : Context.getModuleInitializers(M: Mod)) |
7317 | EmitTopLevelDecl(D); |
7318 | |
7319 | // Visit the submodules of this module. |
7320 | for (auto *Submodule : Mod->submodules()) { |
7321 | // Skip explicit children; they need to be explicitly imported to emit |
7322 | // the initializers. |
7323 | if (Submodule->IsExplicit) |
7324 | continue; |
7325 | |
7326 | if (Visited.insert(Ptr: Submodule).second) |
7327 | Stack.push_back(Elt: Submodule); |
7328 | } |
7329 | } |
7330 | break; |
7331 | } |
7332 | |
7333 | case Decl::Export: |
7334 | EmitDeclContext(cast<ExportDecl>(Val: D)); |
7335 | break; |
7336 | |
7337 | case Decl::OMPThreadPrivate: |
7338 | EmitOMPThreadPrivateDecl(D: cast<OMPThreadPrivateDecl>(Val: D)); |
7339 | break; |
7340 | |
7341 | case Decl::OMPAllocate: |
7342 | EmitOMPAllocateDecl(D: cast<OMPAllocateDecl>(Val: D)); |
7343 | break; |
7344 | |
7345 | case Decl::OMPDeclareReduction: |
7346 | EmitOMPDeclareReduction(D: cast<OMPDeclareReductionDecl>(Val: D)); |
7347 | break; |
7348 | |
7349 | case Decl::OMPDeclareMapper: |
7350 | EmitOMPDeclareMapper(D: cast<OMPDeclareMapperDecl>(Val: D)); |
7351 | break; |
7352 | |
7353 | case Decl::OMPRequires: |
7354 | EmitOMPRequiresDecl(D: cast<OMPRequiresDecl>(Val: D)); |
7355 | break; |
7356 | |
7357 | case Decl::Typedef: |
7358 | case Decl::TypeAlias: // using foo = bar; [C++11] |
7359 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7360 | DI->EmitAndRetainType( |
7361 | Ty: getContext().getTypedefType(Decl: cast<TypedefNameDecl>(Val: D))); |
7362 | break; |
7363 | |
7364 | case Decl::Record: |
7365 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7366 | if (cast<RecordDecl>(Val: D)->getDefinition()) |
7367 | DI->EmitAndRetainType(Ty: getContext().getRecordType(Decl: cast<RecordDecl>(Val: D))); |
7368 | break; |
7369 | |
7370 | case Decl::Enum: |
7371 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7372 | if (cast<EnumDecl>(Val: D)->getDefinition()) |
7373 | DI->EmitAndRetainType(Ty: getContext().getEnumType(Decl: cast<EnumDecl>(Val: D))); |
7374 | break; |
7375 | |
7376 | case Decl::HLSLBuffer: |
7377 | getHLSLRuntime().addBuffer(D: cast<HLSLBufferDecl>(Val: D)); |
7378 | break; |
7379 | |
7380 | case Decl::OpenACCDeclare: |
7381 | EmitOpenACCDeclare(D: cast<OpenACCDeclareDecl>(Val: D)); |
7382 | break; |
7383 | case Decl::OpenACCRoutine: |
7384 | EmitOpenACCRoutine(D: cast<OpenACCRoutineDecl>(Val: D)); |
7385 | break; |
7386 | |
7387 | default: |
7388 | // Make sure we handled everything we should, every other kind is a |
7389 | // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind |
7390 | // function. Need to recode Decl::Kind to do that easily. |
7391 | assert(isa<TypeDecl>(D) && "Unsupported decl kind" ); |
7392 | break; |
7393 | } |
7394 | } |
7395 | |
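| /// Remember a function-like declaration whose body may never be emitted, so |
| /// that an empty coverage mapping can still be produced for it later. |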
7396 | void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { |
7397 | // Do we need to generate coverage mapping? |
7398 | if (!CodeGenOpts.CoverageMapping) |
7399 | return; |
7400 | switch (D->getKind()) { |
7401 | case Decl::CXXConversion: |
7402 | case Decl::CXXMethod: |
7403 | case Decl::Function: |
7404 | case Decl::ObjCMethod: |
7405 | case Decl::CXXConstructor: |
7406 | case Decl::CXXDestructor: { |
7407 | if (!cast<FunctionDecl>(Val: D)->doesThisDeclarationHaveABody()) |
7408 | break; |
7409 | SourceManager &SM = getContext().getSourceManager(); |
7410 | if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(SpellingLoc: D->getBeginLoc())) |
7411 | break; |
7412 | if (!llvm::coverage::SystemHeadersCoverage && |
7413 | SM.isInSystemHeader(Loc: D->getBeginLoc())) |
7414 | break; |
7415 | DeferredEmptyCoverageMappingDecls.try_emplace(Key: D, Args: true); |
7416 | break; |
7417 | } |
7418 | default: |
7419 | break; |
7420 | }; |
7421 | } |
7422 | |
7423 | void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) { |
7424 | // Do we need to generate coverage mapping? |
7425 | if (!CodeGenOpts.CoverageMapping) |
7426 | return; |
7427 | if (const auto *Fn = dyn_cast<FunctionDecl>(Val: D)) { |
7428 | if (Fn->isTemplateInstantiation()) |
7429 | ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern()); |
7430 | } |
7431 | DeferredEmptyCoverageMappingDecls.insert_or_assign(Key: D, Val: false); |
7432 | } |
7433 | |
7434 | void CodeGenModule::EmitDeferredUnusedCoverageMappings() { |
7435 | // We call takeVector() here to avoid use-after-free. |
7436 | // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because |
7437 | // we deserialize function bodies to emit coverage info for them, and that |
7438 | // deserializes more declarations. How should we handle that case? |
7439 | for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) { |
7440 | if (!Entry.second) |
7441 | continue; |
7442 | const Decl *D = Entry.first; |
7443 | switch (D->getKind()) { |
7444 | case Decl::CXXConversion: |
7445 | case Decl::CXXMethod: |
7446 | case Decl::Function: |
7447 | case Decl::ObjCMethod: { |
7448 | CodeGenPGO PGO(*this); |
7449 | GlobalDecl GD(cast<FunctionDecl>(Val: D)); |
7450 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7451 | Linkage: getFunctionLinkage(GD)); |
7452 | break; |
7453 | } |
7454 | case Decl::CXXConstructor: { |
7455 | CodeGenPGO PGO(*this); |
7456 | GlobalDecl GD(cast<CXXConstructorDecl>(Val: D), Ctor_Base); |
7457 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7458 | Linkage: getFunctionLinkage(GD)); |
7459 | break; |
7460 | } |
7461 | case Decl::CXXDestructor: { |
7462 | CodeGenPGO PGO(*this); |
7463 | GlobalDecl GD(cast<CXXDestructorDecl>(Val: D), Dtor_Base); |
7464 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7465 | Linkage: getFunctionLinkage(GD)); |
7466 | break; |
7467 | } |
7468 | default: |
7469 | break; |
7470 | }; |
7471 | } |
7472 | } |
7473 | |
7474 | void CodeGenModule::EmitMainVoidAlias() { |
7475 | // In order to transition away from "__original_main" gracefully, emit an |
7476 | // alias for "main" in the no-argument case so that libc can detect when |
7477 | // new-style no-argument main is in use. |
7478 | if (llvm::Function *F = getModule().getFunction(Name: "main" )) { |
7479 | if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() && |
7480 | F->getReturnType()->isIntegerTy(Bitwidth: Context.getTargetInfo().getIntWidth())) { |
7481 | auto *GA = llvm::GlobalAlias::create(Name: "__main_void" , Aliasee: F); |
7482 | GA->setVisibility(llvm::GlobalValue::HiddenVisibility); |
7483 | } |
7484 | } |
7485 | } |
7486 | |
7487 | /// Turns the given pointer into a constant. |
7488 | static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context, |
7489 | const void *Ptr) { |
7490 | uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr); |
7491 | llvm::Type *i64 = llvm::Type::getInt64Ty(C&: Context); |
7492 | return llvm::ConstantInt::get(Ty: i64, V: PtrInt); |
7493 | } |
7494 | |
7495 | static void EmitGlobalDeclMetadata(CodeGenModule &CGM, |
7496 | llvm::NamedMDNode *&GlobalMetadata, |
7497 | GlobalDecl D, |
7498 | llvm::GlobalValue *Addr) { |
7499 | if (!GlobalMetadata) |
7500 | GlobalMetadata = |
7501 | CGM.getModule().getOrInsertNamedMetadata(Name: "clang.global.decl.ptrs" ); |
7502 | |
7503 | // TODO: should we report variant information for ctors/dtors? |
7504 | llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(C: Addr), |
7505 | llvm::ConstantAsMetadata::get(C: GetPointerConstant( |
7506 | Context&: CGM.getLLVMContext(), Ptr: D.getDecl()))}; |
7507 | GlobalMetadata->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops)); |
7508 | } |
7509 | |
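| /// Check that every user of Elem is an ifunc (possibly through a bitcast); if |
| /// so, point those resolvers at CppFunc, erase Elem, and return true. |
| /// Otherwise leave everything untouched and return false. |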
7510 | bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem, |
7511 | llvm::GlobalValue *CppFunc) { |
7512 | // Store the list of ifuncs we need to replace uses in. |
7513 | llvm::SmallVector<llvm::GlobalIFunc *> IFuncs; |
7514 | // List of ConstantExprs that we should be able to delete when we're done |
7515 | // here. |
7516 | llvm::SmallVector<llvm::ConstantExpr *> CEs; |
7517 | |
7518 | // It isn't valid to do the replacement if the extern-C value is the C++ function itself! |
7519 | if (Elem == CppFunc) |
7520 | return false; |
7521 | |
7522 | // First make sure that all users of this are ifuncs (or ifuncs via a |
7523 | // bitcast), and collect the list of ifuncs and CEs so we can work on them |
7524 | // later. |
7525 | for (llvm::User *User : Elem->users()) { |
7526 | // Users can either be a bitcast ConstExpr that is used by the ifuncs, OR an |
7527 | // ifunc directly. In any other case, just give up, as we don't know what we |
7528 | // could break by changing those. |
7529 | if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(Val: User)) { |
7530 | if (ConstExpr->getOpcode() != llvm::Instruction::BitCast) |
7531 | return false; |
7532 | |
7533 | for (llvm::User *CEUser : ConstExpr->users()) { |
7534 | if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: CEUser)) { |
7535 | IFuncs.push_back(Elt: IFunc); |
7536 | } else { |
7537 | return false; |
7538 | } |
7539 | } |
7540 | CEs.push_back(Elt: ConstExpr); |
7541 | } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: User)) { |
7542 | IFuncs.push_back(Elt: IFunc); |
7543 | } else { |
7544 | // This user is one we don't know how to handle, so fail redirection. This |
7545 | // will result in an ifunc retaining a resolver name that will ultimately |
7546 | // fail to be resolved to a defined function. |
7547 | return false; |
7548 | } |
7549 | } |
7550 | |
7551 | // Now that we know this is a valid case where we can do this alias |
7552 | // replacement, we need to remove all of the references to Elem (and the |
7553 | // bitcasts!) so we can delete it. |
7554 | for (llvm::GlobalIFunc *IFunc : IFuncs) |
7555 | IFunc->setResolver(nullptr); |
7556 | for (llvm::ConstantExpr *ConstExpr : CEs) |
7557 | ConstExpr->destroyConstant(); |
7558 | |
7559 | // We should now be out of uses for the 'old' version of this function, so we |
7560 | // can erase it as well. |
7561 | Elem->eraseFromParent(); |
7562 | |
7563 | for (llvm::GlobalIFunc *IFunc : IFuncs) { |
7564 | // The type of the resolver is always just a function-type that returns the |
7565 | // type of the IFunc, so create that here. If the type of the actual |
7566 | // resolver doesn't match, it just gets bitcast to the right thing. |
7567 | auto *ResolverTy = |
7568 | llvm::FunctionType::get(Result: IFunc->getType(), /*isVarArg*/ false); |
7569 | llvm::Constant *Resolver = GetOrCreateLLVMFunction( |
7570 | MangledName: CppFunc->getName(), Ty: ResolverTy, GD: {}, /*ForVTable*/ false); |
7571 | IFunc->setResolver(Resolver); |
7572 | } |
7573 | return true; |
7574 | } |
7575 | |
7576 | /// For each function which is declared within an extern "C" region and marked |
7577 | /// as 'used', but has internal linkage, create an alias from the unmangled |
7578 | /// name to the mangled name if possible. People expect to be able to refer |
7579 | /// to such functions with an unmangled name from inline assembly within the |
7580 | /// same translation unit. |
7581 | void CodeGenModule::EmitStaticExternCAliases() { |
7582 | if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases()) |
7583 | return; |
7584 | for (auto &I : StaticExternCValues) { |
7585 | const IdentifierInfo *Name = I.first; |
7586 | llvm::GlobalValue *Val = I.second; |
7587 | |
7588 | // If Val is null, that implies there were multiple declarations that each |
7589 | // had a claim to the unmangled name. In this case, generation of the alias |
7590 | // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC. |
7591 | if (!Val) |
7592 | break; |
7593 | |
7594 | llvm::GlobalValue *ExistingElem = |
7595 | getModule().getNamedValue(Name: Name->getName()); |
7596 | |
7597 | // If nothing already exists by this name, or we were able to replace all |
7598 | // uses from IFuncs, create the alias. |
7599 | if (!ExistingElem || CheckAndReplaceExternCIFuncs(Elem: ExistingElem, CppFunc: Val)) |
7600 | addCompilerUsedGlobal(GV: llvm::GlobalAlias::create(Name: Name->getName(), Aliasee: Val)); |
7601 | } |
7602 | } |
7603 | |
7604 | bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName, |
7605 | GlobalDecl &Result) const { |
7606 | auto Res = Manglings.find(Key: MangledName); |
7607 | if (Res == Manglings.end()) |
7608 | return false; |
7609 | Result = Res->getValue(); |
7610 | return true; |
7611 | } |
7612 | |
7613 | /// Emits metadata nodes associating all the global values in the |
7614 | /// current module with the Decls they came from. This is useful for |
7615 | /// projects using IR gen as a subroutine. |
7616 | /// |
7617 | /// Since there's currently no way to associate an MDNode directly |
7618 | /// with an llvm::GlobalValue, we create a global named metadata |
7619 | /// with the name 'clang.global.decl.ptrs'. |
7620 | void CodeGenModule::EmitDeclMetadata() { |
7621 | llvm::NamedMDNode *GlobalMetadata = nullptr; |
7622 | |
7623 | for (auto &I : MangledDeclNames) { |
7624 | llvm::GlobalValue *Addr = getModule().getNamedValue(Name: I.second); |
7625 | // Some mangled names don't necessarily have an associated GlobalValue |
7626 | // in this module, e.g. if we mangled it for DebugInfo. |
7627 | if (Addr) |
7628 | EmitGlobalDeclMetadata(CGM&: *this, GlobalMetadata, D: I.first, Addr); |
7629 | } |
7630 | } |
7631 | |
7632 | /// Emits metadata nodes for all the local variables in the current |
7633 | /// function. |
void CodeGenFunction::EmitDeclMetadata() {
  if (LocalDeclMap.empty()) return;

  llvm::LLVMContext &Context = getLLVMContext();

  // Find the unique metadata ID for this name.
  unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");

  llvm::NamedMDNode *GlobalMetadata = nullptr;

  for (auto &I : LocalDeclMap) {
    const Decl *D = I.first;
    llvm::Value *Addr = I.second.emitRawPointer(*this);
    if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
      llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
      Alloca->setMetadata(
          DeclPtrKind, llvm::MDNode::get(
                           Context, llvm::ValueAsMetadata::getConstant(DAddr)));
    } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
      GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
      EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
    }
  }
}

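// The 'llvm.ident' named metadata records the producer string; roughly (the
// exact version text varies by build):
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 19.0.0"}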
void CodeGenModule::EmitVersionIdentMetadata() {
  llvm::NamedMDNode *IdentMetadata =
      TheModule.getOrInsertNamedMetadata("llvm.ident");
  std::string Version = getClangFullVersion();
  llvm::LLVMContext &Ctx = TheModule.getContext();

  llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
  IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
}

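// With -frecord-command-line, the driver populates RecordCommandLine and the
// result is recorded as, roughly:
//   !llvm.commandline = !{!0}
//   !0 = !{!"<the recorded cc1 command line>"}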
void CodeGenModule::EmitCommandLineMetadata() {
  llvm::NamedMDNode *CommandLineMetadata =
      TheModule.getOrInsertNamedMetadata("llvm.commandline");
  std::string CommandLine = getCodeGenOpts().RecordCommandLine;
  llvm::LLVMContext &Ctx = TheModule.getContext();

  llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
  CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
}

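// Mirrors the loop below: for each compile unit in 'llvm.dbg.cu', one
// 'llvm.gcov' operand of the form (file names are illustrative)
//   !{!"foo.gcno", !"foo.gcda", !<compile unit>}
// is added, pairing the notes/data files used by gcov-style coverage.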
void CodeGenModule::EmitCoverageFile() {
  llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
  if (!CUNode)
    return;

  llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
  llvm::LLVMContext &Ctx = TheModule.getContext();
  auto *CoverageDataFile =
      llvm::MDString::get(Ctx, getCodeGenOpts().CoverageDataFile);
  auto *CoverageNotesFile =
      llvm::MDString::get(Ctx, getCodeGenOpts().CoverageNotesFile);
  for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
    llvm::MDNode *CU = CUNode->getOperand(i);
    llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
    GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
  }
}

llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
                                                       bool ForEH) {
  // Return a bogus pointer if RTTI is disabled, unless it's for EH.
  // FIXME: should we even be calling this method if RTTI is disabled
  // and it's not for EH?
  if (!shouldEmitRTTI(ForEH))
    return llvm::Constant::getNullValue(GlobalsInt8PtrTy);

  if (ForEH && Ty->isObjCObjectPointerType() &&
      LangOpts.ObjCRuntime.isGNUFamily())
    return ObjCRuntime->GetEHType(Ty);

  return getCXXABI().getAddrOfRTTIDescriptor(Ty);
}

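// Handles declarations introduced by, e.g.:
//   static int counter;
//   #pragma omp threadprivate(counter)
// For a non-constant initializer, the runtime-provided initialization
// function is queued alongside the other global initializers.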
void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
  // Do not emit threadprivates in simd-only mode.
  if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
    return;
  for (auto RefExpr : D->varlist()) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
    bool PerformInit =
        VD->getAnyInitializer() &&
        !VD->getAnyInitializer()->isConstantInitializer(getContext(),
                                                        /*ForRef=*/false);

    Address Addr(GetAddrOfGlobalVar(VD),
                 getTypes().ConvertTypeForMem(VD->getType()),
                 getContext().getDeclAlign(VD));
    if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
            VD, Addr, RefExpr->getBeginLoc(), PerformInit))
      CXXGlobalInits.push_back(InitFunction);
  }
}

llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
                                            StringRef Suffix) {
  if (auto *FnType = T->getAs<FunctionProtoType>())
    T = getContext().getFunctionType(
        FnType->getReturnType(), FnType->getParamTypes(),
        FnType->getExtProtoInfo().withExceptionSpec(EST_None));

  llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
  if (InternalId)
    return InternalId;

  if (isExternallyVisible(T->getLinkage())) {
    std::string OutName;
    llvm::raw_string_ostream Out(OutName);
    getCXXABI().getMangleContext().mangleCanonicalTypeName(
        T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);

    if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
      Out << ".normalized";

    Out << Suffix;

    InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
  } else {
    InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
                                           llvm::ArrayRef<llvm::Metadata *>());
  }

  return InternalId;
}

llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
  return CreateMetadataIdentifierImpl(T, MetadataIdMap, "");
}

llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
  return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
}

// Generalize pointer types to a void pointer with the qualifiers of the
// originally pointed-to type, e.g. 'const char *' and 'char * const *'
// generalize to 'const void *' while 'char *' and 'const char **' generalize
// to 'void *'.
static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
  if (!Ty->isPointerType())
    return Ty;

  return Ctx.getPointerType(
      QualType(Ctx.VoidTy).withCVRQualifiers(
          Ty->getPointeeType().getCVRQualifiers()));
}

// Apply type generalization to a FunctionType's return and argument types.
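// For example, 'int (const char *, char **)' generalizes to
// 'int (const void *, void *)'.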
static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
  if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
    SmallVector<QualType, 8> GeneralizedParams;
    for (auto &Param : FnType->param_types())
      GeneralizedParams.push_back(GeneralizeType(Ctx, Param));

    return Ctx.getFunctionType(
        GeneralizeType(Ctx, FnType->getReturnType()),
        GeneralizedParams, FnType->getExtProtoInfo());
  }

  if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
    return Ctx.getFunctionNoProtoType(
        GeneralizeType(Ctx, FnType->getReturnType()));

  llvm_unreachable("Encountered unknown FunctionType");
}

llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
  return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
                                      GeneralizedMetadataIdMap, ".generalized");
}

/// Returns whether this module needs the "all-vtables" type identifier.
bool CodeGenModule::NeedAllVtablesTypeId() const {
  // Returns true if at least one of the vtable-based CFI checkers is enabled
  // and is not in trapping mode.
  return ((LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIVCall)) ||
          (LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFINVCall)) ||
          (LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIDerivedCast)) ||
          (LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIUnrelatedCast)));
}

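// Attaches CFI type metadata to a vtable global. As a rough sketch (mangled
// names and offsets are illustrative), the result looks like:
//   @_ZTV1A = ..., !type !0, !type !1
//   !0 = !{i64 16, !"_ZTS1A"}        ; address-point offset + type id
//   !1 = !{i64 16, !"all-vtables"}   ; only when NeedAllVtablesTypeId()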
void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
                                          CharUnits Offset,
                                          const CXXRecordDecl *RD) {
  llvm::Metadata *MD =
      CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
  VTable->addTypeMetadata(Offset.getQuantity(), MD);

  if (CodeGenOpts.SanitizeCfiCrossDso)
    if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
      VTable->addTypeMetadata(Offset.getQuantity(),
                              llvm::ConstantAsMetadata::get(CrossDsoTypeId));

  if (NeedAllVtablesTypeId()) {
    llvm::Metadata *MD = llvm::MDString::get(getLLVMContext(), "all-vtables");
    VTable->addTypeMetadata(Offset.getQuantity(), MD);
  }
}

llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
  if (!SanStats)
    SanStats = std::make_unique<llvm::SanitizerStatReport>(&getModule());

  return *SanStats;
}

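// In OpenCL, a sampler may be initialized from an integer constant, e.g.:
//   const sampler_t S = CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
// The integer is converted to the target's sampler representation by calling
// __translate_sampler_initializer, as emitted below.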
llvm::Value *
CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
                                                  CodeGenFunction &CGF) {
  llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, E->getType());
  auto *SamplerT = getOpenCLRuntime().getSamplerType(E->getType().getTypePtr());
  auto *FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
  auto *Call = CGF.EmitRuntimeCall(
      CreateRuntimeFunction(FTy, "__translate_sampler_initializer"), {C});
  return Call;
}

CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo,
                                                 bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = getTBAAAccessInfo(T);

  // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
  // that doesn't return the information we need to compute BaseInfo.

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  bool AlignForArray = T->isArrayType();

  // Analyze the base element type, so we don't get confused by incomplete
  // array types.
  T = getContext().getBaseElementType(T);

  if (T->isIncompleteType()) {
    // We could try to replicate the logic from
    // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
    // type is incomplete, so it's impossible to test. We could try to reuse
    // getTypeAlignIfKnown, but that doesn't return the information we need
    // to set BaseInfo. So just ignore the possibility that the alignment is
    // greater than one.
    if (BaseInfo)
      *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
    return CharUnits::One();
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  const CXXRecordDecl *RD;
  if (T.getQualifiers().hasUnaligned()) {
    Alignment = CharUnits::One();
  } else if (forPointeeType && !AlignForArray &&
             (RD = T->getAsCXXRecordDecl())) {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    Alignment = getClassPointerAlignment(RD);
  } else {
    Alignment = getContext().getTypeAlignInChars(T);
  }

  // Cap to the global maximum type alignment unless the alignment
  // was somehow explicit on the type.
  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
    if (Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(MaxAlign);
  }
  return Alignment;
}

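// Supports -ftrivial-auto-var-init-stop-after=N, e.g. (illustrative):
//   clang -ftrivial-auto-var-init=pattern -ftrivial-auto-var-init-stop-after=100 ...
// Returns true once N automatic-variable initializations have been emitted,
// so later variables are left uninitialized; a one-time warning notes that
// the limit is enabled.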
bool CodeGenModule::stopAutoInit() {
  unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
  if (StopAfter) {
    // This number is positive only when -ftrivial-auto-var-init-stop-after=*
    // is used.
    if (NumAutoVarInit >= StopAfter) {
      return true;
    }
    if (!NumAutoVarInit) {
      unsigned DiagID = getDiags().getCustomDiagID(
          DiagnosticsEngine::Warning,
          "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
          "number of times ftrivial-auto-var-init=%1 gets applied.");
      getDiags().Report(DiagID)
          << StopAfter
          << (getContext().getLangOpts().getTrivialAutoVarInit() ==
                      LangOptions::TrivialAutoVarInitKind::Zero
                  ? "zero"
                  : "pattern");
    }
    ++NumAutoVarInit;
  }
  return false;
}

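// Rough shape of the appended postfix (values illustrative): when a CUID is
// available the postfix is, e.g., "__static__<cuid-hash>"; otherwise it is
// built from the file's UniqueID and a hash of the user-defined macros, e.g.
// ".static.<file-id><device-id>_<macro-hash>" under HIP.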
void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
                                                    const Decl *D) const {
  // ptxas does not allow '.' in symbol names. On the other hand, HIP prefers
  // a postfix beginning with '.' so that the symbol name can still be
  // demangled.
  if (LangOpts.HIP)
    OS << (isa<VarDecl>(D) ? ".static." : ".intern.");
  else
    OS << (isa<VarDecl>(D) ? "__static__" : "__intern__");

  // If the CUID is not specified, we try to generate a unique postfix.
  if (getLangOpts().CUID.empty()) {
    SourceManager &SM = getContext().getSourceManager();
    PresumedLoc PLoc = SM.getPresumedLoc(D->getLocation());
    assert(PLoc.isValid() && "Source location is expected to be valid.");

    // Get the hash of the user defined macros.
    llvm::MD5 Hash;
    llvm::MD5::MD5Result Result;
    for (const auto &Arg : PreprocessorOpts.Macros)
      Hash.update(Arg.first);
    Hash.final(Result);

    // Get the UniqueID for the file containing the decl.
    llvm::sys::fs::UniqueID ID;
    if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
      PLoc = SM.getPresumedLoc(D->getLocation(), /*UseLineDirectives=*/false);
      assert(PLoc.isValid() && "Source location is expected to be valid.");
      if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
        SM.getDiagnostics().Report(diag::err_cannot_open_file)
            << PLoc.getFilename() << EC.message();
    }
    OS << llvm::format("%x", ID.getFile()) << llvm::format("%x", ID.getDevice())
       << "_" << llvm::utohexstr(Result.low(), /*LowerCase=*/true, /*Width=*/8);
  } else {
    OS << getContext().getCUIDHash();
  }
}

void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
  assert(DeferredDeclsToEmit.empty() &&
         "Should have emitted all decls deferred to emit.");
  assert(NewBuilder->DeferredDecls.empty() &&
         "Newly created module should not have deferred decls");
  NewBuilder->DeferredDecls = std::move(DeferredDecls);
  assert(EmittedDeferredDecls.empty() &&
         "Still have (unmerged) EmittedDeferredDecls deferred decls");

  assert(NewBuilder->DeferredVTables.empty() &&
         "Newly created module should not have deferred vtables");
  NewBuilder->DeferredVTables = std::move(DeferredVTables);

  assert(NewBuilder->MangledDeclNames.empty() &&
         "Newly created module should not have mangled decl names");
  assert(NewBuilder->Manglings.empty() &&
         "Newly created module should not have manglings");
  NewBuilder->Manglings = std::move(Manglings);

  NewBuilder->WeakRefReferences = std::move(WeakRefReferences);

  NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);
}