1//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the actions class which performs semantic analysis and
10// builds an AST out of a parse stream.
11//
12//===----------------------------------------------------------------------===//
13
14#include "UsedDeclVisitor.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/ASTDiagnostic.h"
17#include "clang/AST/Decl.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/DeclFriend.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/Expr.h"
22#include "clang/AST/ExprCXX.h"
23#include "clang/AST/PrettyDeclStackTrace.h"
24#include "clang/AST/StmtCXX.h"
25#include "clang/AST/TypeOrdering.h"
26#include "clang/Basic/DarwinSDKInfo.h"
27#include "clang/Basic/DiagnosticOptions.h"
28#include "clang/Basic/PartialDiagnostic.h"
29#include "clang/Basic/SourceManager.h"
30#include "clang/Basic/TargetInfo.h"
31#include "clang/Lex/HeaderSearch.h"
32#include "clang/Lex/HeaderSearchOptions.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/CXXFieldCollector.h"
35#include "clang/Sema/EnterExpressionEvaluationContext.h"
36#include "clang/Sema/ExternalSemaSource.h"
37#include "clang/Sema/Initialization.h"
38#include "clang/Sema/MultiplexExternalSemaSource.h"
39#include "clang/Sema/ObjCMethodList.h"
40#include "clang/Sema/RISCVIntrinsicManager.h"
41#include "clang/Sema/Scope.h"
42#include "clang/Sema/ScopeInfo.h"
43#include "clang/Sema/SemaAMDGPU.h"
44#include "clang/Sema/SemaARM.h"
45#include "clang/Sema/SemaAVR.h"
46#include "clang/Sema/SemaBPF.h"
47#include "clang/Sema/SemaCUDA.h"
48#include "clang/Sema/SemaCodeCompletion.h"
49#include "clang/Sema/SemaConsumer.h"
50#include "clang/Sema/SemaDirectX.h"
51#include "clang/Sema/SemaHLSL.h"
52#include "clang/Sema/SemaHexagon.h"
53#include "clang/Sema/SemaLoongArch.h"
54#include "clang/Sema/SemaM68k.h"
55#include "clang/Sema/SemaMIPS.h"
56#include "clang/Sema/SemaMSP430.h"
57#include "clang/Sema/SemaNVPTX.h"
58#include "clang/Sema/SemaObjC.h"
59#include "clang/Sema/SemaOpenACC.h"
60#include "clang/Sema/SemaOpenCL.h"
61#include "clang/Sema/SemaOpenMP.h"
62#include "clang/Sema/SemaPPC.h"
63#include "clang/Sema/SemaPseudoObject.h"
64#include "clang/Sema/SemaRISCV.h"
65#include "clang/Sema/SemaSPIRV.h"
66#include "clang/Sema/SemaSYCL.h"
67#include "clang/Sema/SemaSwift.h"
68#include "clang/Sema/SemaSystemZ.h"
69#include "clang/Sema/SemaWasm.h"
70#include "clang/Sema/SemaX86.h"
71#include "clang/Sema/TemplateDeduction.h"
72#include "clang/Sema/TemplateInstCallback.h"
73#include "clang/Sema/TypoCorrection.h"
74#include "llvm/ADT/DenseMap.h"
75#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/SmallPtrSet.h"
77#include "llvm/Support/TimeProfiler.h"
78#include <optional>
79
80using namespace clang;
81using namespace sema;
82
83SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
84 return Lexer::getLocForEndOfToken(Loc, Offset, SM: SourceMgr, LangOpts);
85}
86
87SourceRange
88Sema::getRangeForNextToken(SourceLocation Loc, bool IncludeMacros,
89 bool IncludeComments,
90 std::optional<tok::TokenKind> ExpectedToken) {
91 if (!Loc.isValid())
92 return SourceRange();
93 std::optional<Token> NextToken =
94 Lexer::findNextToken(Loc, SM: SourceMgr, LangOpts, IncludeComments);
95 if (!NextToken)
96 return SourceRange();
97 if (ExpectedToken && NextToken->getKind() != *ExpectedToken)
98 return SourceRange();
99 SourceLocation TokenStart = NextToken->getLocation();
100 SourceLocation TokenEnd = NextToken->getLastLoc();
101 if (!TokenStart.isValid() || !TokenEnd.isValid())
102 return SourceRange();
103 if (!IncludeMacros && (TokenStart.isMacroID() || TokenEnd.isMacroID()))
104 return SourceRange();
105
106 return SourceRange(TokenStart, TokenEnd);
107}
108
109ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
110
111DarwinSDKInfo *
112Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
113 StringRef Platform) {
114 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
115 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
116 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
117 << Platform;
118 WarnedDarwinSDKInfoMissing = true;
119 }
120 return SDKInfo;
121}
122
123DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
124 if (CachedDarwinSDKInfo)
125 return CachedDarwinSDKInfo->get();
126 auto SDKInfo = parseDarwinSDKInfo(
127 VFS&: PP.getFileManager().getVirtualFileSystem(),
128 SDKRootPath: PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
129 if (SDKInfo && *SDKInfo) {
130 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(args: std::move(**SDKInfo));
131 return CachedDarwinSDKInfo->get();
132 }
133 if (!SDKInfo)
134 llvm::consumeError(Err: SDKInfo.takeError());
135 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
136 return nullptr;
137}
138
139IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
140 const IdentifierInfo *ParamName, unsigned int Index) {
141 std::string InventedName;
142 llvm::raw_string_ostream OS(InventedName);
143
144 if (!ParamName)
145 OS << "auto:" << Index + 1;
146 else
147 OS << ParamName->getName() << ":auto";
148
149 return &Context.Idents.get(Name: OS.str());
150}
151
152PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
153 const Preprocessor &PP) {
154 PrintingPolicy Policy = Context.getPrintingPolicy();
155 // In diagnostics, we print _Bool as bool if the latter is defined as the
156 // former.
157 Policy.Bool = Context.getLangOpts().Bool;
158 if (!Policy.Bool) {
159 if (const MacroInfo *BoolMacro = PP.getMacroInfo(II: Context.getBoolName())) {
160 Policy.Bool = BoolMacro->isObjectLike() &&
161 BoolMacro->getNumTokens() == 1 &&
162 BoolMacro->getReplacementToken(Tok: 0).is(K: tok::kw__Bool);
163 }
164 }
165
166 // Shorten the data output if needed
167 Policy.EntireContentsOfLargeArray = false;
168
169 return Policy;
170}
171
// Record the translation-unit scope and enter the TranslationUnitDecl as the
// current declaration context for subsequent semantic analysis.
void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}
176
namespace clang {
namespace sema {

/// Preprocessor callbacks that let Sema observe file enter/exit transitions
/// and warning-pragma changes while the preprocessor runs.
class SemaPPCallbacks : public PPCallbacks {
  // Back-pointer to the owning Sema; null after reset() (the callback object
  // is owned by the preprocessor and outlives Sema).
  Sema *S = nullptr;
  // Include locations of the files we are currently nested in: pushed on
  // EnterFile, popped on ExitFile.
  llvm::SmallVector<SourceLocation, 8> IncludeStack;
  // Open -ftime-trace entries paired 1:1 with IncludeStack pushes (only when
  // the profiler is enabled).
  llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;

public:
  void set(Sema &S) { this->S = &S; }

  void reset() { S = nullptr; }

  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    // Ignore callbacks arriving after Sema has detached.
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
          ProfilerStack.push_back(llvm::timeTraceAsyncProfilerBegin(
              "Source", FE ? FE->getName() : StringRef("<unknown>")));
        }

        IncludeStack.push_back(IncludeLoc);
        // Diagnose entering an include with a non-default #pragma align/pack
        // state still in effect.
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd(ProfilerStack.pop_back_val());

        // Diagnose leaving an include that changed the align/pack state
        // without restoring it.
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
  void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
                        diag::Severity Mapping, StringRef Str) override {
    // If one of the analysis-based diagnostics was enabled while processing
    // a function, we want to note it in the analysis-based warnings so they
    // can be run at the end of the function body even if the analysis warnings
    // are disabled at that point.
    // NOTE(review): Str is assumed to have the form "-W<group>"/"-R<group>";
    // Str[1] and substr(2) are read without a length check — confirm callers
    // guarantee this.
    SmallVector<diag::kind, 256> GroupDiags;
    diag::Flavor Flavor =
        Str[1] == 'W' ? diag::Flavor::WarningOrError : diag::Flavor::Remark;
    StringRef Group = Str.substr(2);

    // An unknown group yields no overrides.
    if (S->PP.getDiagnostics().getDiagnosticIDs()->getDiagnosticsInGroup(
            Flavor, Group, GroupDiags))
      return;

    for (diag::kind K : GroupDiags) {
      // Note: the cases in this switch should be kept in sync with the
      // diagnostics in AnalysisBasedWarnings::getPolicyInEffectAt().
      AnalysisBasedWarnings::Policy &Override =
          S->AnalysisWarnings.getPolicyOverrides();
      switch (K) {
      default: break;
      case diag::warn_unreachable:
      case diag::warn_unreachable_break:
      case diag::warn_unreachable_return:
      case diag::warn_unreachable_loop_increment:
        Override.enableCheckUnreachable = true;
        break;
      case diag::warn_double_lock:
        Override.enableThreadSafetyAnalysis = true;
        break;
      case diag::warn_use_in_invalid_state:
        Override.enableConsumedAnalysis = true;
        break;
      }
    }
  }
};

} // end namespace sema
} // end namespace clang
268
// Out-of-line definitions for Sema's static constexpr data members, needed
// when they are ODR-used (pre-C++17 inline-variable semantics).
const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;
271
/// Construct a Sema instance wired to the given preprocessor, AST context and
/// consumer. Note: member-initializer order must match declaration order in
/// Sema.h; several members (e.g. Diags, SourceMgr) are derived from PP.
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : SemaBase(*this), CollectStats(false), TUKind(TUKind),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
      AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
      LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
      OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
      StackHandler(Diags), CurScope(nullptr), Ident_super(nullptr),
      // Language- and target-specific Sema sub-objects, one per supported
      // dialect/backend.
      AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)),
      ARMPtr(std::make_unique<SemaARM>(*this)),
      AVRPtr(std::make_unique<SemaAVR>(*this)),
      BPFPtr(std::make_unique<SemaBPF>(*this)),
      CodeCompletionPtr(
          std::make_unique<SemaCodeCompletion>(*this, CodeCompleter)),
      CUDAPtr(std::make_unique<SemaCUDA>(*this)),
      DirectXPtr(std::make_unique<SemaDirectX>(*this)),
      HLSLPtr(std::make_unique<SemaHLSL>(*this)),
      HexagonPtr(std::make_unique<SemaHexagon>(*this)),
      LoongArchPtr(std::make_unique<SemaLoongArch>(*this)),
      M68kPtr(std::make_unique<SemaM68k>(*this)),
      MIPSPtr(std::make_unique<SemaMIPS>(*this)),
      MSP430Ptr(std::make_unique<SemaMSP430>(*this)),
      NVPTXPtr(std::make_unique<SemaNVPTX>(*this)),
      ObjCPtr(std::make_unique<SemaObjC>(*this)),
      OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
      OpenCLPtr(std::make_unique<SemaOpenCL>(*this)),
      OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
      PPCPtr(std::make_unique<SemaPPC>(*this)),
      PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
      RISCVPtr(std::make_unique<SemaRISCV>(*this)),
      SPIRVPtr(std::make_unique<SemaSPIRV>(*this)),
      SYCLPtr(std::make_unique<SemaSYCL>(*this)),
      SwiftPtr(std::make_unique<SemaSwift>(*this)),
      SystemZPtr(std::make_unique<SemaSystemZ>(*this)),
      WasmPtr(std::make_unique<SemaWasm>(*this)),
      X86Ptr(std::make_unique<SemaX86>(*this)),
      // Pragma-controlled state stacks (MSVC-style segment/pack pragmas etc.).
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
      FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
      VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
      StdCoroutineTraitsCache(nullptr), IdResolver(pp),
      OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
      StdTypeIdentity(nullptr),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
      GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
      TyposCorrected(0), IsBuildingRecoveryCallExpr(false), NumSFINAEErrors(0),
      AccessCheckingSFINAE(false), CurrentInstantiationScope(nullptr),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgPackSubstIndex(std::nullopt), SatisfactionCache(Context) {
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    ObjC().NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    ObjC().NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  OpenMP().InitDataSharingAttributesStack();

  // Register preprocessor callbacks; ownership goes to the preprocessor, so
  // keep a raw handle that ~Sema uses to detach.
  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}
363
// Anchor Sema's type info to this TU (out-of-line definition pins the class's
// key function here so it is not emitted in every including TU).
void Sema::anchor() {}
366
367void Sema::addImplicitTypedef(StringRef Name, QualType T) {
368 DeclarationName DN = &Context.Idents.get(Name);
369 if (IdResolver.begin(Name: DN) == IdResolver.end())
370 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
371}
372
/// Perform per-translation-unit initialization: notify the consumer and any
/// external Sema source, then pre-populate the TU scope with predefined
/// builtin declarations (128-bit integers, ObjC types, MSVC/OpenCL/target
/// vector types, va_list, ...). Ordering within this function is significant;
/// see the inline comments.
void Sema::Initialize() {
  // Create BuiltinVaListDecl *before* ExternalSemaSource::InitializeSema(this)
  // because during initialization ASTReader can emit globals that require
  // name mangling. And the name mangling uses BuiltinVaListDecl.
  if (Context.getTargetInfo().hasBuiltinMSVaList())
    (void)Context.getBuiltinMSVaListDecl();
  (void)Context.getBuiltinVaListDecl();

  // Tell the AST consumer about this Sema object.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  // Without a translation-unit scope there is nothing else to pre-populate.
  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }


  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(
          Context.buildImplicitRecord("type_info", TagTypeKind::Class),
          TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));


      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(Context.getPointerDiffType());
        addImplicitTypedef("atomic_size_t", AtomicSizeT);
        addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
        addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
        addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
      };

      if (Context.getTypeSize(Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
        addImplicitTypedef("atomic_half", AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
                                         getLangOpts()) &&
          getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
                                         getLangOpts())) {
        if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
          addImplicitTypedef("atomic_double", AtomicDoubleT);
          Atomic64BitTypes.push_back(AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
        addImplicitTypedef("atomic_long", AtomicLongT);
        addImplicitTypedef("atomic_ulong", AtomicULongT);


        if (Context.getTypeSize(Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

// Register each supported OpenCL extension's opaque type as an implicit
// typedef.
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
    addImplicitTypedef(#ExtType, Context.Id##Ty); \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  // AArch64 ACLE types (SVE vectors, NEON vector typedefs).
  if (Context.getTargetInfo().hasAArch64ACLETypes() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(#Name, Context.SingletonId);
#define NEON_VECTOR_TYPE(Name, BaseType, ElBits, NumEls, VectorKind) \
  addImplicitTypedef( \
      #Name, Context.getVectorType(Context.BaseType, NumEls, VectorKind));
#include "clang/Basic/AArch64ACLETypes.def"
  }

  // PowerPC MMA/VSX builtin vector types.
  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  // RISC-V vector (RVV) builtin types.
  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  // WebAssembly reference types (requires the reference-types feature).
  if (Context.getTargetInfo().getTriple().isWasm() &&
      Context.getTargetInfo().hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // AMDGPU builtin types.
  if (Context.getTargetInfo().getTriple().isAMDGPU() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Make the MS and standard va_list builtins visible to name lookup if
  // nothing else already claimed the names.
  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
    if (IdResolver.begin(MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
  if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}
591
/// Tear down Sema. The order here matters: consumers and external sources are
/// told to forget us before owned state is destroyed, and the PP callback
/// handler (owned by the preprocessor, which outlives Sema) is detached last.
Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // Delete cached satisfactions. NOTE(review): pointers are collected first,
  // presumably because deleting nodes while iterating the cache container is
  // not safe — confirm against SatisfactionCache's container type.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  OpenMP().DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}
628
/// Run \p Fn with protection against stack exhaustion, delegating to the
/// shared stack handler. \p Loc is passed along — presumably as the location
/// for any stack-related diagnostic the handler emits; see StackHandler.
void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  StackHandler.runWithSufficientStackSpace(Loc, Fn);
}
633
634bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
635 UnavailableAttr::ImplicitReason reason) {
636 // If we're not in a function, it's an error.
637 FunctionDecl *fn = dyn_cast<FunctionDecl>(Val: CurContext);
638 if (!fn) return false;
639
640 // If we're in template instantiation, it's an error.
641 if (inTemplateInstantiation())
642 return false;
643
644 // If that function's not in a system header, it's an error.
645 if (!Context.getSourceManager().isInSystemHeader(Loc: loc))
646 return false;
647
648 // If the function is already unavailable, it's not an error.
649 if (fn->hasAttr<UnavailableAttr>()) return true;
650
651 fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
652 return true;
653}
654
655ASTMutationListener *Sema::getASTMutationListener() const {
656 return getASTConsumer().GetASTMutationListener();
657}
658
659void Sema::addExternalSource(ExternalSemaSource *E) {
660 assert(E && "Cannot use with NULL ptr");
661
662 if (!ExternalSource) {
663 ExternalSource = E;
664 return;
665 }
666
667 if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(Val&: ExternalSource))
668 Ex->AddSource(Source: E);
669 else
670 ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
671}
672
673void Sema::PrintStats() const {
674 llvm::errs() << "\n*** Semantic Analysis Stats:\n";
675 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
676
677 BumpAlloc.PrintStats();
678 AnalysisWarnings.PrintStats();
679}
680
681void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
682 QualType SrcType,
683 SourceLocation Loc) {
684 std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
685 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
686 *ExprNullability != NullabilityKind::NullableResult))
687 return;
688
689 std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
690 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
691 return;
692
693 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
694}
695
696// Generate diagnostics when adding or removing effects in a type conversion.
697void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
698 SourceLocation Loc) {
699 const auto SrcFX = FunctionEffectsRef::get(QT: SrcType);
700 const auto DstFX = FunctionEffectsRef::get(QT: DstType);
701 if (SrcFX != DstFX) {
702 for (const auto &Diff : FunctionEffectDiffVector(SrcFX, DstFX)) {
703 if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX))
704 Diag(Loc, diag::warn_invalid_add_func_effects) << Diff.effectName();
705 }
706 }
707}
708
709void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
710 // nullptr only exists from C++11 on, so don't warn on its absence earlier.
711 if (!getLangOpts().CPlusPlus11)
712 return;
713
714 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
715 return;
716
717 const Expr *EStripped = E->IgnoreParenImpCasts();
718 if (EStripped->getType()->isNullPtrType())
719 return;
720 if (isa<GNUNullExpr>(Val: EStripped))
721 return;
722
723 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
724 E->getBeginLoc()))
725 return;
726
727 // Don't diagnose the conversion from a 0 literal to a null pointer argument
728 // in a synthesized call to operator<=>.
729 if (!CodeSynthesisContexts.empty() &&
730 CodeSynthesisContexts.back().Kind ==
731 CodeSynthesisContext::RewritingOperatorAsSpaceship)
732 return;
733
734 // Ignore null pointers in defaulted comparison operators.
735 FunctionDecl *FD = getCurFunctionDecl();
736 if (FD && FD->isDefaulted()) {
737 return;
738 }
739
740 // If it is a macro from system header, and if the macro name is not "NULL",
741 // do not warn.
742 // Note that uses of "NULL" will be ignored above on systems that define it
743 // as __null.
744 SourceLocation MaybeMacroLoc = E->getBeginLoc();
745 if (Diags.getSuppressSystemWarnings() &&
746 SourceMgr.isInSystemMacro(loc: MaybeMacroLoc) &&
747 !findMacroSpelling(loc&: MaybeMacroLoc, name: "NULL"))
748 return;
749
750 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
751 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
752}
753
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
///
/// \param E the expression to convert; may be rewritten (materialized into a
///        temporary) before the cast is built.
/// \param Ty the destination type of the conversion.
/// \param Kind the cast kind to perform.
/// \param VK the value category of the result.
/// \param BasePath the base-class path for derived-to-base casts, if any.
/// \param CCK records whether the conversion comes from an explicit cast;
///        used here to suppress function-effect conversion diagnostics.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  // Debug-only consistency check: only a fixed set of cast kinds is allowed
  // to produce a prvalue from a glvalue operand.
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
    case CK_HLSLArrayRValue:
    case CK_HLSLAggregateSplatCast:
      break;
    }
  }
  // Conversely, a prvalue may only become a glvalue through a dependent cast.
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  diagnoseNullableToNonnullConversion(DstType: Ty, SrcType: E->getType(), Loc: E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);
  // Function-effect mismatches are not diagnosed for explicit casts or for
  // null-pointer conversions.
  if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
      Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    diagnoseFunctionEffectConversion(DstType: Ty, SrcType: E->getType(), Loc: E->getBeginLoc());

  // Compare canonical types so that sugar differences don't force a cast.
  QualType ExprTy = Context.getCanonicalType(T: E->getType());
  QualType TypeTy = Context.getCanonicalType(T: Ty);

  // This cast is used in place of a regular LValue to RValue cast for
  // HLSL Array Parameter Types. It needs to be emitted even if
  // ExprTy == TypeTy, except if E is an HLSLOutArgExpr
  // Emitting a cast in that case will prevent HLSLOutArgExpr from
  // being handled properly in EmitCallArg
  if (Kind == CK_HLSLArrayRValue && !isa<HLSLOutArgExpr>(Val: E))
    return ImplicitCastExpr::Create(Context, T: Ty, Kind, Operand: E, BasePath, Cat: VK,
                                    FPO: CurFPFeatureOverrides());

  // Identical canonical types: no cast node is needed at all.
  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          T: E->getType(), Temporary: E, BoundToLvalueReference: !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly(by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly(by converting an array name to a pointer as
    // discussed in 6.3.2.1).Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
        if (const auto *VD = dyn_cast<VarDecl>(Val: DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(E->getExprLoc(), diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  // Merge into an existing implicit cast of the same kind rather than
  // stacking a second node, as long as no base path is involved.
  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(Val: E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, T: Ty, Kind, Operand: E, BasePath, Cat: VK,
                                  FPO: CurFPFeatureOverrides());
}
849
850CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
851 switch (ScalarTy->getScalarTypeKind()) {
852 case Type::STK_Bool: return CK_NoOp;
853 case Type::STK_CPointer: return CK_PointerToBoolean;
854 case Type::STK_BlockPointer: return CK_PointerToBoolean;
855 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
856 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
857 case Type::STK_Integral: return CK_IntegralToBoolean;
858 case Type::STK_Floating: return CK_FloatingToBoolean;
859 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
860 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
861 case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
862 }
863 llvm_unreachable("unknown scalar type kind");
864}
865
866/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
867static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
868 if (D->getMostRecentDecl()->isUsed())
869 return true;
870
871 if (D->isExternallyVisible())
872 return true;
873
874 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: D)) {
875 // If this is a function template and none of its specializations is used,
876 // we should warn.
877 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
878 for (const auto *Spec : Template->specializations())
879 if (ShouldRemoveFromUnused(SemaRef, Spec))
880 return true;
881
882 // UnusedFileScopedDecls stores the first declaration.
883 // The declaration may have become definition so check again.
884 const FunctionDecl *DeclToCheck;
885 if (FD->hasBody(Definition&: DeclToCheck))
886 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
887
888 // Later redecls may add new information resulting in not having to warn,
889 // so check again.
890 DeclToCheck = FD->getMostRecentDecl();
891 if (DeclToCheck != FD)
892 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
893 }
894
895 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D)) {
896 // If a variable usable in constant expressions is referenced,
897 // don't warn if it isn't used: if the value of a variable is required
898 // for the computation of a constant expression, it doesn't make sense to
899 // warn even if the variable isn't odr-used. (isReferenced doesn't
900 // precisely reflect that, but it's a decent approximation.)
901 if (VD->isReferenced() &&
902 VD->mightBeUsableInConstantExpressions(C: SemaRef->Context))
903 return true;
904
905 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
906 // If this is a variable template and none of its specializations is used,
907 // we should warn.
908 for (const auto *Spec : Template->specializations())
909 if (ShouldRemoveFromUnused(SemaRef, Spec))
910 return true;
911
912 // UnusedFileScopedDecls stores the first declaration.
913 // The declaration may have become definition so check again.
914 const VarDecl *DeclToCheck = VD->getDefinition();
915 if (DeclToCheck)
916 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
917
918 // Later redecls may add new information resulting in not having to warn,
919 // so check again.
920 DeclToCheck = VD->getMostRecentDecl();
921 if (DeclToCheck != VD)
922 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
923 }
924
925 return false;
926}
927
928static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
929 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
930 return FD->isExternC();
931 return cast<VarDecl>(Val: ND)->isExternC();
932}
933
934/// Determine whether ND is an external-linkage function or variable whose
935/// type has no linkage.
936bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
937 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
938 // because we also want to catch the case where its type has VisibleNoLinkage,
939 // which does not affect the linkage of VD.
940 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
941 !isExternalFormalLinkage(L: VD->getType()->getLinkage()) &&
942 !isFunctionOrVarDeclExternC(VD);
943}
944
/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
///
/// \param Undefined filled with (declaration, first-use location) pairs for
///        each entity still lacking a needed definition.
///
/// NOTE(review): entries are appended in UndefinedButUsed's iteration order;
/// any sorting implied by the summary above happens elsewhere — confirm.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore attributes that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    // Deduction guides are never called, so they need no definition.
    if (isa<CXXDeductionGuideDecl>(Val: ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) {
      // Already defined: nothing to report.
      if (FD->isDefined())
        continue;
      // Externally visible, non-inline functions may legitimately be defined
      // in another translation unit.
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      // Builtins are provided by the implementation.
      if (FD->getBuiltinID())
        continue;
    } else {
      const auto *VD = cast<VarDecl>(Val: ND);
      // Anything beyond a pure declaration counts as (tentatively) defined.
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      // Mirror the function case: an externally visible, non-inline variable
      // may be defined in another translation unit.
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(Elt: std::make_pair(x&: ND, y: UndefinedUse.second));
  }
}
997
/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
///
/// Emits a diagnostic (error, warning, or extension note depending on the
/// case) at each such entity's location, plus a note at the point of use
/// when one was recorded. Clears Sema's UndefinedButUsed list as a side
/// effect.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  S.UndefinedButUsed.clear();
  if (Undefined.empty()) return;

  for (const auto &Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Val: Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
          << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      //
      // Under OpenMP, suppress the warning for the implicit base function of
      // a `declare variant` with a mangled-variant name, unless the
      // disable_implicit_base extension trait is active.
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(Val: VD)) {
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(VD->getLocation(), diag::warn_undefined_internal)
            << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(Val: VD)) {
      // FD is only needed for the assertion below; silence -Wunused-variable
      // in release builds where the assert compiles away.
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    // Point at where the entity was actually used, when known.
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }
}
1061
1062void Sema::LoadExternalWeakUndeclaredIdentifiers() {
1063 if (!ExternalSource)
1064 return;
1065
1066 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
1067 ExternalSource->ReadWeakUndeclaredIdentifiers(WI&: WeakIDs);
1068 for (auto &WeakID : WeakIDs)
1069 (void)WeakUndeclaredIdentifiers[WeakID.first].insert(X: WeakID.second);
1070}
1071
1072
1073typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
1074
1075/// Returns true, if all methods and nested classes of the given
1076/// CXXRecordDecl are defined in this translation unit.
1077///
1078/// Should only be called from ActOnEndOfTranslationUnit so that all
1079/// definitions are actually read.
1080static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
1081 RecordCompleteMap &MNCComplete) {
1082 RecordCompleteMap::iterator Cache = MNCComplete.find(Val: RD);
1083 if (Cache != MNCComplete.end())
1084 return Cache->second;
1085 if (!RD->isCompleteDefinition())
1086 return false;
1087 bool Complete = true;
1088 for (DeclContext::decl_iterator I = RD->decls_begin(),
1089 E = RD->decls_end();
1090 I != E && Complete; ++I) {
1091 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Val: *I))
1092 Complete = M->isDefined() || M->isDefaulted() ||
1093 (M->isPureVirtual() && !isa<CXXDestructorDecl>(Val: M));
1094 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(Val: *I))
1095 // If the template function is marked as late template parsed at this
1096 // point, it has not been instantiated and therefore we have not
1097 // performed semantic analysis on it yet, so we cannot know if the type
1098 // can be considered complete.
1099 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
1100 F->getTemplatedDecl()->isDefined();
1101 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(Val: *I)) {
1102 if (R->isInjectedClassName())
1103 continue;
1104 if (R->hasDefinition())
1105 Complete = MethodsAndNestedClassesComplete(RD: R->getDefinition(),
1106 MNCComplete);
1107 else
1108 Complete = false;
1109 }
1110 }
1111 MNCComplete[RD] = Complete;
1112 return Complete;
1113}
1114
1115/// Returns true, if the given CXXRecordDecl is fully defined in this
1116/// translation unit, i.e. all methods are defined or pure virtual and all
1117/// friends, friend functions and nested classes are fully defined in this
1118/// translation unit.
1119///
1120/// Should only be called from ActOnEndOfTranslationUnit so that all
1121/// definitions are actually read.
1122static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
1123 RecordCompleteMap &RecordsComplete,
1124 RecordCompleteMap &MNCComplete) {
1125 RecordCompleteMap::iterator Cache = RecordsComplete.find(Val: RD);
1126 if (Cache != RecordsComplete.end())
1127 return Cache->second;
1128 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
1129 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
1130 E = RD->friend_end();
1131 I != E && Complete; ++I) {
1132 // Check if friend classes and methods are complete.
1133 if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
1134 // Friend classes are available as the TypeSourceInfo of the FriendDecl.
1135 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
1136 Complete = MethodsAndNestedClassesComplete(RD: FriendD, MNCComplete);
1137 else
1138 Complete = false;
1139 } else {
1140 // Friend functions are available through the NamedDecl of FriendDecl.
1141 if (const FunctionDecl *FD =
1142 dyn_cast<FunctionDecl>(Val: (*I)->getFriendDecl()))
1143 Complete = FD->isDefined();
1144 else
1145 // This is a template friend, give up.
1146 Complete = false;
1147 }
1148 }
1149 RecordsComplete[RD] = Complete;
1150 return Complete;
1151}
1152
1153void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1154 if (ExternalSource)
1155 ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1156 Decls&: UnusedLocalTypedefNameCandidates);
1157 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1158 if (TD->isReferenced())
1159 continue;
1160 Diag(TD->getLocation(), diag::warn_unused_local_typedef)
1161 << isa<TypeAliasDecl>(TD) << TD->getDeclName();
1162 }
1163 UnusedLocalTypedefNameCandidates.clear();
1164}
1165
1166void Sema::ActOnStartOfTranslationUnit() {
1167 if (getLangOpts().CPlusPlusModules &&
1168 getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1169 HandleStartOfHeaderUnit();
1170}
1171
/// Finalize one fragment of the translation unit (global module fragment,
/// private module fragment, or the normal TU body): flush late-parsed
/// template instantiations into the pending list, perform pending
/// instantiations, emit deferred diagnostics, and report uncorrected
/// delayed typos.
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  if (Kind == TUFragmentKind::Global) {
    // Perform Pending Instantiations at the end of global module fragment so
    // that the module ownership of TU-level decls won't get messed.
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
    return;
  }

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(position: PendingInstantiations.end(),
                               first: LateParsedInstantiations.begin(),
                               last: LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefinedUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    // Mark each external pending function so later queries know an
    // instantiation is still owed.
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(Val: PII.first))
        Func->setInstantiationIsPending(true);
    // Externally-read instantiations go to the front of the queue.
    PendingInstantiations.insert(position: PendingInstantiations.begin(),
                                 first: Pending.begin(), last: Pending.end());
  }

  {
    // Scoped so the time-trace entry covers only the instantiation work.
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");

  // Report diagnostics for uncorrected delayed typos. Ideally all of them
  // should have been corrected by that time, but it is very hard to cover all
  // cases in practice.
  for (const auto &Typo : DelayedTypos) {
    // We pass an empty TypoCorrection to indicate no correction was performed.
    Typo.second.DiagHandler(TypoCorrection());
  }
  DelayedTypos.clear();
}
1240
1241void Sema::ActOnEndOfTranslationUnit() {
1242 assert(DelayedDiagnostics.getCurrentPool() == nullptr
1243 && "reached end of translation unit with a pool attached?");
1244
1245 // If code completion is enabled, don't perform any end-of-translation-unit
1246 // work.
1247 if (PP.isCodeCompletionEnabled())
1248 return;
1249
1250 // Complete translation units and modules define vtables and perform implicit
1251 // instantiations. PCH files do not.
1252 if (TUKind != TU_Prefix) {
1253 ObjC().DiagnoseUseOfUnimplementedSelectors();
1254
1255 ActOnEndOfTranslationUnitFragment(
1256 Kind: !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1257 Module::PrivateModuleFragment
1258 ? TUFragmentKind::Private
1259 : TUFragmentKind::Normal);
1260
1261 if (LateTemplateParserCleanup)
1262 LateTemplateParserCleanup(OpaqueParser);
1263
1264 CheckDelayedMemberExceptionSpecs();
1265 } else {
1266 // If we are building a TU prefix for serialization, it is safe to transfer
1267 // these over, even though they are not parsed. The end of the TU should be
1268 // outside of any eager template instantiation scope, so when this AST is
1269 // deserialized, these templates will not be parsed until the end of the
1270 // combined TU.
1271 PendingInstantiations.insert(position: PendingInstantiations.end(),
1272 first: LateParsedInstantiations.begin(),
1273 last: LateParsedInstantiations.end());
1274 LateParsedInstantiations.clear();
1275
1276 if (LangOpts.PCHInstantiateTemplates) {
1277 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1278 PerformPendingInstantiations();
1279 }
1280 }
1281
1282 DiagnoseUnterminatedPragmaAlignPack();
1283 DiagnoseUnterminatedPragmaAttribute();
1284 OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();
1285 DiagnosePrecisionLossInComplexDivision();
1286
1287 // All delayed member exception specs should be checked or we end up accepting
1288 // incompatible declarations.
1289 assert(DelayedOverridingExceptionSpecChecks.empty());
1290 assert(DelayedEquivalentExceptionSpecChecks.empty());
1291
1292 // All dllexport classes should have been processed already.
1293 assert(DelayedDllExportClasses.empty());
1294 assert(DelayedDllExportMemberFunctions.empty());
1295
1296 // Remove file scoped decls that turned out to be used.
1297 UnusedFileScopedDecls.erase(
1298 From: std::remove_if(first: UnusedFileScopedDecls.begin(source: nullptr, LocalOnly: true),
1299 last: UnusedFileScopedDecls.end(),
1300 pred: [this](const DeclaratorDecl *DD) {
1301 return ShouldRemoveFromUnused(SemaRef: this, D: DD);
1302 }),
1303 To: UnusedFileScopedDecls.end());
1304
1305 if (TUKind == TU_Prefix) {
1306 // Translation unit prefixes don't need any of the checking below.
1307 if (!PP.isIncrementalProcessingEnabled())
1308 TUScope = nullptr;
1309 return;
1310 }
1311
1312 // Check for #pragma weak identifiers that were never declared
1313 LoadExternalWeakUndeclaredIdentifiers();
1314 for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1315 if (WeakIDs.second.empty())
1316 continue;
1317
1318 Decl *PrevDecl = LookupSingleName(S: TUScope, Name: WeakIDs.first, Loc: SourceLocation(),
1319 NameKind: LookupOrdinaryName);
1320 if (PrevDecl != nullptr &&
1321 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
1322 for (const auto &WI : WeakIDs.second)
1323 Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
1324 << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1325 else
1326 for (const auto &WI : WeakIDs.second)
1327 Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
1328 << WeakIDs.first;
1329 }
1330
1331 if (LangOpts.CPlusPlus11 &&
1332 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
1333 CheckDelegatingCtorCycles();
1334
1335 if (!Diags.hasErrorOccurred()) {
1336 if (ExternalSource)
1337 ExternalSource->ReadUndefinedButUsed(Undefined&: UndefinedButUsed);
1338 checkUndefinedButUsed(S&: *this);
1339 }
1340
1341 // A global-module-fragment is only permitted within a module unit.
1342 if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1343 Module::ExplicitGlobalModuleFragment) {
1344 Diag(ModuleScopes.back().BeginLoc,
1345 diag::err_module_declaration_missing_after_global_module_introducer);
1346 } else if (getLangOpts().getCompilingModule() ==
1347 LangOptions::CMK_ModuleInterface &&
1348 // We can't use ModuleScopes here since ModuleScopes is always
1349 // empty if we're compiling the BMI.
1350 !getASTContext().getCurrentNamedModule()) {
1351 // If we are building a module interface unit, we should have seen the
1352 // module declaration.
1353 //
1354 // FIXME: Make a better guess as to where to put the module declaration.
1355 Diag(getSourceManager().getLocForStartOfFile(
1356 getSourceManager().getMainFileID()),
1357 diag::err_module_declaration_missing);
1358 }
1359
1360 // Now we can decide whether the modules we're building need an initializer.
1361 if (Module *CurrentModule = getCurrentModule();
1362 CurrentModule && CurrentModule->isInterfaceOrPartition()) {
1363 auto DoesModNeedInit = [this](Module *M) {
1364 if (!getASTContext().getModuleInitializers(M).empty())
1365 return true;
1366 for (auto [Exported, _] : M->Exports)
1367 if (Exported->isNamedModuleInterfaceHasInit())
1368 return true;
1369 for (Module *I : M->Imports)
1370 if (I->isNamedModuleInterfaceHasInit())
1371 return true;
1372
1373 return false;
1374 };
1375
1376 CurrentModule->NamedModuleHasInit =
1377 DoesModNeedInit(CurrentModule) ||
1378 llvm::any_of(Range: CurrentModule->submodules(), P: DoesModNeedInit);
1379 }
1380
1381 if (TUKind == TU_ClangModule) {
1382 // If we are building a module, resolve all of the exported declarations
1383 // now.
1384 if (Module *CurrentModule = PP.getCurrentModule()) {
1385 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1386
1387 SmallVector<Module *, 2> Stack;
1388 Stack.push_back(Elt: CurrentModule);
1389 while (!Stack.empty()) {
1390 Module *Mod = Stack.pop_back_val();
1391
1392 // Resolve the exported declarations and conflicts.
1393 // FIXME: Actually complain, once we figure out how to teach the
1394 // diagnostic client to deal with complaints in the module map at this
1395 // point.
1396 ModMap.resolveExports(Mod, /*Complain=*/false);
1397 ModMap.resolveUses(Mod, /*Complain=*/false);
1398 ModMap.resolveConflicts(Mod, /*Complain=*/false);
1399
1400 // Queue the submodules, so their exports will also be resolved.
1401 auto SubmodulesRange = Mod->submodules();
1402 Stack.append(in_start: SubmodulesRange.begin(), in_end: SubmodulesRange.end());
1403 }
1404 }
1405
1406 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1407 // modules when they are built, not every time they are used.
1408 emitAndClearUnusedLocalTypedefWarnings();
1409 }
1410
1411 // C++ standard modules. Diagnose cases where a function is declared inline
1412 // in the module purview but has no definition before the end of the TU or
1413 // the start of a Private Module Fragment (if one is present).
1414 if (!PendingInlineFuncDecls.empty()) {
1415 for (auto *D : PendingInlineFuncDecls) {
1416 if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1417 bool DefInPMF = false;
1418 if (auto *FDD = FD->getDefinition()) {
1419 DefInPMF = FDD->getOwningModule()->isPrivateModule();
1420 if (!DefInPMF)
1421 continue;
1422 }
1423 Diag(FD->getLocation(), diag::err_export_inline_not_defined)
1424 << DefInPMF;
1425 // If we have a PMF it should be at the end of the ModuleScopes.
1426 if (DefInPMF &&
1427 ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1428 Diag(ModuleScopes.back().BeginLoc,
1429 diag::note_private_module_fragment);
1430 }
1431 }
1432 }
1433 PendingInlineFuncDecls.clear();
1434 }
1435
1436 // C99 6.9.2p2:
1437 // A declaration of an identifier for an object that has file
1438 // scope without an initializer, and without a storage-class
1439 // specifier or with the storage-class specifier static,
1440 // constitutes a tentative definition. If a translation unit
1441 // contains one or more tentative definitions for an identifier,
1442 // and the translation unit contains no external definition for
1443 // that identifier, then the behavior is exactly as if the
1444 // translation unit contains a file scope declaration of that
1445 // identifier, with the composite type as of the end of the
1446 // translation unit, with an initializer equal to 0.
1447 llvm::SmallSet<VarDecl *, 32> Seen;
1448 for (TentativeDefinitionsType::iterator
1449 T = TentativeDefinitions.begin(source: ExternalSource.get()),
1450 TEnd = TentativeDefinitions.end();
1451 T != TEnd; ++T) {
1452 VarDecl *VD = (*T)->getActingDefinition();
1453
1454 // If the tentative definition was completed, getActingDefinition() returns
1455 // null. If we've already seen this variable before, insert()'s second
1456 // return value is false.
1457 if (!VD || VD->isInvalidDecl() || !Seen.insert(Ptr: VD).second)
1458 continue;
1459
1460 if (const IncompleteArrayType *ArrayT
1461 = Context.getAsIncompleteArrayType(T: VD->getType())) {
1462 // Set the length of the array to 1 (C99 6.9.2p5).
1463 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
1464 llvm::APInt One(Context.getTypeSize(T: Context.getSizeType()), true);
1465 QualType T = Context.getConstantArrayType(
1466 EltTy: ArrayT->getElementType(), ArySize: One, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
1467 VD->setType(T);
1468 } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
1469 diag::err_tentative_def_incomplete_type))
1470 VD->setInvalidDecl();
1471
1472 // No initialization is performed for a tentative definition.
1473 CheckCompleteVariableDeclaration(VD);
1474
1475 // In C, if the definition is const-qualified and has no initializer, it
1476 // is left uninitialized unless it has static or thread storage duration.
1477 QualType Type = VD->getType();
1478 if (!VD->isInvalidDecl() && !getLangOpts().CPlusPlus &&
1479 Type.isConstQualified() && !VD->getAnyInitializer()) {
1480 unsigned DiagID = diag::warn_default_init_const_unsafe;
1481 if (VD->getStorageDuration() == SD_Static ||
1482 VD->getStorageDuration() == SD_Thread)
1483 DiagID = diag::warn_default_init_const;
1484
1485 bool EmitCppCompat = !Diags.isIgnored(
1486 diag::warn_cxx_compat_hack_fake_diagnostic_do_not_emit,
1487 VD->getLocation());
1488
1489 Diag(VD->getLocation(), DiagID) << Type << EmitCppCompat;
1490 }
1491
1492 // Notify the consumer that we've completed a tentative definition.
1493 if (!VD->isInvalidDecl())
1494 Consumer.CompleteTentativeDefinition(D: VD);
1495 }
1496
1497 for (auto *D : ExternalDeclarations) {
1498 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1499 continue;
1500
1501 Consumer.CompleteExternalDeclaration(D);
1502 }
1503
1504 if (LangOpts.HLSL)
1505 HLSL().ActOnEndOfTranslationUnit(TU: getASTContext().getTranslationUnitDecl());
1506
1507 // If there were errors, disable 'unused' warnings since they will mostly be
1508 // noise. Don't warn for a use from a module: either we should warn on all
1509 // file-scope declarations in modules or not at all, but whether the
1510 // declaration is used is immaterial.
1511 if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
1512 // Output warning for unused file scoped decls.
1513 for (UnusedFileScopedDeclsType::iterator
1514 I = UnusedFileScopedDecls.begin(source: ExternalSource.get()),
1515 E = UnusedFileScopedDecls.end();
1516 I != E; ++I) {
1517 if (ShouldRemoveFromUnused(SemaRef: this, D: *I))
1518 continue;
1519
1520 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: *I)) {
1521 const FunctionDecl *DiagD;
1522 if (!FD->hasBody(Definition&: DiagD))
1523 DiagD = FD;
1524 if (DiagD->isDeleted())
1525 continue; // Deleted functions are supposed to be unused.
1526 SourceRange DiagRange = DiagD->getLocation();
1527 if (const ASTTemplateArgumentListInfo *ASTTAL =
1528 DiagD->getTemplateSpecializationArgsAsWritten())
1529 DiagRange.setEnd(ASTTAL->RAngleLoc);
1530 if (DiagD->isReferenced()) {
1531 if (isa<CXXMethodDecl>(Val: DiagD))
1532 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
1533 << DiagD << DiagRange;
1534 else {
1535 if (FD->getStorageClass() == SC_Static &&
1536 !FD->isInlineSpecified() &&
1537 !SourceMgr.isInMainFile(
1538 SourceMgr.getExpansionLoc(FD->getLocation())))
1539 Diag(DiagD->getLocation(),
1540 diag::warn_unneeded_static_internal_decl)
1541 << DiagD << DiagRange;
1542 else
1543 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1544 << /*function=*/0 << DiagD << DiagRange;
1545 }
1546 } else if (!FD->isTargetMultiVersion() ||
1547 FD->isTargetMultiVersionDefault()) {
1548 if (FD->getDescribedFunctionTemplate())
1549 Diag(DiagD->getLocation(), diag::warn_unused_template)
1550 << /*function=*/0 << DiagD << DiagRange;
1551 else
1552 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
1553 ? diag::warn_unused_member_function
1554 : diag::warn_unused_function)
1555 << DiagD << DiagRange;
1556 }
1557 } else {
1558 const VarDecl *DiagD = cast<VarDecl>(Val: *I)->getDefinition();
1559 if (!DiagD)
1560 DiagD = cast<VarDecl>(Val: *I);
1561 SourceRange DiagRange = DiagD->getLocation();
1562 if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(Val: DiagD)) {
1563 if (const ASTTemplateArgumentListInfo *ASTTAL =
1564 VTSD->getTemplateArgsAsWritten())
1565 DiagRange.setEnd(ASTTAL->RAngleLoc);
1566 }
1567 if (DiagD->isReferenced()) {
1568 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1569 << /*variable=*/1 << DiagD << DiagRange;
1570 } else if (DiagD->getDescribedVarTemplate()) {
1571 Diag(DiagD->getLocation(), diag::warn_unused_template)
1572 << /*variable=*/1 << DiagD << DiagRange;
1573 } else if (DiagD->getType().isConstQualified()) {
1574 const SourceManager &SM = SourceMgr;
1575 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
1576 !PP.getLangOpts().IsHeaderFile)
1577 Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
1578 << DiagD << DiagRange;
1579 } else {
1580 Diag(DiagD->getLocation(), diag::warn_unused_variable)
1581 << DiagD << DiagRange;
1582 }
1583 }
1584 }
1585
1586 emitAndClearUnusedLocalTypedefWarnings();
1587 }
1588
1589 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
1590 // FIXME: Load additional unused private field candidates from the external
1591 // source.
1592 RecordCompleteMap RecordsComplete;
1593 RecordCompleteMap MNCComplete;
1594 for (const NamedDecl *D : UnusedPrivateFields) {
1595 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
1596 if (RD && !RD->isUnion() &&
1597 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1598 Diag(D->getLocation(), diag::warn_unused_private_field)
1599 << D->getDeclName();
1600 }
1601 }
1602 }
1603
1604 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
1605 if (ExternalSource)
1606 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1607 for (const auto &DeletedFieldInfo : DeleteExprs) {
1608 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1609 AnalyzeDeleteExprMismatch(Field: DeletedFieldInfo.first, DeleteLoc: DeleteExprLoc.first,
1610 DeleteWasArrayForm: DeleteExprLoc.second);
1611 }
1612 }
1613 }
1614
1615 AnalysisWarnings.IssueWarnings(D: Context.getTranslationUnitDecl());
1616
1617 if (Context.hasAnyFunctionEffects())
1618 performFunctionEffectAnalysis(TU: Context.getTranslationUnitDecl());
1619
1620 // Check we've noticed that we're no longer parsing the initializer for every
1621 // variable. If we miss cases, then at best we have a performance issue and
1622 // at worst a rejects-valid bug.
1623 assert(ParsingInitForAutoVars.empty() &&
1624 "Didn't unmark var as having its initializer parsed");
1625
1626 if (!PP.isIncrementalProcessingEnabled())
1627 TUScope = nullptr;
1628}
1629
1630
1631//===----------------------------------------------------------------------===//
1632// Helper functions.
1633//===----------------------------------------------------------------------===//
1634
1635DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1636 DeclContext *DC = CurContext;
1637
1638 while (true) {
1639 if (isa<BlockDecl>(Val: DC) || isa<EnumDecl>(Val: DC) || isa<CapturedDecl>(Val: DC) ||
1640 isa<RequiresExprBodyDecl>(Val: DC)) {
1641 DC = DC->getParent();
1642 } else if (!AllowLambda && isa<CXXMethodDecl>(Val: DC) &&
1643 cast<CXXMethodDecl>(Val: DC)->getOverloadedOperator() == OO_Call &&
1644 cast<CXXRecordDecl>(Val: DC->getParent())->isLambda()) {
1645 DC = DC->getParent()->getParent();
1646 } else break;
1647 }
1648
1649 return DC;
1650}
1651
1652/// getCurFunctionDecl - If inside of a function body, this returns a pointer
1653/// to the function decl for the function being parsed. If we're currently
1654/// in a 'block', this returns the containing context.
1655FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1656 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1657 return dyn_cast<FunctionDecl>(Val: DC);
1658}
1659
1660ObjCMethodDecl *Sema::getCurMethodDecl() {
1661 DeclContext *DC = getFunctionLevelDeclContext();
1662 while (isa<RecordDecl>(Val: DC))
1663 DC = DC->getParent();
1664 return dyn_cast<ObjCMethodDecl>(Val: DC);
1665}
1666
1667NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1668 DeclContext *DC = getFunctionLevelDeclContext();
1669 if (isa<ObjCMethodDecl>(Val: DC) || isa<FunctionDecl>(Val: DC))
1670 return cast<NamedDecl>(Val: DC);
1671 return nullptr;
1672}
1673
1674LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1675 if (getLangOpts().OpenCL)
1676 return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1677 return LangAS::Default;
1678}
1679
// Emit a diagnostic through Sema, honoring SFINAE suppression: inside
// template argument deduction, diagnostics may be recorded as substitution
// failures or suppressed instead of being reported to the user.
void Sema::EmitDiagnostic(unsigned DiagID, const DiagnosticBuilder &DB) {
  // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
  // and yet we also use the current diag ID on the DiagnosticsEngine. This has
  // been made more painfully obvious by the refactor that introduced this
  // function, but it is possible that the incoming argument can be
  // eliminated. If it truly cannot be (for example, there is some reentrancy
  // issue I am not seeing yet), then there should at least be a clarifying
  // comment somewhere.
  Diagnostic DiagInfo(&Diags, DB);
  if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
    // Dispatch on how this particular diagnostic behaves under SFINAE.
    switch (DiagnosticIDs::getDiagnosticSFINAEResponse(DiagID: DiagInfo.getID())) {
    case DiagnosticIDs::SFINAE_Report:
      // We'll report the diagnostic below.
      break;

    case DiagnosticIDs::SFINAE_SubstitutionFailure:
      // Count this failure so that we know that template argument deduction
      // has failed.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information. Only the first SFINAE diagnostic is
      // kept per deduction.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        (*Info)->addSFINAEDiagnostic(Loc: DiagInfo.getLocation(),
                       PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);
      return;

    case DiagnosticIDs::SFINAE_AccessControl: {
      // Per C++ Core Issue 1170, access control is part of SFINAE.
      // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
      // make access control a part of SFINAE for the purposes of checking
      // type traits.
      if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
        break;

      SourceLocation Loc = DiagInfo.getLocation();

      // Suppress this diagnostic.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        (*Info)->addSFINAEDiagnostic(Loc: DiagInfo.getLocation(),
                       PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);

      // Now produce a C++98 compatibility warning.
      Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);

      // The last diagnostic which Sema produced was ignored. Suppress any
      // notes attached to it.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }

    case DiagnosticIDs::SFINAE_Suppress:
      // If the diagnostic is already disabled at this location, there is
      // nothing to record.
      if (DiagnosticsEngine::Level Level = getDiagnostics().getDiagnosticLevel(
              DiagID: DiagInfo.getID(), Loc: DiagInfo.getLocation());
          Level == DiagnosticsEngine::Ignored)
        return;
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information;
      if (*Info) {
        (*Info)->addSuppressedDiagnostic(
            Loc: DiagInfo.getLocation(),
            PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
        // Also capture the instantiation context notes, so they can be
        // replayed if the suppressed diagnostic is ever surfaced.
        if (!Diags.getDiagnosticIDs()->isNote(DiagID))
          PrintContextStack(DiagFunc: [Info](SourceLocation Loc, PartialDiagnostic PD) {
            (*Info)->addSuppressedDiagnostic(Loc, PD: std::move(PD));
          });
      }

      // Suppress this diagnostic.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }
  }

  // Copy the diagnostic printing policy over the ASTContext printing policy.
  // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
  Context.setPrintingPolicy(getPrintingPolicy());

  // Emit the diagnostic.
  if (!Diags.EmitDiagnostic(DB))
    return;

  // If this is not a note, and we're in a template instantiation
  // that is different from the last template instantiation where
  // we emitted an error, print a template instantiation
  // backtrace.
  if (!Diags.getDiagnosticIDs()->isNote(DiagID))
    PrintContextStack();
}
1779
1780bool Sema::hasUncompilableErrorOccurred() const {
1781 if (getDiagnostics().hasUncompilableErrorOccurred())
1782 return true;
1783 auto *FD = dyn_cast<FunctionDecl>(Val: CurContext);
1784 if (!FD)
1785 return false;
1786 auto Loc = DeviceDeferredDiags.find(Val: FD);
1787 if (Loc == DeviceDeferredDiags.end())
1788 return false;
1789 for (auto PDAt : Loc->second) {
1790 if (Diags.getDiagnosticIDs()->isDefaultMappingAsError(
1791 DiagID: PDAt.second.getDiagID()))
1792 return true;
1793 }
1794 return false;
1795}
1796
1797// Print notes showing how we can reach FD starting from an a priori
1798// known-callable function.
1799static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1800 auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(Val: FD);
1801 while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) {
1802 // Respect error limit.
1803 if (S.Diags.hasFatalErrorOccurred())
1804 return;
1805 DiagnosticBuilder Builder(
1806 S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1807 Builder << FnIt->second.FD;
1808 FnIt = S.CUDA().DeviceKnownEmittedFns.find(Val: FnIt->second.FD);
1809 }
1810}
1811
namespace {

/// Helper class that emits deferred diagnostic messages if an entity directly
/// or indirectly using the function that causes the deferred diagnostic
/// messages is known to be emitted.
///
/// During parsing of AST, certain diagnostic messages are recorded as deferred
/// diagnostics since it is unknown whether the functions containing such
/// diagnostics will be emitted. A list of potentially emitted functions and
/// variables that may potentially trigger emission of functions are also
/// recorded. DeferredDiagnosticsEmitter recursively visits used functions
/// by each function to emit deferred diagnostics.
///
/// During the visit, certain OpenMP directives or initializer of variables
/// with certain OpenMP attributes will cause subsequent visiting of any
/// functions enter a state which is called OpenMP device context in this
/// implementation. The state is exited when the directive or initializer is
/// exited. This state can change the emission states of subsequent uses
/// of functions.
///
/// Conceptually the functions or variables to be visited form a use graph
/// where the parent node uses the child node. At any point of the visit,
/// the tree nodes traversed from the tree root to the current node form a use
/// stack. The emission state of the current node depends on two factors:
/// 1. the emission state of the root node
/// 2. whether the current node is in OpenMP device context
/// If the function is decided to be emitted, its contained deferred diagnostics
/// are emitted, together with the information about the use stack.
///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0 and each
  // entering of device context increases it by 1 and each exit decreases
  // it by 1. Non-zero value indicates it is currently in device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  // Discarded statements are never emitted, so uses inside them cannot
  // trigger deferred diagnostics.
  bool shouldVisitDiscardedStmt() const { return false; }

  // Everything lexically inside a '#pragma omp target' directive is visited
  // in OpenMP device context.
  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(Node);
    --InOMPDeviceContext;
  }

  // Dispatch a used declaration: functions are analyzed via checkFunc (which
  // recurses into their bodies); uses of variables are intentionally ignored
  // here (file-scope variable initializers are handled as roots by checkVar).
  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    if (isa<VarDecl>(Val: D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  // Visitor member and parent dtors called by this dtor.
  void VisitCalledDestructors(CXXDestructorDecl *DD) {
    const CXXRecordDecl *RD = DD->getParent();

    // Visit the dtors of all members
    for (const FieldDecl *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (const auto *RT = FT->getAs<RecordType>())
        if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
          if (ClassDecl->hasDefinition())
            if (CXXDestructorDecl *MemberDtor = ClassDecl->getDestructor())
              asImpl().visitUsedDecl(MemberDtor->getLocation(), MemberDtor);
    }

    // Also visit base class dtors
    for (const auto &Base : RD->bases()) {
      QualType BaseType = Base.getType();
      if (const auto *RT = BaseType->getAs<RecordType>())
        if (const auto *BaseDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
          if (BaseDecl->hasDefinition())
            if (CXXDestructorDecl *BaseDtor = BaseDecl->getDestructor())
              asImpl().visitUsedDecl(BaseDtor->getLocation(), BaseDtor);
    }
  }

  void VisitDeclStmt(DeclStmt *DS) {
    // Visit dtors called by variables that need destruction
    for (auto *D : DS->decls())
      if (auto *VD = dyn_cast<VarDecl>(Val: D))
        if (VD->isThisDeclarationADefinition() &&
            VD->needsDestruction(Ctx: S.Context)) {
          QualType VT = VD->getType();
          if (const auto *RT = VT->getAs<RecordType>())
            if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
              if (ClassDecl->hasDefinition())
                if (CXXDestructorDecl *Dtor = ClassDecl->getDestructor())
                  asImpl().visitUsedDecl(Loc: Dtor->getLocation(), D: Dtor);
        }

    Inherited::VisitDeclStmt(DS);
  }

  // Visit the initializer of a file-scope variable. If the variable is an
  // OpenMP declare-target device (or any) variable, the initializer is
  // visited in OpenMP device context.
  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  // Analyze a (possibly transitively) used function: record the caller in
  // the known-emitted map, emit its deferred diagnostics if it is deemed
  // emitted, and recurse into its body and destructors.
  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    // Skip roots that will not be emitted (unless OpenMP needs the walk),
    // functions exempt from host/device checking, and recursive cycles.
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(Callee: FD) || InUsePath.count(FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, Callee: FD, Loc);
    if (Caller)
      S.CUDA().DeviceKnownEmittedFns[FD] = {.FD: Caller, .Loc: Loc};
    // Always emit deferred diagnostics for the direct users. This does not
    // lead to explosion of diagnostics since each user is visited at most
    // twice.
    if (ShouldEmitRootNode || InOMPDeviceContext)
      emitDeferredDiags(FD, ShowCallStack: Caller);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(FD).second)
      return;
    InUsePath.insert(FD);
    UsePath.push_back(Elt: FD);
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    if (CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: FD))
      asImpl().VisitCalledDestructors(DD: Dtor);
    UsePath.pop_back();
    InUsePath.erase(FD);
  }

  // Entry point for one recorded root declaration: a function root's
  // emission status determines ShouldEmitRootNode for the whole walk.
  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      ShouldEmitRootNode = S.getEmissionStatus(Decl: FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(Loc: SourceLocation(), FD);
    } else
      checkVar(VD: cast<VarDecl>(Val: D));
  }

  // Emit any deferred diagnostics for FD
  void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
    auto It = S.DeviceDeferredDiags.find(Val: FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    bool FirstDiag = true;
    for (PartialDiagnosticAt &PDAt : It->second) {
      // Respect error limit.
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(DiagID: PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        // Scope ensures the builder is destroyed (and the diag emitted)
        // before the call-stack notes below.
        DiagnosticBuilder Builder(S.Diags.Report(Loc, DiagID: PD.getDiagID()));
        PD.Emit(DB: Builder);
      }
      // Emit the note on the first diagnostic in case too many diagnostics
      // cause the note not emitted.
      if (FirstDiag && HasWarningOrError && ShowCallStack) {
        emitCallStackNotes(S, FD);
        FirstDiag = false;
      }
    }
  }
};
} // namespace
2013
2014void Sema::emitDeferredDiags() {
2015 if (ExternalSource)
2016 ExternalSource->ReadDeclsToCheckForDeferredDiags(
2017 Decls&: DeclsToCheckForDeferredDiags);
2018
2019 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
2020 DeclsToCheckForDeferredDiags.empty())
2021 return;
2022
2023 DeferredDiagnosticsEmitter DDE(*this);
2024 for (auto *D : DeclsToCheckForDeferredDiags)
2025 DDE.checkRecordedDecl(D);
2026}
2027
2028// In CUDA, there are some constructs which may appear in semantically-valid
2029// code, but trigger errors if we ever generate code for the function in which
2030// they appear. Essentially every construct you're not allowed to use on the
2031// device falls into this category, because you are allowed to use these
2032// constructs in a __host__ __device__ function, but only if that function is
2033// never codegen'ed on the device.
2034//
2035// To handle semantic checking for these constructs, we keep track of the set of
2036// functions we know will be emitted, either because we could tell a priori that
2037// they would be emitted, or because they were transitively called by a
2038// known-emitted function.
2039//
2040// We also keep a partial call graph of which not-known-emitted functions call
2041// which other not-known-emitted functions.
2042//
2043// When we see something which is illegal if the current function is emitted
2044// (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
2045// CheckCall), we first check if the current function is known-emitted. If
2046// so, we immediately output the diagnostic.
2047//
2048// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
2049// until we discover that the function is known-emitted, at which point we take
2050// it out of this map and emit the diagnostic.
2051
// Construct a diagnostic builder of the given kind. K_Nop does nothing;
// K_Immediate and K_ImmediateWithCallStack start emitting through the
// DiagnosticsEngine right away; K_Deferred records a partial diagnostic
// against Fn, to be emitted later if Fn is discovered to be emitted.
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
                                                   unsigned DiagID,
                                                   const FunctionDecl *Fn,
                                                   Sema &S)
    : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
      ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
  switch (K) {
  case K_Nop:
    break;
  case K_Immediate:
  case K_ImmediateWithCallStack:
    // The wrapped builder emits when this object is destroyed.
    ImmediateDiag.emplace(
        args: ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
    break;
  case K_Deferred:
    assert(Fn && "Must have a function to attach the deferred diag to.");
    auto &Diags = S.DeviceDeferredDiags[Fn];
    // Remember the index so operator<< can append arguments to the stored
    // partial diagnostic.
    PartialDiagId.emplace(args: Diags.size());
    Diags.emplace_back(Loc, S.PDiag(DiagID));
    break;
  }
}
2074
2075Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
2076 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
2077 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
2078 PartialDiagId(D.PartialDiagId) {
2079 // Clean the previous diagnostics.
2080 D.ShowCallStack = false;
2081 D.ImmediateDiag.reset();
2082 D.PartialDiagId.reset();
2083}
2084
2085Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
2086 if (ImmediateDiag) {
2087 // Emit our diagnostic and, if it was a warning or error, output a callstack
2088 // if Fn isn't a priori known-emitted.
2089 ImmediateDiag.reset(); // Emit the immediate diag.
2090
2091 if (ShowCallStack) {
2092 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
2093 DiagID, Loc) >= DiagnosticsEngine::Warning;
2094 if (IsWarningOrError)
2095 emitCallStackNotes(S, FD: Fn);
2096 }
2097 } else {
2098 assert((!PartialDiagId || ShowCallStack) &&
2099 "Must always show call stack for deferred diags.");
2100 }
2101}
2102
2103Sema::SemaDiagnosticBuilder
2104Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
2105 FD = FD ? FD : getCurFunctionDecl();
2106 if (LangOpts.OpenMP)
2107 return LangOpts.OpenMPIsTargetDevice
2108 ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
2109 : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
2110 if (getLangOpts().CUDA)
2111 return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
2112 : CUDA().DiagIfHostCode(Loc, DiagID);
2113
2114 if (getLangOpts().SYCLIsDevice)
2115 return SYCL().DiagIfDeviceCode(Loc, DiagID);
2116
2117 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
2118 FD, *this);
2119}
2120
// Diagnose use of a type that the current target (host or offload device)
// does not support, e.g. __float128 or _BitInt on targets lacking them.
// D, if non-null, is the declaration the type belongs to and is marked
// invalid when an unsupported type is found.
void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind checkTypeSupport function is that unused
  // declarations can be replaced with an array of bytes of the same size during
  // codegen, such replacement doesn't seem to be possible for types without
  // constant byte size like zero length arrays. So, do a deep check for SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    SYCL().deepTypeCheckForDevice(UsedAt: Loc, Visited, DeclToCheck: D);
  }

  Decl *C = cast<Decl>(Val: getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(Val: MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function, or
  // the value declaration otherwise.
  const FunctionDecl *FD = isa<FunctionDecl>(Val: C)
                               ? cast<FunctionDecl>(Val: C)
                               : dyn_cast_or_null<FunctionDecl>(Val: D);

  // Checks that only apply when compiling for an offload device (via
  // targetDiag, so they may be deferred until the function is known-emitted).
  auto CheckDeviceType = [&](QualType Ty) {
    if (Ty->isDependentType())
      return;

    if (Ty->isBitIntType()) {
      if (!Context.getTargetInfo().hasBitIntType()) {
        PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
        if (D)
          PD << D;
        else
          PD << "expression";
        targetDiag(Loc, PD, FD)
            << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // Check if we are dealing with two 'long double' but with different
    // semantics.
    bool LongDoubleMismatched = false;
    if (Ty->isRealFloatingType() && Context.getTypeSize(T: Ty) == 128) {
      const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: Ty);
      if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasFloat128Type()) ||
          (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasIbm128Type()))
        LongDoubleMismatched = true;
    }

    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(T: Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type()) ||
        (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
         !LangOpts.CUDAIsDevice) ||
        LongDoubleMismatched) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (targetDiag(Loc, PD, FD)
          << true /*show bit size*/
          << static_cast<unsigned>(Context.getTypeSize(T: Ty)) << Ty
          << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  // Checks that apply regardless of host/device; IsRetTy marks the function
  // return type, which some targets restrict separately (hasFPReturn).
  auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
    if (LangOpts.SYCLIsDevice ||
        (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
        LangOpts.CUDAIsDevice)
      CheckDeviceType(Ty);

    QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
    const TargetInfo &TI = Context.getTargetInfo();
    if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << false /*return*/
          << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    bool IsDouble = UnqualTy == Context.DoubleTy;
    bool IsFloat = UnqualTy == Context.FloatTy;
    if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << true /*return*/
          << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    // RVV sizeless types require the enclosing function to have the matching
    // target features enabled.
    if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(CallerFeatureMap, FD);
      RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
    }

    // Don't allow SVE types in functions without a SVE target.
    if (Ty->isSVESizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(CallerFeatureMap, FD);
      if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) {
        // Without SVE, SME streaming functions may still use these types.
        if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
          Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty;
        else if (!IsArmStreamingFunction(FD,
                                         /*IncludeLocallyStreaming=*/true)) {
          Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty;
        }
      }
    }
  };

  // Check the type itself; for function types, also check each parameter
  // type and the return type (which is flagged as such).
  CheckType(Ty);
  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Val&: Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Val&: Ty))
    CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
2284
2285bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2286 SourceLocation loc = locref;
2287 if (!loc.isMacroID()) return false;
2288
2289 // There's no good way right now to look at the intermediate
2290 // expansions, so just jump to the expansion location.
2291 loc = getSourceManager().getExpansionLoc(Loc: loc);
2292
2293 // If that's written with the name, stop here.
2294 SmallString<16> buffer;
2295 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2296 locref = loc;
2297 return true;
2298 }
2299 return false;
2300}
2301
2302Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2303
2304 if (!Ctx)
2305 return nullptr;
2306
2307 Ctx = Ctx->getPrimaryContext();
2308 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2309 // Ignore scopes that cannot have declarations. This is important for
2310 // out-of-line definitions of static class members.
2311 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2312 if (DeclContext *Entity = S->getEntity())
2313 if (Ctx == Entity->getPrimaryContext())
2314 return S;
2315 }
2316
2317 return nullptr;
2318}
2319
2320/// Enter a new function scope
2321void Sema::PushFunctionScope() {
2322 if (FunctionScopes.empty() && CachedFunctionScope) {
2323 // Use CachedFunctionScope to avoid allocating memory when possible.
2324 CachedFunctionScope->Clear();
2325 FunctionScopes.push_back(Elt: CachedFunctionScope.release());
2326 } else {
2327 FunctionScopes.push_back(Elt: new FunctionScopeInfo(getDiagnostics()));
2328 }
2329 if (LangOpts.OpenMP)
2330 OpenMP().pushOpenMPFunctionRegion();
2331}
2332
2333void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2334 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2335 BlockScope, Block));
2336 CapturingFunctionScopes++;
2337}
2338
2339LambdaScopeInfo *Sema::PushLambdaScope() {
2340 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2341 FunctionScopes.push_back(LSI);
2342 CapturingFunctionScopes++;
2343 return LSI;
2344}
2345
2346void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2347 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2348 LSI->AutoTemplateParameterDepth = Depth;
2349 return;
2350 }
2351 llvm_unreachable(
2352 "Remove assertion if intentionally called in a non-lambda context.");
2353}
2354
2355// Check that the type of the VarDecl has an accessible copy constructor and
2356// resolve its destructor's exception specification.
2357// This also performs initialization of block variables when they are moved
2358// to the heap. It uses the same rules as applicable for implicit moves
2359// according to the C++ standard in effect ([class.copy.elision]p3).
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  // Build a synthetic reference to the variable to serve as the source of
  // the copy/move performed when the __block variable is moved to the heap.
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result;
  auto IE = InitializedEntity::InitializeBlock(BlockVarLoc: Loc, Type: T);
  if (S.getLangOpts().CPlusPlus23) {
    // C++23 simplified implicit move: treat the source as an xvalue.
    auto *E = ImplicitCastExpr::Create(Context: S.Context, T, Kind: CK_NoOp, Operand: VarRef, BasePath: nullptr,
                                       Cat: VK_XValue, FPO: FPOptionsOverride());
    Result = S.PerformCopyInitialization(Entity: IE, EqualLoc: SourceLocation(), Init: E);
  } else {
    // Pre-C++23: use the implicit move/copy rules ([class.copy.elision]p3).
    Result = S.PerformMoveOrCopyInitialization(
        Entity: IE, NRInfo: Sema::NamedReturnInfo{.Candidate: VD, .S: Sema::NamedReturnInfo::MoveEligible},
        Value: VarRef);
  }

  if (!Result.isInvalid()) {
    Result = S.MaybeCreateExprWithCleanups(SubExpr: Result);
    Expr *Init = Result.getAs<Expr>();
    // Record the copy-initializer, and whether running it can throw, for
    // IRGen to use when emitting the block copy helper.
    S.Context.setBlockVarCopyInit(VD, CopyExpr: Init, CanThrow: S.canThrow(Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->castAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT: FPT);
    }
}
2393
2394static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2395 // Set the EscapingByref flag of __block variables captured by
2396 // escaping blocks.
2397 for (const BlockDecl *BD : FSI.Blocks) {
2398 for (const BlockDecl::Capture &BC : BD->captures()) {
2399 VarDecl *VD = BC.getVariable();
2400 if (VD->hasAttr<BlocksAttr>()) {
2401 // Nothing to do if this is a __block variable captured by a
2402 // non-escaping block.
2403 if (BD->doesNotEscape())
2404 continue;
2405 VD->setEscapingByref();
2406 }
2407 // Check whether the captured variable is or contains an object of
2408 // non-trivial C union type.
2409 QualType CapType = BC.getVariable()->getType();
2410 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2411 CapType.hasNonTrivialToPrimitiveCopyCUnion())
2412 S.checkNonTrivialCUnion(QT: BC.getVariable()->getType(),
2413 Loc: BD->getCaretLocation(),
2414 UseContext: NonTrivialCUnionContext::BlockCapture,
2415 NonTrivialKind: Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
2416 }
2417 }
2418
2419 for (VarDecl *VD : FSI.ByrefBlockVars) {
2420 // __block variables might require us to capture a copy-initializer.
2421 if (!VD->isEscapingByref())
2422 continue;
2423 // It's currently invalid to ever have a __block variable with an
2424 // array type; should we diagnose that here?
2425 // Regardless, we don't want to ignore array nesting when
2426 // constructing this copy.
2427 if (VD->getType()->isStructureOrClassType())
2428 checkEscapingByref(VD, S);
2429 }
2430}
2431
2432Sema::PoppedFunctionScopePtr
2433Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
2434 const Decl *D, QualType BlockType) {
2435 assert(!FunctionScopes.empty() && "mismatched push/pop!");
2436
2437 markEscapingByrefs(FSI: *FunctionScopes.back(), S&: *this);
2438
2439 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
2440 PoppedFunctionScopeDeleter(this));
2441
2442 if (LangOpts.OpenMP)
2443 OpenMP().popOpenMPFunctionRegion(OldFSI: Scope.get());
2444
2445 // Issue any analysis-based warnings.
2446 if (WP && D)
2447 AnalysisWarnings.IssueWarnings(P: *WP, fscope: Scope.get(), D, BlockType);
2448 else
2449 for (const auto &PUD : Scope->PossiblyUnreachableDiags)
2450 Diag(PUD.Loc, PUD.PD);
2451
2452 return Scope;
2453}
2454
2455void Sema::PoppedFunctionScopeDeleter::
2456operator()(sema::FunctionScopeInfo *Scope) const {
2457 if (!Scope->isPlainFunction())
2458 Self->CapturingFunctionScopes--;
2459 // Stash the function scope for later reuse if it's for a normal function.
2460 if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2461 Self->CachedFunctionScope.reset(p: Scope);
2462 else
2463 delete Scope;
2464}
2465
2466void Sema::PushCompoundScope(bool IsStmtExpr) {
2467 getCurFunction()->CompoundScopes.push_back(
2468 Elt: CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2469}
2470
2471void Sema::PopCompoundScope() {
2472 FunctionScopeInfo *CurFunction = getCurFunction();
2473 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2474
2475 CurFunction->CompoundScopes.pop_back();
2476}
2477
2478bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2479 return getCurFunction()->hasUnrecoverableErrorOccurred();
2480}
2481
2482void Sema::setFunctionHasBranchIntoScope() {
2483 if (!FunctionScopes.empty())
2484 FunctionScopes.back()->setHasBranchIntoScope();
2485}
2486
2487void Sema::setFunctionHasBranchProtectedScope() {
2488 if (!FunctionScopes.empty())
2489 FunctionScopes.back()->setHasBranchProtectedScope();
2490}
2491
2492void Sema::setFunctionHasIndirectGoto() {
2493 if (!FunctionScopes.empty())
2494 FunctionScopes.back()->setHasIndirectGoto();
2495}
2496
2497void Sema::setFunctionHasMustTail() {
2498 if (!FunctionScopes.empty())
2499 FunctionScopes.back()->setHasMustTail();
2500}
2501
2502BlockScopeInfo *Sema::getCurBlock() {
2503 if (FunctionScopes.empty())
2504 return nullptr;
2505
2506 auto CurBSI = dyn_cast<BlockScopeInfo>(Val: FunctionScopes.back());
2507 if (CurBSI && CurBSI->TheDecl &&
2508 !CurBSI->TheDecl->Encloses(CurContext)) {
2509 // We have switched contexts due to template instantiation.
2510 assert(!CodeSynthesisContexts.empty());
2511 return nullptr;
2512 }
2513
2514 return CurBSI;
2515}
2516
2517FunctionScopeInfo *Sema::getEnclosingFunction() const {
2518 if (FunctionScopes.empty())
2519 return nullptr;
2520
2521 for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2522 if (isa<sema::BlockScopeInfo>(Val: FunctionScopes[e]))
2523 continue;
2524 return FunctionScopes[e];
2525 }
2526 return nullptr;
2527}
2528
2529CapturingScopeInfo *Sema::getEnclosingLambdaOrBlock() const {
2530 for (auto *Scope : llvm::reverse(C: FunctionScopes)) {
2531 if (auto *CSI = dyn_cast<CapturingScopeInfo>(Val: Scope)) {
2532 auto *LSI = dyn_cast<LambdaScopeInfo>(Val: CSI);
2533 if (LSI && LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2534 LSI->AfterParameterList) {
2535 // We have switched contexts due to template instantiation.
2536 // FIXME: We should swap out the FunctionScopes during code synthesis
2537 // so that we don't need to check for this.
2538 assert(!CodeSynthesisContexts.empty());
2539 return nullptr;
2540 }
2541 return CSI;
2542 }
2543 }
2544 return nullptr;
2545}
2546
2547LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2548 if (FunctionScopes.empty())
2549 return nullptr;
2550
2551 auto I = FunctionScopes.rbegin();
2552 if (IgnoreNonLambdaCapturingScope) {
2553 auto E = FunctionScopes.rend();
2554 while (I != E && isa<CapturingScopeInfo>(Val: *I) && !isa<LambdaScopeInfo>(Val: *I))
2555 ++I;
2556 if (I == E)
2557 return nullptr;
2558 }
2559 auto *CurLSI = dyn_cast<LambdaScopeInfo>(Val: *I);
2560 if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
2561 !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
2562 // We have switched contexts due to template instantiation.
2563 assert(!CodeSynthesisContexts.empty());
2564 return nullptr;
2565 }
2566
2567 return CurLSI;
2568}
2569
2570// We have a generic lambda if we parsed auto parameters, or we have
2571// an associated template parameter list.
2572LambdaScopeInfo *Sema::getCurGenericLambda() {
2573 if (LambdaScopeInfo *LSI = getCurLambda()) {
2574 return (LSI->TemplateParams.size() ||
2575 LSI->GLTemplateParameterList) ? LSI : nullptr;
2576 }
2577 return nullptr;
2578}
2579
2580
2581void Sema::ActOnComment(SourceRange Comment) {
2582 if (!LangOpts.RetainCommentsFromSystemHeaders &&
2583 SourceMgr.isInSystemHeader(Loc: Comment.getBegin()))
2584 return;
2585 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
2586 if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
2587 SourceRange MagicMarkerRange(Comment.getBegin(),
2588 Comment.getBegin().getLocWithOffset(Offset: 3));
2589 StringRef MagicMarkerText;
2590 switch (RC.getKind()) {
2591 case RawComment::RCK_OrdinaryBCPL:
2592 MagicMarkerText = "///<";
2593 break;
2594 case RawComment::RCK_OrdinaryC:
2595 MagicMarkerText = "/**<";
2596 break;
2597 case RawComment::RCK_Invalid:
2598 // FIXME: are there other scenarios that could produce an invalid
2599 // raw comment here?
2600 Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
2601 return;
2602 default:
2603 llvm_unreachable("if this is an almost Doxygen comment, "
2604 "it should be ordinary");
2605 }
2606 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
2607 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
2608 }
2609 Context.addComment(RC);
2610}
2611
2612// Pin this vtable to this file.
2613ExternalSemaSource::~ExternalSemaSource() {}
2614char ExternalSemaSource::ID;
2615
2616void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
2617void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
2618
2619void ExternalSemaSource::ReadKnownNamespaces(
2620 SmallVectorImpl<NamespaceDecl *> &Namespaces) {
2621}
2622
2623void ExternalSemaSource::ReadUndefinedButUsed(
2624 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
2625
2626void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
2627 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2628
2629bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
2630 UnresolvedSetImpl &OverloadSet) {
2631 ZeroArgCallReturnTy = QualType();
2632 OverloadSet.clear();
2633
2634 const OverloadExpr *Overloads = nullptr;
2635 bool IsMemExpr = false;
2636 if (E.getType() == Context.OverloadTy) {
2637 OverloadExpr::FindResult FR = OverloadExpr::find(E: &E);
2638
2639 // Ignore overloads that are pointer-to-member constants.
2640 if (FR.HasFormOfMemberPointer)
2641 return false;
2642
2643 Overloads = FR.Expression;
2644 } else if (E.getType() == Context.BoundMemberTy) {
2645 Overloads = dyn_cast<UnresolvedMemberExpr>(Val: E.IgnoreParens());
2646 IsMemExpr = true;
2647 }
2648
2649 bool Ambiguous = false;
2650 bool IsMV = false;
2651
2652 if (Overloads) {
2653 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
2654 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
2655 OverloadSet.addDecl(D: *it);
2656
2657 // Check whether the function is a non-template, non-member which takes no
2658 // arguments.
2659 if (IsMemExpr)
2660 continue;
2661 if (const FunctionDecl *OverloadDecl
2662 = dyn_cast<FunctionDecl>(Val: (*it)->getUnderlyingDecl())) {
2663 if (OverloadDecl->getMinRequiredArguments() == 0) {
2664 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
2665 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
2666 OverloadDecl->isCPUSpecificMultiVersion()))) {
2667 ZeroArgCallReturnTy = QualType();
2668 Ambiguous = true;
2669 } else {
2670 ZeroArgCallReturnTy = OverloadDecl->getReturnType();
2671 IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
2672 OverloadDecl->isCPUSpecificMultiVersion();
2673 }
2674 }
2675 }
2676 }
2677
2678 // If it's not a member, use better machinery to try to resolve the call
2679 if (!IsMemExpr)
2680 return !ZeroArgCallReturnTy.isNull();
2681 }
2682
2683 // Attempt to call the member with no arguments - this will correctly handle
2684 // member templates with defaults/deduction of template arguments, overloads
2685 // with default arguments, etc.
2686 if (IsMemExpr && !E.isTypeDependent()) {
2687 Sema::TentativeAnalysisScope Trap(*this);
2688 ExprResult R = BuildCallToMemberFunction(S: nullptr, MemExpr: &E, LParenLoc: SourceLocation(), Args: {},
2689 RParenLoc: SourceLocation());
2690 if (R.isUsable()) {
2691 ZeroArgCallReturnTy = R.get()->getType();
2692 return true;
2693 }
2694 return false;
2695 }
2696
2697 if (const auto *DeclRef = dyn_cast<DeclRefExpr>(Val: E.IgnoreParens())) {
2698 if (const auto *Fun = dyn_cast<FunctionDecl>(Val: DeclRef->getDecl())) {
2699 if (Fun->getMinRequiredArguments() == 0)
2700 ZeroArgCallReturnTy = Fun->getReturnType();
2701 return true;
2702 }
2703 }
2704
2705 // We don't have an expression that's convenient to get a FunctionDecl from,
2706 // but we can at least check if the type is "function of 0 arguments".
2707 QualType ExprTy = E.getType();
2708 const FunctionType *FunTy = nullptr;
2709 QualType PointeeTy = ExprTy->getPointeeType();
2710 if (!PointeeTy.isNull())
2711 FunTy = PointeeTy->getAs<FunctionType>();
2712 if (!FunTy)
2713 FunTy = ExprTy->getAs<FunctionType>();
2714
2715 if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(Val: FunTy)) {
2716 if (FPT->getNumParams() == 0)
2717 ZeroArgCallReturnTy = FunTy->getReturnType();
2718 return true;
2719 }
2720 return false;
2721}
2722
2723/// Give notes for a set of overloads.
2724///
2725/// A companion to tryExprAsCall. In cases when the name that the programmer
2726/// wrote was an overloaded function, we may be able to make some guesses about
2727/// plausible overloads based on their return types; such guesses can be handed
2728/// off to this method to be emitted as notes.
2729///
2730/// \param Overloads - The overloads to note.
2731/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2732/// -fshow-overloads=best, this is the location to attach to the note about too
2733/// many candidates. Typically this will be the location of the original
2734/// ill-formed expression.
2735static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2736 const SourceLocation FinalNoteLoc) {
2737 unsigned ShownOverloads = 0;
2738 unsigned SuppressedOverloads = 0;
2739 for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2740 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2741 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2742 ++SuppressedOverloads;
2743 continue;
2744 }
2745
2746 const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2747 // Don't print overloads for non-default multiversioned functions.
2748 if (const auto *FD = Fn->getAsFunction()) {
2749 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2750 !FD->getAttr<TargetAttr>()->isDefaultVersion())
2751 continue;
2752 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2753 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2754 continue;
2755 }
2756 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2757 ++ShownOverloads;
2758 }
2759
2760 S.Diags.overloadCandidatesShown(N: ShownOverloads);
2761
2762 if (SuppressedOverloads)
2763 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2764 << SuppressedOverloads;
2765}
2766
2767static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2768 const UnresolvedSetImpl &Overloads,
2769 bool (*IsPlausibleResult)(QualType)) {
2770 if (!IsPlausibleResult)
2771 return noteOverloads(S, Overloads, FinalNoteLoc: Loc);
2772
2773 UnresolvedSet<2> PlausibleOverloads;
2774 for (OverloadExpr::decls_iterator It = Overloads.begin(),
2775 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2776 const auto *OverloadDecl = cast<FunctionDecl>(Val: *It);
2777 QualType OverloadResultTy = OverloadDecl->getReturnType();
2778 if (IsPlausibleResult(OverloadResultTy))
2779 PlausibleOverloads.addDecl(D: It.getDecl());
2780 }
2781 noteOverloads(S, Overloads: PlausibleOverloads, FinalNoteLoc: Loc);
2782}
2783
2784/// Determine whether the given expression can be called by just
2785/// putting parentheses after it. Notably, expressions with unary
2786/// operators can't be because the unary operator will start parsing
2787/// outside the call.
2788static bool IsCallableWithAppend(const Expr *E) {
2789 E = E->IgnoreImplicit();
2790 return (!isa<CStyleCastExpr>(Val: E) &&
2791 !isa<UnaryOperator>(Val: E) &&
2792 !isa<BinaryOperator>(Val: E) &&
2793 !isa<CXXOperatorCallExpr>(Val: E));
2794}
2795
2796static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2797 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2798 E = UO->getSubExpr();
2799
2800 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(Val: E)) {
2801 if (ULE->getNumDecls() == 0)
2802 return false;
2803
2804 const NamedDecl *ND = *ULE->decls_begin();
2805 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2806 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2807 }
2808 return false;
2809}
2810
2811bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
2812 bool ForceComplain,
2813 bool (*IsPlausibleResult)(QualType)) {
2814 SourceLocation Loc = E.get()->getExprLoc();
2815 SourceRange Range = E.get()->getSourceRange();
2816 UnresolvedSet<4> Overloads;
2817
2818 // If this is a SFINAE context, don't try anything that might trigger ADL
2819 // prematurely.
2820 if (!isSFINAEContext()) {
2821 QualType ZeroArgCallTy;
2822 if (tryExprAsCall(E&: *E.get(), ZeroArgCallReturnTy&: ZeroArgCallTy, OverloadSet&: Overloads) &&
2823 !ZeroArgCallTy.isNull() &&
2824 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
2825 // At this point, we know E is potentially callable with 0
2826 // arguments and that it returns something of a reasonable type,
2827 // so we can emit a fixit and carry on pretending that E was
2828 // actually a CallExpr.
2829 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Loc: Range.getEnd());
2830 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
2831 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
2832 << (IsCallableWithAppend(E: E.get())
2833 ? FixItHint::CreateInsertion(InsertionLoc: ParenInsertionLoc,
2834 Code: "()")
2835 : FixItHint());
2836 if (!IsMV)
2837 notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);
2838
2839 // FIXME: Try this before emitting the fixit, and suppress diagnostics
2840 // while doing so.
2841 E = BuildCallExpr(S: nullptr, Fn: E.get(), LParenLoc: Range.getEnd(), ArgExprs: {},
2842 RParenLoc: Range.getEnd().getLocWithOffset(Offset: 1));
2843 return true;
2844 }
2845 }
2846 if (!ForceComplain) return false;
2847
2848 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
2849 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
2850 if (!IsMV)
2851 notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);
2852 E = ExprError();
2853 return true;
2854}
2855
2856IdentifierInfo *Sema::getSuperIdentifier() const {
2857 if (!Ident_super)
2858 Ident_super = &Context.Idents.get(Name: "super");
2859 return Ident_super;
2860}
2861
2862void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2863 CapturedRegionKind K,
2864 unsigned OpenMPCaptureLevel) {
2865 auto *CSI = new CapturedRegionScopeInfo(
2866 getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2867 (getLangOpts().OpenMP && K == CR_OpenMP)
2868 ? OpenMP().getOpenMPNestingLevel()
2869 : 0,
2870 OpenMPCaptureLevel);
2871 CSI->ReturnType = Context.VoidTy;
2872 FunctionScopes.push_back(CSI);
2873 CapturingFunctionScopes++;
2874}
2875
2876CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2877 if (FunctionScopes.empty())
2878 return nullptr;
2879
2880 return dyn_cast<CapturedRegionScopeInfo>(Val: FunctionScopes.back());
2881}
2882
2883const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
2884Sema::getMismatchingDeleteExpressions() const {
2885 return DeleteExprs;
2886}
2887
2888Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
2889 : S(S), OldFPFeaturesState(S.CurFPFeatures),
2890 OldOverrides(S.FpPragmaStack.CurrentValue),
2891 OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
2892 OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2893
2894Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
2895 S.CurFPFeatures = OldFPFeaturesState;
2896 S.FpPragmaStack.CurrentValue = OldOverrides;
2897 S.PP.setCurrentFPEvalMethod(PragmaLoc: OldFPPragmaLocation, Val: OldEvalMethod);
2898}
2899
2900bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2901 assert(D.getCXXScopeSpec().isSet() &&
2902 "can only be called for qualified names");
2903
2904 auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2905 LookupOrdinaryName, forRedeclarationInCurContext());
2906 DeclContext *DC = computeDeclContext(SS: D.getCXXScopeSpec(),
2907 EnteringContext: !D.getDeclSpec().isFriendSpecified());
2908 if (!DC)
2909 return false;
2910
2911 LookupQualifiedName(R&: LR, LookupCtx: DC);
2912 bool Result = llvm::all_of(Range&: LR, P: [](Decl *Dcl) {
2913 if (NamedDecl *ND = dyn_cast<NamedDecl>(Val: Dcl)) {
2914 ND = ND->getUnderlyingDecl();
2915 return isa<FunctionDecl>(Val: ND) || isa<FunctionTemplateDecl>(Val: ND) ||
2916 isa<UsingDecl>(Val: ND);
2917 }
2918 return false;
2919 });
2920 return Result;
2921}
2922
2923Attr *Sema::CreateAnnotationAttr(const AttributeCommonInfo &CI, StringRef Annot,
2924 MutableArrayRef<Expr *> Args) {
2925
2926 auto *A = AnnotateAttr::Create(Context, Annot, Args.data(), Args.size(), CI);
2927 if (!ConstantFoldAttrArgs(
2928 CI, Args: MutableArrayRef<Expr *>(A->args_begin(), A->args_end()))) {
2929 return nullptr;
2930 }
2931 return A;
2932}
2933
2934Attr *Sema::CreateAnnotationAttr(const ParsedAttr &AL) {
2935 // Make sure that there is a string literal as the annotation's first
2936 // argument.
2937 StringRef Str;
2938 if (!checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str))
2939 return nullptr;
2940
2941 llvm::SmallVector<Expr *, 4> Args;
2942 Args.reserve(N: AL.getNumArgs() - 1);
2943 for (unsigned Idx = 1; Idx < AL.getNumArgs(); Idx++) {
2944 assert(!AL.isArgIdent(Idx));
2945 Args.push_back(Elt: AL.getArgAsExpr(Arg: Idx));
2946 }
2947
2948 return CreateAnnotationAttr(CI: AL, Annot: Str, Args);
2949}
2950
// (Trailing web-scrape residue removed: navigation/footer text from a code
// browser, not part of the translation unit.)