//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the actions class which performs semantic analysis and
// builds an AST out of a parse stream.
//
//===----------------------------------------------------------------------===//

#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/TimeProfiler.h"
#include <optional>

using namespace clang;
using namespace sema;

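/// Return the location just past the end of the token at \p Loc (plus
/// \p Offset), using this Sema's SourceManager and LangOptions.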
SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
  return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}

ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }

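/// Retrieve the cached Darwin SDK information for availability checking,
/// warning once at \p Loc if the SDK settings could not be found.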
DarwinSDKInfo *
Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                              StringRef Platform) {
  auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
  if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
    Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
        << Platform;
    WarnedDarwinSDKInfoMissing = true;
  }
  return SDKInfo;
}

DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
  if (CachedDarwinSDKInfo)
    return CachedDarwinSDKInfo->get();
  auto SDKInfo = parseDarwinSDKInfo(
      PP.getFileManager().getVirtualFileSystem(),
      PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
  if (SDKInfo && *SDKInfo) {
    CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
    return CachedDarwinSDKInfo->get();
  }
  if (!SDKInfo)
    llvm::consumeError(SDKInfo.takeError());
  CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
  return nullptr;
}

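/// Invent a display name for an abbreviated ('auto') template parameter,
/// e.g. "auto:1" for an unnamed parameter or "Param:auto" for a named one.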
IdentifierInfo *
Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                                 unsigned int Index) {
  std::string InventedName;
  llvm::raw_string_ostream OS(InventedName);

  if (!ParamName)
    OS << "auto:" << Index + 1;
  else
    OS << ParamName->getName() << ":auto";

  OS.flush();
  return &Context.Idents.get(OS.str());
}

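/// Compute the printing policy used in diagnostics from the ASTContext's
/// policy and the current preprocessor state (e.g. whether 'bool' is defined
/// as a macro for '_Bool').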
PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
                                       const Preprocessor &PP) {
  PrintingPolicy Policy = Context.getPrintingPolicy();
  // In diagnostics, we print _Bool as bool if the latter is defined as the
  // former.
  Policy.Bool = Context.getLangOpts().Bool;
  if (!Policy.Bool) {
    if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
      Policy.Bool = BoolMacro->isObjectLike() &&
                    BoolMacro->getNumTokens() == 1 &&
                    BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
    }
  }

  // Shorten the data output if needed
  Policy.EntireContentsOfLargeArray = false;

  return Policy;
}

void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}

namespace clang {
namespace sema {

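/// PPCallbacks implementation that lets Sema observe file enter/exit events:
/// it maintains a stack of include locations used to diagnose non-default
/// #pragma align/pack state across includes, and brackets each included file
/// with a time-trace "Source" scope.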
class SemaPPCallbacks : public PPCallbacks {
  Sema *S = nullptr;
  llvm::SmallVector<SourceLocation, 8> IncludeStack;

public:
  void set(Sema &S) { this->S = &S; }

  void reset() { S = nullptr; }

  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
          llvm::timeTraceProfilerBegin("Source", FE ? FE->getName()
                                                    : StringRef("<unknown>"));
        }

        IncludeStack.push_back(IncludeLoc);
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd();

        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
};

} // end namespace sema
} // end namespace clang

const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;

Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : ExternalSource(nullptr), CurFPFeatures(pp.getLangOpts()),
      LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
      Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
      APINotes(SourceMgr, LangOpts), CollectStats(false),
      CodeCompleter(CodeCompleter), CurContext(nullptr),
      OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
      FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
      VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
      IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
      LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
      StdInitializerList(nullptr), StdCoroutineTraitsCache(nullptr),
      CXXTypeInfoDecl(nullptr), StdSourceLocationImplDecl(nullptr),
      NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
      StringWithUTF8StringMethod(nullptr),
      ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
      ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
      DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
      TUKind(TUKind), NumSFINAEErrors(0),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      SatisfactionCache(Context), AccessCheckingSFINAE(false),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
      DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
      ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
      CurScope(nullptr), Ident_super(nullptr) {
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  InitDataSharingAttributesStack();

  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}

// Anchor Sema's type info to this TU.
void Sema::anchor() {}

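/// Declare an implicit file-scope typedef named \p Name for the type \p T,
/// unless the name already refers to some declaration.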
void Sema::addImplicitTypedef(StringRef Name, QualType T) {
  DeclarationName DN = &Context.Idents.get(Name);
  if (IdResolver.begin(DN) == IdResolver.end())
    PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
}

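/// Hook up the AST consumer and external source, and pre-declare the builtin
/// types, typedefs and special declarations expected by the current language
/// options and target.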
void Sema::Initialize() {
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(
          Context.buildImplicitRecord("type_info", TagTypeKind::Class),
          TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCL C v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCL C v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(Context.getPointerDiffType());
        addImplicitTypedef("atomic_size_t", AtomicSizeT);
        addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
        addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
        addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
      };

      if (Context.getTypeSize(Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
        addImplicitTypedef("atomic_half", AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
                                         getLangOpts()) &&
          getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
                                         getLangOpts())) {
        if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
          addImplicitTypedef("atomic_double", AtomicDoubleT);
          Atomic64BitTypes.push_back(AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
        addImplicitTypedef("atomic_long", AtomicLongT);
        addImplicitTypedef("atomic_ulong", AtomicULongT);

        if (Context.getTypeSize(Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
    addImplicitTypedef(#ExtType, Context.Id##Ty); \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Context.getTargetInfo().getTriple().isWasm() &&
      Context.getTargetInfo().hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
    if (IdResolver.begin(MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
  if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}

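/// Tear down this Sema instance: free function scopes and cached constraint
/// satisfactions, and notify the consumer, the external source and the PP
/// callback handler that Sema is going away.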
Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // Delete cached satisfactions.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}

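/// Warn (only once per Sema instance) that the compiler is running low on
/// stack space.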
void Sema::warnStackExhausted(SourceLocation Loc) {
  // Only warn about this once.
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                         UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
  assert(E && "Cannot use with NULL ptr");

  if (!ExternalSource) {
    ExternalSource = E;
    return;
  }

  if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
    Ex->AddSource(E);
  else
    ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
}

/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
  llvm::errs() << "\n*** Semantic Analysis Stats:\n";
  llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";

  BumpAlloc.PrintStats();
  AnalysisWarnings.PrintStats();
}

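/// Warn when an expression of nullable type is implicitly converted to a
/// nonnull destination type.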
void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
                                               QualType SrcType,
                                               SourceLocation Loc) {
  std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
  if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
                           *ExprNullability != NullabilityKind::NullableResult))
    return;

  std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
  if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
    return;

  Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}

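/// Warn when a zero literal is implicitly converted to a null pointer value
/// in C++11 or later, suggesting 'nullptr' as a fix-it.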
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
  // nullptr only exists from C++11 on, so don't warn on its absence earlier.
  if (!getLangOpts().CPlusPlus11)
    return;

  if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    return;

  const Expr *EStripped = E->IgnoreParenImpCasts();
  if (EStripped->getType()->isNullPtrType())
    return;
  if (isa<GNUNullExpr>(EStripped))
    return;

  if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
                      E->getBeginLoc()))
    return;

  // Don't diagnose the conversion from a 0 literal to a null pointer argument
  // in a synthesized call to operator<=>.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.back().Kind ==
          CodeSynthesisContext::RewritingOperatorAsSpaceship)
    return;

  // Ignore null pointers in defaulted comparison operators.
  FunctionDecl *FD = getCurFunctionDecl();
  if (FD && FD->isDefaulted()) {
    return;
  }

  // If it is a macro from system header, and if the macro name is not "NULL",
  // do not warn.
  // Note that uses of "NULL" will be ignored above on systems that define it
  // as __null.
  SourceLocation MaybeMacroLoc = E->getBeginLoc();
  if (Diags.getSuppressSystemWarnings() &&
      SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
      !findMacroSpelling(MaybeMacroLoc, "NULL"))
    return;

  Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
      << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
      break;
    }
  }
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);

  QualType ExprTy = Context.getCanonicalType(E->getType());
  QualType TypeTy = Context.getCanonicalType(Ty);

  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          E->getType(), E, !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly (by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly (by converting an array name to a pointer as
    // discussed in 6.3.2.1). Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
        if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(E->getExprLoc(), diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
                                  CurFPFeatureOverrides());
}

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
  switch (ScalarTy->getScalarTypeKind()) {
  case Type::STK_Bool: return CK_NoOp;
  case Type::STK_CPointer: return CK_PointerToBoolean;
  case Type::STK_BlockPointer: return CK_PointerToBoolean;
  case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
  case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
  case Type::STK_Integral: return CK_IntegralToBoolean;
  case Type::STK_Floating: return CK_FloatingToBoolean;
  case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
  case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
  case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
  }
  llvm_unreachable("unknown scalar type kind");
}

/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
  if (D->getMostRecentDecl()->isUsed())
    return true;

  if (D->isExternallyVisible())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // If this is a function template and none of its specializations is used,
    // we should warn.
    if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const FunctionDecl *DeclToCheck;
    if (FD->hasBody(DeclToCheck))
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = FD->getMostRecentDecl();
    if (DeclToCheck != FD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // If a variable usable in constant expressions is referenced,
    // don't warn if it isn't used: if the value of a variable is required
    // for the computation of a constant expression, it doesn't make sense to
    // warn even if the variable isn't odr-used. (isReferenced doesn't
    // precisely reflect that, but it's a decent approximation.)
    if (VD->isReferenced() &&
        VD->mightBeUsableInConstantExpressions(SemaRef->Context))
      return true;

    if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
      // If this is a variable template and none of its specializations is used,
      // we should warn.
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const VarDecl *DeclToCheck = VD->getDefinition();
    if (DeclToCheck)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = VD->getMostRecentDecl();
    if (DeclToCheck != VD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  return false;
}

static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return FD->isExternC();
  return cast<VarDecl>(ND)->isExternC();
}

/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
  // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
  // because we also want to catch the case where its type has VisibleNoLinkage,
  // which does not affect the linkage of VD.
  return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
         !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
         !isFunctionOrVarDeclExternC(VD);
}

/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore attributes that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    if (isa<CXXDeductionGuideDecl>(ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
      if (FD->isDefined())
        continue;
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      if (FD->getBuiltinID())
        continue;
    } else {
      const auto *VD = cast<VarDecl>(ND);
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
  }
}

/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  S.UndefinedButUsed.clear();
  if (Undefined.empty()) return;

  for (const auto &Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
          << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(VD->getLocation(), diag::warn_undefined_internal)
            << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }
}

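/// Import #pragma weak identifiers recorded by the external source so that
/// still-undeclared ones can be diagnosed at the end of the TU.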
void Sema::LoadExternalWeakUndeclaredIdentifiers() {
  if (!ExternalSource)
    return;

  SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
  ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
  for (auto &WeakID : WeakIDs)
    (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
}

typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;

/// Returns true, if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
                                            RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
  if (Cache != MNCComplete.end())
    return Cache->second;
  if (!RD->isCompleteDefinition())
    return false;
  bool Complete = true;
  for (DeclContext::decl_iterator I = RD->decls_begin(),
                                  E = RD->decls_end();
       I != E && Complete; ++I) {
    if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
      Complete = M->isDefined() || M->isDefaulted() ||
                 (M->isPureVirtual() && !isa<CXXDestructorDecl>(M));
    else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
      // If the template function is marked as late template parsed at this
      // point, it has not been instantiated and therefore we have not
      // performed semantic analysis on it yet, so we cannot know if the type
      // can be considered complete.
      Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
                 F->getTemplatedDecl()->isDefined();
    else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
      if (R->isInjectedClassName())
        continue;
      if (R->hasDefinition())
        Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
                                                   MNCComplete);
      else
        Complete = false;
    }
  }
  MNCComplete[RD] = Complete;
  return Complete;
}

/// Returns true, if the given CXXRecordDecl is fully defined in this
/// translation unit, i.e. all methods are defined or pure virtual and all
/// friends, friend functions and nested classes are fully defined in this
/// translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
                                 RecordCompleteMap &RecordsComplete,
                                 RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
  if (Cache != RecordsComplete.end())
    return Cache->second;
  bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
  for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
                                      E = RD->friend_end();
       I != E && Complete; ++I) {
    // Check if friend classes and methods are complete.
    if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
      // Friend classes are available as the TypeSourceInfo of the FriendDecl.
      if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
        Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
      else
        Complete = false;
    } else {
      // Friend functions are available through the NamedDecl of FriendDecl.
      if (const FunctionDecl *FD =
              dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
        Complete = FD->isDefined();
      else
        // This is a template friend, give up.
        Complete = false;
    }
  }
  RecordsComplete[RD] = Complete;
  return Complete;
}

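/// Emit the deferred -Wunused-local-typedef warnings (including candidates
/// read from the external source) and clear the candidate list.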
void Sema::emitAndClearUnusedLocalTypedefWarnings() {
  if (ExternalSource)
    ExternalSource->ReadUnusedLocalTypedefNameCandidates(
        UnusedLocalTypedefNameCandidates);
  for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
    if (TD->isReferenced())
      continue;
    Diag(TD->getLocation(), diag::warn_unused_local_typedef)
        << isa<TypeAliasDecl>(TD) << TD->getDeclName();
  }
  UnusedLocalTypedefNameCandidates.clear();
}

/// This is called before the very first declaration in the translation unit
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
  if (getLangOpts().CPlusPlusModules &&
      getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
    HandleStartOfHeaderUnit();
}

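/// Finish a translation unit fragment: move late-parsed template
/// instantiations to the pending list, define used vtables, perform pending
/// implicit instantiations, and flush deferred diagnostics and delayed typos.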
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  // No explicit actions are required at the end of the global module fragment.
  if (Kind == TUFragmentKind::Global)
    return;

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(PendingInstantiations.end(),
                               LateParsedInstantiations.begin(),
                               LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefineUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(PendingInstantiations.begin(),
                                 Pending.begin(), Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");

  // Report diagnostics for uncorrected delayed typos. Ideally all of them
  // should have been corrected by that time, but it is very hard to cover all
  // cases in practice.
  for (const auto &Typo : DelayedTypos) {
    // We pass an empty TypoCorrection to indicate no correction was performed.
    Typo.second.DiagHandler(TypoCorrection());
  }
  DelayedTypos.clear();
}

/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
  assert(DelayedDiagnostics.getCurrentPool() == nullptr
         && "reached end of translation unit with a pool attached?");

  // If code completion is enabled, don't perform any end-of-translation-unit
  // work.
  if (PP.isCodeCompletionEnabled())
    return;

  // Complete translation units and modules define vtables and perform implicit
  // instantiations. PCH files do not.
  if (TUKind != TU_Prefix) {
    DiagnoseUseOfUnimplementedSelectors();

    ActOnEndOfTranslationUnitFragment(
        !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                     Module::PrivateModuleFragment
            ? TUFragmentKind::Private
            : TUFragmentKind::Normal);

    if (LateTemplateParserCleanup)
      LateTemplateParserCleanup(OpaqueParser);

    CheckDelayedMemberExceptionSpecs();
  } else {
    // If we are building a TU prefix for serialization, it is safe to transfer
    // these over, even though they are not parsed. The end of the TU should be
    // outside of any eager template instantiation scope, so when this AST is
    // deserialized, these templates will not be parsed until the end of the
    // combined TU.
    PendingInstantiations.insert(PendingInstantiations.end(),
                                 LateParsedInstantiations.begin(),
                                 LateParsedInstantiations.end());
    LateParsedInstantiations.clear();

    if (LangOpts.PCHInstantiateTemplates) {
      llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
      PerformPendingInstantiations();
    }
  }

  DiagnoseUnterminatedPragmaAlignPack();
  DiagnoseUnterminatedPragmaAttribute();
  DiagnoseUnterminatedOpenMPDeclareTarget();

  // All delayed member exception specs should be checked or we end up accepting
  // incompatible declarations.
  assert(DelayedOverridingExceptionSpecChecks.empty());
  assert(DelayedEquivalentExceptionSpecChecks.empty());

  // All dllexport classes should have been processed already.
  assert(DelayedDllExportClasses.empty());
  assert(DelayedDllExportMemberFunctions.empty());

  // Remove file scoped decls that turned out to be used.
  UnusedFileScopedDecls.erase(
      std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
                     UnusedFileScopedDecls.end(),
                     [this](const DeclaratorDecl *DD) {
                       return ShouldRemoveFromUnused(this, DD);
                     }),
      UnusedFileScopedDecls.end());

  if (TUKind == TU_Prefix) {
    // Translation unit prefixes don't need any of the checking below.
    if (!PP.isIncrementalProcessingEnabled())
      TUScope = nullptr;
    return;
  }

  // Check for #pragma weak identifiers that were never declared
  LoadExternalWeakUndeclaredIdentifiers();
  for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
    if (WeakIDs.second.empty())
      continue;

    Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
                                      LookupOrdinaryName);
    if (PrevDecl != nullptr &&
        !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
            << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
    else
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
            << WeakIDs.first;
  }

  if (LangOpts.CPlusPlus11 &&
      !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
    CheckDelegatingCtorCycles();

  if (!Diags.hasErrorOccurred()) {
    if (ExternalSource)
      ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
    checkUndefinedButUsed(*this);
  }

  // A global-module-fragment is only permitted within a module unit.
  bool DiagnosedMissingModuleDeclaration = false;
  if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                   Module::ExplicitGlobalModuleFragment) {
    Diag(ModuleScopes.back().BeginLoc,
         diag::err_module_declaration_missing_after_global_module_introducer);
    DiagnosedMissingModuleDeclaration = true;
  }

  if (TUKind == TU_Module) {
    // If we are building a module interface unit, we need to have seen the
    // module declaration by now.
    if (getLangOpts().getCompilingModule() ==
            LangOptions::CMK_ModuleInterface &&
        !isCurrentModulePurview() && !DiagnosedMissingModuleDeclaration) {
      // FIXME: Make a better guess as to where to put the module declaration.
      Diag(getSourceManager().getLocForStartOfFile(
               getSourceManager().getMainFileID()),
           diag::err_module_declaration_missing);
    }

    // If we are building a module, resolve all of the exported declarations
    // now.
    if (Module *CurrentModule = PP.getCurrentModule()) {
      ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();

      SmallVector<Module *, 2> Stack;
      Stack.push_back(CurrentModule);
      while (!Stack.empty()) {
        Module *Mod = Stack.pop_back_val();

        // Resolve the exported declarations and conflicts.
        // FIXME: Actually complain, once we figure out how to teach the
        // diagnostic client to deal with complaints in the module map at this
        // point.
        ModMap.resolveExports(Mod, /*Complain=*/false);
        ModMap.resolveUses(Mod, /*Complain=*/false);
        ModMap.resolveConflicts(Mod, /*Complain=*/false);

        // Queue the submodules, so their exports will also be resolved.
        auto SubmodulesRange = Mod->submodules();
        Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
      }
    }

    // Now we can decide whether the modules we're building need an initializer.
    if (Module *CurrentModule = getCurrentModule();
        CurrentModule && CurrentModule->isInterfaceOrPartition()) {
      auto DoesModNeedInit = [this](Module *M) {
        if (!getASTContext().getModuleInitializers(M).empty())
          return true;
        for (auto [Exported, _] : M->Exports)
          if (Exported->isNamedModuleInterfaceHasInit())
            return true;
        for (Module *I : M->Imports)
          if (I->isNamedModuleInterfaceHasInit())
            return true;

        return false;
      };

      CurrentModule->NamedModuleHasInit =
          DoesModNeedInit(CurrentModule) ||
          llvm::any_of(CurrentModule->submodules(),
                       [&](auto *SubM) { return DoesModNeedInit(SubM); });
    }

    // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
    // modules when they are built, not every time they are used.
    emitAndClearUnusedLocalTypedefWarnings();
  }

  // C++ standard modules. Diagnose cases where a function is declared inline
  // in the module purview but has no definition before the end of the TU or
  // the start of a Private Module Fragment (if one is present).
  if (!PendingInlineFuncDecls.empty()) {
    for (auto *D : PendingInlineFuncDecls) {
      if (auto *FD = dyn_cast<FunctionDecl>(D)) {
        bool DefInPMF = false;
        if (auto *FDD = FD->getDefinition()) {
          DefInPMF = FDD->getOwningModule()->isPrivateModule();
          if (!DefInPMF)
            continue;
        }
        Diag(FD->getLocation(), diag::err_export_inline_not_defined)
            << DefInPMF;
        // If we have a PMF it should be at the end of the ModuleScopes.
        if (DefInPMF &&
            ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
          Diag(ModuleScopes.back().BeginLoc,
               diag::note_private_module_fragment);
        }
      }
    }
    PendingInlineFuncDecls.clear();
  }

  // C99 6.9.2p2:
  //   A declaration of an identifier for an object that has file
  //   scope without an initializer, and without a storage-class
  //   specifier or with the storage-class specifier static,
  //   constitutes a tentative definition. If a translation unit
  //   contains one or more tentative definitions for an identifier,
  //   and the translation unit contains no external definition for
  //   that identifier, then the behavior is exactly as if the
  //   translation unit contains a file scope declaration of that
  //   identifier, with the composite type as of the end of the
  //   translation unit, with an initializer equal to 0.
  llvm::SmallSet<VarDecl *, 32> Seen;
  for (TentativeDefinitionsType::iterator
           T = TentativeDefinitions.begin(ExternalSource.get()),
           TEnd = TentativeDefinitions.end();
       T != TEnd; ++T) {
    VarDecl *VD = (*T)->getActingDefinition();

    // If the tentative definition was completed, getActingDefinition() returns
    // null. If we've already seen this variable before, insert()'s second
    // return value is false.
    if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
      continue;

    if (const IncompleteArrayType *ArrayT
        = Context.getAsIncompleteArrayType(VD->getType())) {
      // Set the length of the array to 1 (C99 6.9.2p5).
      Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
      llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
      QualType T = Context.getConstantArrayType(
          ArrayT->getElementType(), One, nullptr, ArraySizeModifier::Normal, 0);
      VD->setType(T);
    } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
                                   diag::err_tentative_def_incomplete_type))
      VD->setInvalidDecl();

    // No initialization is performed for a tentative definition.
    CheckCompleteVariableDeclaration(VD);

    // Notify the consumer that we've completed a tentative definition.
    if (!VD->isInvalidDecl())
      Consumer.CompleteTentativeDefinition(VD);
  }

  for (auto *D : ExternalDeclarations) {
    if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
      continue;

    Consumer.CompleteExternalDeclaration(D);
  }

  // If there were errors, disable 'unused' warnings since they will mostly be
  // noise. Don't warn for a use from a module: either we should warn on all
  // file-scope declarations in modules or not at all, but whether the
  // declaration is used is immaterial.
  if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
    // Output warning for unused file scoped decls.
    for (UnusedFileScopedDeclsType::iterator
             I = UnusedFileScopedDecls.begin(ExternalSource.get()),
             E = UnusedFileScopedDecls.end();
         I != E; ++I) {
      if (ShouldRemoveFromUnused(this, *I))
        continue;

      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
        const FunctionDecl *DiagD;
        if (!FD->hasBody(DiagD))
          DiagD = FD;
        if (DiagD->isDeleted())
          continue; // Deleted functions are supposed to be unused.
        SourceRange DiagRange = DiagD->getLocation();
        if (const ASTTemplateArgumentListInfo *ASTTAL =
                DiagD->getTemplateSpecializationArgsAsWritten())
          DiagRange.setEnd(ASTTAL->RAngleLoc);
        if (DiagD->isReferenced()) {
          if (isa<CXXMethodDecl>(DiagD))
            Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
                << DiagD << DiagRange;
          else {
            if (FD->getStorageClass() == SC_Static &&
                !FD->isInlineSpecified() &&
                !SourceMgr.isInMainFile(
                    SourceMgr.getExpansionLoc(FD->getLocation())))
              Diag(DiagD->getLocation(),
                   diag::warn_unneeded_static_internal_decl)
                  << DiagD << DiagRange;
            else
              Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
                  << /*function=*/0 << DiagD << DiagRange;
          }
        } else if (!FD->isTargetMultiVersion() ||
                   FD->isTargetMultiVersionDefault()) {
          if (FD->getDescribedFunctionTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*function=*/0 << DiagD << DiagRange;
          else
            Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
                                           ? diag::warn_unused_member_function
                                           : diag::warn_unused_function)
                << DiagD << DiagRange;
        }
      } else {
        const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
        if (!DiagD)
          DiagD = cast<VarDecl>(*I);
        SourceRange DiagRange = DiagD->getLocation();
        if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
          if (const ASTTemplateArgumentListInfo *ASTTAL =
                  VTSD->getTemplateArgsInfo())
            DiagRange.setEnd(ASTTAL->RAngleLoc);
        }
        if (DiagD->isReferenced()) {
          Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
              << /*variable=*/1 << DiagD << DiagRange;
        } else if (DiagD->getDescribedVarTemplate()) {
          Diag(DiagD->getLocation(), diag::warn_unused_template)
              << /*variable=*/1 << DiagD << DiagRange;
        } else if (DiagD->getType().isConstQualified()) {
          const SourceManager &SM = SourceMgr;
          if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
              !PP.getLangOpts().IsHeaderFile)
            Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
                << DiagD << DiagRange;
        } else {
          Diag(DiagD->getLocation(), diag::warn_unused_variable)
              << DiagD << DiagRange;
        }
      }
    }

    emitAndClearUnusedLocalTypedefWarnings();
  }

  if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
    // FIXME: Load additional unused private field candidates from the external
    // source.
    RecordCompleteMap RecordsComplete;
    RecordCompleteMap MNCComplete;
    for (const NamedDecl *D : UnusedPrivateFields) {
      const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
      if (RD && !RD->isUnion() &&
          IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
        Diag(D->getLocation(), diag::warn_unused_private_field)
            << D->getDeclName();
      }
    }
  }

  if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
    if (ExternalSource)
      ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
    for (const auto &DeletedFieldInfo : DeleteExprs) {
      for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
        AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
                                  DeleteExprLoc.second);
      }
    }
  }

  AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());

  // Check we've noticed that we're no longer parsing the initializer for every
  // variable. If we miss cases, then at best we have a performance issue and
  // at worst a rejects-valid bug.
  assert(ParsingInitForAutoVars.empty() &&
         "Didn't unmark var as having its initializer parsed");

  if (!PP.isIncrementalProcessingEnabled())
    TUScope = nullptr;
}

//===----------------------------------------------------------------------===//
// Helper functions.
//===----------------------------------------------------------------------===//

1482DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1483 DeclContext *DC = CurContext;
1484
1485 while (true) {
1486 if (isa<BlockDecl>(Val: DC) || isa<EnumDecl>(Val: DC) || isa<CapturedDecl>(Val: DC) ||
1487 isa<RequiresExprBodyDecl>(Val: DC)) {
1488 DC = DC->getParent();
1489 } else if (!AllowLambda && isa<CXXMethodDecl>(Val: DC) &&
1490 cast<CXXMethodDecl>(Val: DC)->getOverloadedOperator() == OO_Call &&
1491 cast<CXXRecordDecl>(Val: DC->getParent())->isLambda()) {
1492 DC = DC->getParent()->getParent();
1493 } else break;
1494 }
1495
1496 return DC;
1497}
1498
1499/// getCurFunctionDecl - If inside of a function body, this returns a pointer
1500/// to the function decl for the function being parsed. If we're currently
1501/// in a 'block', this returns the containing context.
1502FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1503 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1504 return dyn_cast<FunctionDecl>(Val: DC);
1505}
1506
1507ObjCMethodDecl *Sema::getCurMethodDecl() {
1508 DeclContext *DC = getFunctionLevelDeclContext();
1509 while (isa<RecordDecl>(Val: DC))
1510 DC = DC->getParent();
1511 return dyn_cast<ObjCMethodDecl>(Val: DC);
1512}
1513
1514NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1515 DeclContext *DC = getFunctionLevelDeclContext();
1516 if (isa<ObjCMethodDecl>(Val: DC) || isa<FunctionDecl>(Val: DC))
1517 return cast<NamedDecl>(Val: DC);
1518 return nullptr;
1519}
1520
1521LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1522 if (getLangOpts().OpenCL)
1523 return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1524 return LangAS::Default;
1525}
1526
1527void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
1528 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
1529 // and yet we also use the current diag ID on the DiagnosticsEngine. This has
1530 // been made more painfully obvious by the refactor that introduced this
1531 // function, but it is possible that the incoming argument can be
1532 // eliminated. If it truly cannot be (for example, there is some reentrancy
1533 // issue I am not seeing yet), then there should at least be a clarifying
1534 // comment somewhere.
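  // For illustration (hypothetical user code, not part of this file): during
  // template argument deduction for
  //   template <class T> auto f(T t) -> decltype(t.foo());
  // the absence of a member 'foo' is a substitution failure rather than a hard
  // error, so the SFINAE handling below records the diagnostic with the
  // deduction info instead of emitting it.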
1535 if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
1536 switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
1537 DiagID: Diags.getCurrentDiagID())) {
1538 case DiagnosticIDs::SFINAE_Report:
1539 // We'll report the diagnostic below.
1540 break;
1541
1542 case DiagnosticIDs::SFINAE_SubstitutionFailure:
1543 // Count this failure so that we know that template argument deduction
1544 // has failed.
1545 ++NumSFINAEErrors;
1546
1547 // Make a copy of this suppressed diagnostic and store it with the
1548 // template-deduction information.
1549 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1550 Diagnostic DiagInfo(&Diags);
1551 (*Info)->addSFINAEDiagnostic(Loc: DiagInfo.getLocation(),
1552 PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1553 }
1554
1555 Diags.setLastDiagnosticIgnored(true);
1556 Diags.Clear();
1557 return;
1558
1559 case DiagnosticIDs::SFINAE_AccessControl: {
1560 // Per C++ Core Issue 1170, access control is part of SFINAE.
1561 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
1562 // make access control a part of SFINAE for the purposes of checking
1563 // type traits.
1564 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
1565 break;
1566
1567 SourceLocation Loc = Diags.getCurrentDiagLoc();
1568
1569 // Suppress this diagnostic.
1570 ++NumSFINAEErrors;
1571
1572 // Make a copy of this suppressed diagnostic and store it with the
1573 // template-deduction information.
1574 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1575 Diagnostic DiagInfo(&Diags);
1576 (*Info)->addSFINAEDiagnostic(Loc: DiagInfo.getLocation(),
1577 PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1578 }
1579
1580 Diags.setLastDiagnosticIgnored(true);
1581 Diags.Clear();
1582
1583 // Now the diagnostic state is clear, produce a C++98 compatibility
1584 // warning.
1585 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1586
1587 // The last diagnostic which Sema produced was ignored. Suppress any
1588 // notes attached to it.
1589 Diags.setLastDiagnosticIgnored(true);
1590 return;
1591 }
1592
1593 case DiagnosticIDs::SFINAE_Suppress:
1594 // Make a copy of this suppressed diagnostic and store it with the
1595 // template-deduction information.
1596 if (*Info) {
1597 Diagnostic DiagInfo(&Diags);
1598 (*Info)->addSuppressedDiagnostic(Loc: DiagInfo.getLocation(),
1599 PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1600 }
1601
1602 // Suppress this diagnostic.
1603 Diags.setLastDiagnosticIgnored(true);
1604 Diags.Clear();
1605 return;
1606 }
1607 }
1608
1609 // Copy the diagnostic printing policy over the ASTContext printing policy.
1610 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
1611 Context.setPrintingPolicy(getPrintingPolicy());
1612
1613 // Emit the diagnostic.
1614 if (!Diags.EmitCurrentDiagnostic())
1615 return;
1616
1617 // If this is not a note, and we're in a template instantiation
1618 // that is different from the last template instantiation where
1619 // we emitted an error, print a template instantiation
1620 // backtrace.
1621 if (!DiagnosticIDs::isBuiltinNote(DiagID))
1622 PrintContextStack();
1623}
1624
1625Sema::SemaDiagnosticBuilder
1626Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
1627 return Diag(Loc, DiagID: PD.getDiagID(), DeferHint) << PD;
1628}
1629
1630bool Sema::hasUncompilableErrorOccurred() const {
1631 if (getDiagnostics().hasUncompilableErrorOccurred())
1632 return true;
1633 auto *FD = dyn_cast<FunctionDecl>(Val: CurContext);
1634 if (!FD)
1635 return false;
1636 auto Loc = DeviceDeferredDiags.find(Val: FD);
1637 if (Loc == DeviceDeferredDiags.end())
1638 return false;
1639 for (auto PDAt : Loc->second) {
1640 if (DiagnosticIDs::isDefaultMappingAsError(DiagID: PDAt.second.getDiagID()))
1641 return true;
1642 }
1643 return false;
1644}
1645
1646// Print notes showing how we can reach FD starting from an a priori
1647// known-emitted function.
1648static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1649 auto FnIt = S.DeviceKnownEmittedFns.find(Val: FD);
1650 while (FnIt != S.DeviceKnownEmittedFns.end()) {
1651 // Respect error limit.
1652 if (S.Diags.hasFatalErrorOccurred())
1653 return;
1654 DiagnosticBuilder Builder(
1655 S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1656 Builder << FnIt->second.FD;
1657 FnIt = S.DeviceKnownEmittedFns.find(Val: FnIt->second.FD);
1658 }
1659}
1660
1661namespace {
1662
1663/// Helper class that emits deferred diagnostic messages when an entity that
1664/// directly or indirectly uses the function responsible for the deferred
1665/// diagnostics is known to be emitted.
1666///
1667/// While the AST is built, certain diagnostics are recorded as deferred
1668/// diagnostics because it is not yet known whether the functions containing
1669/// them will be emitted. A list of potentially emitted functions, and of
1670/// variables whose initializers may trigger emission of functions, is also
1671/// recorded. DeferredDiagnosticsEmitter recursively visits the functions used
1672/// by each recorded function in order to emit the deferred diagnostics.
1673///
1674/// During the visit, certain OpenMP directives, or the initializers of
1675/// variables with certain OpenMP attributes, cause subsequent visits of
1676/// functions to happen in a state called the OpenMP device context in this
1677/// implementation. The state is exited when the directive or initializer is
1678/// exited, and it can change the emission state of subsequent uses
1679/// of functions.
1680///
1681/// Conceptually the functions or variables to be visited form a use graph
1682/// where the parent node uses the child node. At any point of the visit,
1683/// the tree nodes traversed from the tree root to the current node form a use
1684/// stack. The emission state of the current node depends on two factors:
1685/// 1. the emission state of the root node
1686/// 2. whether the current node is in OpenMP device context
1687/// If the function is decided to be emitted, its contained deferred diagnostics
1688/// are emitted, together with the information about the use stack.
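///
/// For illustration (hypothetical OpenMP user code, not part of this file):
///
///   void dev_only();        // has deferred diagnostics recorded against it
///   void f() {
///   #pragma omp target      // enters the OpenMP device context
///     dev_only();           // visited while in the device context, so the
///                           // deferred diagnostics for dev_only() are emitted
///   }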
1689///
1690class DeferredDiagnosticsEmitter
1691 : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1692public:
1693 typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1694
1695 // Whether the function is already in the current use-path.
1696 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1697
1698 // The current use-path.
1699 llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1700
1701 // Whether a function has already been fully visited. Done[0] is for the
1702 // case not in OpenMP device context. Done[1] is for the case in OpenMP
1703 // device context. We need two sets because diagnostic emission may
1704 // differ depending on whether it is in OpenMP device context.
1705 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1706
1707 // Emission state of the root node of the current use graph.
1708 bool ShouldEmitRootNode;
1709
1710 // Current OpenMP device context level. It is initialized to 0 and each
1711 // entering of device context increases it by 1 and each exit decreases
1712 // it by 1. Non-zero value indicates it is currently in device context.
1713 unsigned InOMPDeviceContext;
1714
1715 DeferredDiagnosticsEmitter(Sema &S)
1716 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1717
1718 bool shouldVisitDiscardedStmt() const { return false; }
1719
1720 void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1721 ++InOMPDeviceContext;
1722 Inherited::VisitOMPTargetDirective(Node);
1723 --InOMPDeviceContext;
1724 }
1725
1726 void visitUsedDecl(SourceLocation Loc, Decl *D) {
1727 if (isa<VarDecl>(Val: D))
1728 return;
1729 if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
1730 checkFunc(Loc, FD);
1731 else
1732 Inherited::visitUsedDecl(Loc, D);
1733 }
1734
1735 void checkVar(VarDecl *VD) {
1736 assert(VD->isFileVarDecl() &&
1737 "Should only check file-scope variables");
1738 if (auto *Init = VD->getInit()) {
1739 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1740 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1741 *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1742 if (IsDev)
1743 ++InOMPDeviceContext;
1744 this->Visit(Init);
1745 if (IsDev)
1746 --InOMPDeviceContext;
1747 }
1748 }
1749
1750 void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1751 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1752 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1753 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1754 S.shouldIgnoreInHostDeviceCheck(Callee: FD) || InUsePath.count(FD))
1755 return;
1756 // Finalize analysis of OpenMP-specific constructs.
1757 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1758 (ShouldEmitRootNode || InOMPDeviceContext))
1759 S.finalizeOpenMPDelayedAnalysis(Caller, Callee: FD, Loc);
1760 if (Caller)
1761 S.DeviceKnownEmittedFns[FD] = {.FD: Caller, .Loc: Loc};
1762 // Always emit deferred diagnostics for the direct users. This does not
1763 // lead to an explosion of diagnostics since each user is visited at most
1764 // twice.
1765 if (ShouldEmitRootNode || InOMPDeviceContext)
1766 emitDeferredDiags(FD, ShowCallStack: Caller);
1767 // Do not revisit a function if the function body has been completely
1768 // visited before.
1769 if (!Done.insert(FD).second)
1770 return;
1771 InUsePath.insert(FD);
1772 UsePath.push_back(Elt: FD);
1773 if (auto *S = FD->getBody()) {
1774 this->Visit(S);
1775 }
1776 UsePath.pop_back();
1777 InUsePath.erase(FD);
1778 }
1779
1780 void checkRecordedDecl(Decl *D) {
1781 if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1782 ShouldEmitRootNode = S.getEmissionStatus(Decl: FD, /*Final=*/true) ==
1783 Sema::FunctionEmissionStatus::Emitted;
1784 checkFunc(Loc: SourceLocation(), FD);
1785 } else
1786 checkVar(VD: cast<VarDecl>(Val: D));
1787 }
1788
1789 // Emit any deferred diagnostics for FD
1790 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
1791 auto It = S.DeviceDeferredDiags.find(Val: FD);
1792 if (It == S.DeviceDeferredDiags.end())
1793 return;
1794 bool HasWarningOrError = false;
1795 bool FirstDiag = true;
1796 for (PartialDiagnosticAt &PDAt : It->second) {
1797 // Respect error limit.
1798 if (S.Diags.hasFatalErrorOccurred())
1799 return;
1800 const SourceLocation &Loc = PDAt.first;
1801 const PartialDiagnostic &PD = PDAt.second;
1802 HasWarningOrError |=
1803 S.getDiagnostics().getDiagnosticLevel(DiagID: PD.getDiagID(), Loc) >=
1804 DiagnosticsEngine::Warning;
1805 {
1806 DiagnosticBuilder Builder(S.Diags.Report(Loc, DiagID: PD.getDiagID()));
1807 PD.Emit(DB: Builder);
1808 }
1809 // Emit the note on the first diagnostic, in case too many diagnostics
1810 // prevent the note from being emitted later.
1811 if (FirstDiag && HasWarningOrError && ShowCallStack) {
1812 emitCallStackNotes(S, FD);
1813 FirstDiag = false;
1814 }
1815 }
1816 }
1817};
1818} // namespace
1819
1820void Sema::emitDeferredDiags() {
1821 if (ExternalSource)
1822 ExternalSource->ReadDeclsToCheckForDeferredDiags(
1823 Decls&: DeclsToCheckForDeferredDiags);
1824
1825 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
1826 DeclsToCheckForDeferredDiags.empty())
1827 return;
1828
1829 DeferredDiagnosticsEmitter DDE(*this);
1830 for (auto *D : DeclsToCheckForDeferredDiags)
1831 DDE.checkRecordedDecl(D);
1832}
1833
1834// In CUDA, there are some constructs which may appear in semantically-valid
1835// code, but trigger errors if we ever generate code for the function in which
1836// they appear. Essentially every construct you're not allowed to use on the
1837// device falls into this category, because you are allowed to use these
1838// constructs in a __host__ __device__ function, but only if that function is
1839// never codegen'ed on the device.
1840//
1841// To handle semantic checking for these constructs, we keep track of the set of
1842// functions we know will be emitted, either because we could tell a priori that
1843// they would be emitted, or because they were transitively called by a
1844// known-emitted function.
1845//
1846// We also keep a partial call graph of which not-known-emitted functions call
1847// which other not-known-emitted functions.
1848//
1849// When we see something which is illegal if the current function is emitted
1850// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
1851// CheckCUDACall), we first check if the current function is known-emitted. If
1852// so, we immediately output the diagnostic.
1853//
1854// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
1855// until we discover that the function is known-emitted, at which point we take
1856// it out of this map and emit the diagnostic.
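//
// For illustration (hypothetical CUDA user code, not part of this file):
//
//   __host__ __device__ void hd() { throw 0; }  // 'throw' is invalid in device
//                                               // code, but only if hd() is
//                                               // ever codegen'ed for the
//                                               // device, so the error is
//                                               // deferred.
//   __global__ void kernel() { hd(); }          // kernel() is known-emitted on
//                                               // the device; hd() becomes
//                                               // known-emitted too, and its
//                                               // deferred diagnostic fires.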
1857
1858Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
1859 unsigned DiagID,
1860 const FunctionDecl *Fn,
1861 Sema &S)
1862 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
1863 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
1864 switch (K) {
1865 case K_Nop:
1866 break;
1867 case K_Immediate:
1868 case K_ImmediateWithCallStack:
1869 ImmediateDiag.emplace(
1870 args: ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
1871 break;
1872 case K_Deferred:
1873 assert(Fn && "Must have a function to attach the deferred diag to.");
1874 auto &Diags = S.DeviceDeferredDiags[Fn];
1875 PartialDiagId.emplace(args: Diags.size());
1876 Diags.emplace_back(args&: Loc, args: S.PDiag(DiagID));
1877 break;
1878 }
1879}
1880
1881Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
1882 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
1883 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
1884 PartialDiagId(D.PartialDiagId) {
1885 // Clean the previous diagnostics.
1886 D.ShowCallStack = false;
1887 D.ImmediateDiag.reset();
1888 D.PartialDiagId.reset();
1889}
1890
1891Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
1892 if (ImmediateDiag) {
1893 // Emit our diagnostic and, if it was a warning or error, output a callstack
1894 // if Fn isn't a priori known-emitted.
1895 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
1896 DiagID, Loc) >= DiagnosticsEngine::Warning;
1897 ImmediateDiag.reset(); // Emit the immediate diag.
1898 if (IsWarningOrError && ShowCallStack)
1899 emitCallStackNotes(S, FD: Fn);
1900 } else {
1901 assert((!PartialDiagId || ShowCallStack) &&
1902 "Must always show call stack for deferred diags.");
1903 }
1904}
1905
1906Sema::SemaDiagnosticBuilder
1907Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
1908 FD = FD ? FD : getCurFunctionDecl();
1909 if (LangOpts.OpenMP)
1910 return LangOpts.OpenMPIsTargetDevice
1911 ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1912 : diagIfOpenMPHostCode(Loc, DiagID, FD);
1913 if (getLangOpts().CUDA)
1914 return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
1915 : CUDADiagIfHostCode(Loc, DiagID);
1916
1917 if (getLangOpts().SYCLIsDevice)
1918 return SYCLDiagIfDeviceCode(Loc, DiagID);
1919
1920 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1921 FD, *this);
1922}
1923
1924Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
1925 bool DeferHint) {
1926 bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
1927 bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
1928 DiagnosticIDs::isDeferrable(DiagID) &&
1929 (DeferHint || DeferDiags || !IsError);
1930 auto SetIsLastErrorImmediate = [&](bool Flag) {
1931 if (IsError)
1932 IsLastErrorImmediate = Flag;
1933 };
1934 if (!ShouldDefer) {
1935 SetIsLastErrorImmediate(true);
1936 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
1937 DiagID, getCurFunctionDecl(), *this);
1938 }
1939
1940 SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
1941 ? CUDADiagIfDeviceCode(Loc, DiagID)
1942 : CUDADiagIfHostCode(Loc, DiagID);
1943 SetIsLastErrorImmediate(DB.isImmediate());
1944 return DB;
1945}
1946
1947void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1948 if (isUnevaluatedContext() || Ty.isNull())
1949 return;
1950
1951 // The original idea behind the checkTypeSupport function is that unused
1952 // declarations can be replaced with an array of bytes of the same size during
1953 // codegen. Such a replacement doesn't seem to be possible for types without a
1954 // constant byte size, like zero-length arrays, so do a deep check for SYCL.
1955 if (D && LangOpts.SYCLIsDevice) {
1956 llvm::DenseSet<QualType> Visited;
1957 deepTypeCheckForSYCLDevice(UsedAt: Loc, Visited, DeclToCheck: D);
1958 }
1959
1960 Decl *C = cast<Decl>(Val: getCurLexicalContext());
1961
1962 // Memcpy operations for structs containing a member with an unsupported
1963 // type are OK, though.
1964 if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: C)) {
1965 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
1966 MD->isTrivial())
1967 return;
1968
1969 if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(Val: MD))
1970 if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
1971 return;
1972 }
1973
1974 // Try to associate errors with the lexical context, if that is a function, or
1975 // the value declaration otherwise.
1976 const FunctionDecl *FD = isa<FunctionDecl>(Val: C)
1977 ? cast<FunctionDecl>(Val: C)
1978 : dyn_cast_or_null<FunctionDecl>(Val: D);
1979
1980 auto CheckDeviceType = [&](QualType Ty) {
1981 if (Ty->isDependentType())
1982 return;
1983
1984 if (Ty->isBitIntType()) {
1985 if (!Context.getTargetInfo().hasBitIntType()) {
1986 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
1987 if (D)
1988 PD << D;
1989 else
1990 PD << "expression";
1991 targetDiag(Loc, PD, FD)
1992 << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
1993 << Ty << Context.getTargetInfo().getTriple().str();
1994 }
1995 return;
1996 }
1997
1998 // Check whether this is a 128-bit floating type (e.g. 'long double') whose
1999 // semantics are not supported by the target.
2000 bool LongDoubleMismatched = false;
2001 if (Ty->isRealFloatingType() && Context.getTypeSize(T: Ty) == 128) {
2002 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: Ty);
2003 if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
2004 !Context.getTargetInfo().hasFloat128Type()) ||
2005 (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
2006 !Context.getTargetInfo().hasIbm128Type()))
2007 LongDoubleMismatched = true;
2008 }
2009
2010 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
2011 (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
2012 (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
2013 (Ty->isIntegerType() && Context.getTypeSize(T: Ty) == 128 &&
2014 !Context.getTargetInfo().hasInt128Type()) ||
2015 (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
2016 !LangOpts.CUDAIsDevice) ||
2017 LongDoubleMismatched) {
2018 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2019 if (D)
2020 PD << D;
2021 else
2022 PD << "expression";
2023
2024 if (targetDiag(Loc, PD, FD)
2025 << true /*show bit size*/
2026 << static_cast<unsigned>(Context.getTypeSize(T: Ty)) << Ty
2027 << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
2028 if (D)
2029 D->setInvalidDecl();
2030 }
2031 if (D)
2032 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2033 }
2034 };
2035
2036 auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
2037 if (LangOpts.SYCLIsDevice ||
2038 (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
2039 LangOpts.CUDAIsDevice)
2040 CheckDeviceType(Ty);
2041
2042 QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
2043 const TargetInfo &TI = Context.getTargetInfo();
2044 if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
2045 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2046 if (D)
2047 PD << D;
2048 else
2049 PD << "expression";
2050
2051 if (Diag(Loc, PD, DeferHint: FD)
2052 << false /*show bit size*/ << 0 << Ty << false /*return*/
2053 << TI.getTriple().str()) {
2054 if (D)
2055 D->setInvalidDecl();
2056 }
2057 if (D)
2058 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2059 }
2060
2061 bool IsDouble = UnqualTy == Context.DoubleTy;
2062 bool IsFloat = UnqualTy == Context.FloatTy;
2063 if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
2064 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2065 if (D)
2066 PD << D;
2067 else
2068 PD << "expression";
2069
2070 if (Diag(Loc, PD, DeferHint: FD)
2071 << false /*show bit size*/ << 0 << Ty << true /*return*/
2072 << TI.getTriple().str()) {
2073 if (D)
2074 D->setInvalidDecl();
2075 }
2076 if (D)
2077 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2078 }
2079
2080 if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType())
2081 checkRVVTypeSupport(Ty, Loc, D);
2082
2083 // Don't allow SVE types in functions without an SVE target.
2084 if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
2085 llvm::StringMap<bool> CallerFeatureMap;
2086 Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
2087 if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap) &&
2088 !Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
2089 Diag(D->getLocation(), diag::err_sve_vector_in_non_sve_target) << Ty;
2090 }
2091 };
2092
2093 CheckType(Ty);
2094 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Val&: Ty)) {
2095 for (const auto &ParamTy : FPTy->param_types())
2096 CheckType(ParamTy);
2097 CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
2098 }
2099 if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Val&: Ty))
2100 CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2101}
2102
2103/// Looks through the macro-expansion chain for the given
2104/// location, looking for a macro expansion with the given name.
2105/// If one is found, returns true and sets the location to that
2106/// expansion loc.
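///
/// For example, if \p locref points inside the expansion of the 'assert'
/// macro, calling findMacroSpelling(locref, "assert") retargets the location
/// to the place where 'assert' itself was written.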
2107bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2108 SourceLocation loc = locref;
2109 if (!loc.isMacroID()) return false;
2110
2111 // There's no good way right now to look at the intermediate
2112 // expansions, so just jump to the expansion location.
2113 loc = getSourceManager().getExpansionLoc(Loc: loc);
2114
2115 // If that's written with the name, stop here.
2116 SmallString<16> buffer;
2117 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2118 locref = loc;
2119 return true;
2120 }
2121 return false;
2122}
2123
2124/// Determines the active Scope associated with the given declaration
2125/// context.
2126///
2127/// This routine maps a declaration context to the active Scope object that
2128/// represents that declaration context in the parser. It is typically used
2129/// from "scope-less" code (e.g., template instantiation, lazy creation of
2130/// declarations) that injects a name for name-lookup purposes and, therefore,
2131/// must update the Scope.
2132///
2133/// \returns The scope corresponding to the given declaration context, or NULL
2134/// if no such scope is open.
2135Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2136
2137 if (!Ctx)
2138 return nullptr;
2139
2140 Ctx = Ctx->getPrimaryContext();
2141 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2142 // Ignore scopes that cannot have declarations. This is important for
2143 // out-of-line definitions of static class members.
2144 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2145 if (DeclContext *Entity = S->getEntity())
2146 if (Ctx == Entity->getPrimaryContext())
2147 return S;
2148 }
2149
2150 return nullptr;
2151}
2152
2153/// Enter a new function scope
2154void Sema::PushFunctionScope() {
2155 if (FunctionScopes.empty() && CachedFunctionScope) {
2156 // Use CachedFunctionScope to avoid allocating memory when possible.
2157 CachedFunctionScope->Clear();
2158 FunctionScopes.push_back(Elt: CachedFunctionScope.release());
2159 } else {
2160 FunctionScopes.push_back(Elt: new FunctionScopeInfo(getDiagnostics()));
2161 }
2162 if (LangOpts.OpenMP)
2163 pushOpenMPFunctionRegion();
2164}
2165
2166void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2167 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2168 BlockScope, Block));
2169 CapturingFunctionScopes++;
2170}
2171
2172LambdaScopeInfo *Sema::PushLambdaScope() {
2173 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2174 FunctionScopes.push_back(LSI);
2175 CapturingFunctionScopes++;
2176 return LSI;
2177}
2178
2179void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2180 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2181 LSI->AutoTemplateParameterDepth = Depth;
2182 return;
2183 }
2184 llvm_unreachable(
2185 "Remove assertion if intentionally called in a non-lambda context.");
2186}
2187
2188// Check that the type of the VarDecl has an accessible copy constructor and
2189// resolve its destructor's exception specification.
2190// This also performs initialization of block variables when they are moved
2191// to the heap. It uses the same rules as applicable for implicit moves
2192// according to the C++ standard in effect ([class.copy.elision]p3).
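//
// For illustration (hypothetical blocks user code, not part of this file):
//
//   __block SomeClass Obj;                // captured by reference...
//   void (^B)(void) = ^{ use(Obj); };     // ...by a block that escapes, so a
//   saveForLater(B);                      // copy-initializer for moving 'Obj'
//                                         // to the heap is built here.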
2193static void checkEscapingByref(VarDecl *VD, Sema &S) {
2194 QualType T = VD->getType();
2195 EnterExpressionEvaluationContext scope(
2196 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
2197 SourceLocation Loc = VD->getLocation();
2198 Expr *VarRef =
2199 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
2200 ExprResult Result;
2201 auto IE = InitializedEntity::InitializeBlock(BlockVarLoc: Loc, Type: T);
2202 if (S.getLangOpts().CPlusPlus23) {
2203 auto *E = ImplicitCastExpr::Create(Context: S.Context, T, Kind: CK_NoOp, Operand: VarRef, BasePath: nullptr,
2204 Cat: VK_XValue, FPO: FPOptionsOverride());
2205 Result = S.PerformCopyInitialization(Entity: IE, EqualLoc: SourceLocation(), Init: E);
2206 } else {
2207 Result = S.PerformMoveOrCopyInitialization(
2208 Entity: IE, NRInfo: Sema::NamedReturnInfo{.Candidate: VD, .S: Sema::NamedReturnInfo::MoveEligible},
2209 Value: VarRef);
2210 }
2211
2212 if (!Result.isInvalid()) {
2213 Result = S.MaybeCreateExprWithCleanups(SubExpr: Result);
2214 Expr *Init = Result.getAs<Expr>();
2215 S.Context.setBlockVarCopyInit(VD, CopyExpr: Init, CanThrow: S.canThrow(Init));
2216 }
2217
2218 // The destructor's exception specification is needed when IRGen generates
2219 // block copy/destroy functions. Resolve it here.
2220 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
2221 if (CXXDestructorDecl *DD = RD->getDestructor()) {
2222 auto *FPT = DD->getType()->getAs<FunctionProtoType>();
2223 S.ResolveExceptionSpec(Loc, FPT: FPT);
2224 }
2225}
2226
2227static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2228 // Set the EscapingByref flag of __block variables captured by
2229 // escaping blocks.
2230 for (const BlockDecl *BD : FSI.Blocks) {
2231 for (const BlockDecl::Capture &BC : BD->captures()) {
2232 VarDecl *VD = BC.getVariable();
2233 if (VD->hasAttr<BlocksAttr>()) {
2234 // Nothing to do if this is a __block variable captured by a
2235 // non-escaping block.
2236 if (BD->doesNotEscape())
2237 continue;
2238 VD->setEscapingByref();
2239 }
2240 // Check whether the captured variable is or contains an object of
2241 // non-trivial C union type.
2242 QualType CapType = BC.getVariable()->getType();
2243 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2244 CapType.hasNonTrivialToPrimitiveCopyCUnion())
2245 S.checkNonTrivialCUnion(QT: BC.getVariable()->getType(),
2246 Loc: BD->getCaretLocation(),
2247 UseContext: Sema::NTCUC_BlockCapture,
2248 NonTrivialKind: Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
2249 }
2250 }
2251
2252 for (VarDecl *VD : FSI.ByrefBlockVars) {
2253 // __block variables might require us to capture a copy-initializer.
2254 if (!VD->isEscapingByref())
2255 continue;
2256 // It's currently invalid to ever have a __block variable with an
2257 // array type; should we diagnose that here?
2258 // Regardless, we don't want to ignore array nesting when
2259 // constructing this copy.
2260 if (VD->getType()->isStructureOrClassType())
2261 checkEscapingByref(VD, S);
2262 }
2263}
2264
2265/// Pop a function (or block or lambda or captured region) scope from the stack.
2266///
2267/// \param WP The warning policy to use for CFG-based warnings, or null if such
2268/// warnings should not be produced.
2269/// \param D The declaration corresponding to this function scope, if producing
2270/// CFG-based warnings.
2271/// \param BlockType The type of the block expression, if D is a BlockDecl.
2272Sema::PoppedFunctionScopePtr
2273Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
2274 const Decl *D, QualType BlockType) {
2275 assert(!FunctionScopes.empty() && "mismatched push/pop!");
2276
2277 markEscapingByrefs(FSI: *FunctionScopes.back(), S&: *this);
2278
2279 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
2280 PoppedFunctionScopeDeleter(this));
2281
2282 if (LangOpts.OpenMP)
2283 popOpenMPFunctionRegion(OldFSI: Scope.get());
2284
2285 // Issue any analysis-based warnings.
2286 if (WP && D)
2287 AnalysisWarnings.IssueWarnings(P: *WP, fscope: Scope.get(), D, BlockType);
2288 else
2289 for (const auto &PUD : Scope->PossiblyUnreachableDiags)
2290 Diag(Loc: PUD.Loc, PD: PUD.PD);
2291
2292 return Scope;
2293}
2294
2295void Sema::PoppedFunctionScopeDeleter::
2296operator()(sema::FunctionScopeInfo *Scope) const {
2297 if (!Scope->isPlainFunction())
2298 Self->CapturingFunctionScopes--;
2299 // Stash the function scope for later reuse if it's for a normal function.
2300 if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2301 Self->CachedFunctionScope.reset(p: Scope);
2302 else
2303 delete Scope;
2304}
2305
2306void Sema::PushCompoundScope(bool IsStmtExpr) {
2307 getCurFunction()->CompoundScopes.push_back(
2308 Elt: CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2309}
2310
2311void Sema::PopCompoundScope() {
2312 FunctionScopeInfo *CurFunction = getCurFunction();
2313 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2314
2315 CurFunction->CompoundScopes.pop_back();
2316}
2317
2318/// Determine whether any errors occurred within this function/method/
2319/// block.
2320bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2321 return getCurFunction()->hasUnrecoverableErrorOccurred();
2322}
2323
2324void Sema::setFunctionHasBranchIntoScope() {
2325 if (!FunctionScopes.empty())
2326 FunctionScopes.back()->setHasBranchIntoScope();
2327}
2328
2329void Sema::setFunctionHasBranchProtectedScope() {
2330 if (!FunctionScopes.empty())
2331 FunctionScopes.back()->setHasBranchProtectedScope();
2332}
2333
2334void Sema::setFunctionHasIndirectGoto() {
2335 if (!FunctionScopes.empty())
2336 FunctionScopes.back()->setHasIndirectGoto();
2337}
2338
2339void Sema::setFunctionHasMustTail() {
2340 if (!FunctionScopes.empty())
2341 FunctionScopes.back()->setHasMustTail();
2342}
2343
2344BlockScopeInfo *Sema::getCurBlock() {
2345 if (FunctionScopes.empty())
2346 return nullptr;
2347
2348 auto CurBSI = dyn_cast<BlockScopeInfo>(Val: FunctionScopes.back());
2349 if (CurBSI && CurBSI->TheDecl &&
2350 !CurBSI->TheDecl->Encloses(CurContext)) {
2351 // We have switched contexts due to template instantiation.
2352 assert(!CodeSynthesisContexts.empty());
2353 return nullptr;
2354 }
2355
2356 return CurBSI;
2357}
2358
2359FunctionScopeInfo *Sema::getEnclosingFunction() const {
2360 if (FunctionScopes.empty())
2361 return nullptr;
2362
2363 for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2364 if (isa<sema::BlockScopeInfo>(Val: FunctionScopes[e]))
2365 continue;
2366 return FunctionScopes[e];
2367 }
2368 return nullptr;
2369}
2370
2371LambdaScopeInfo *Sema::getEnclosingLambda() const {
2372 for (auto *Scope : llvm::reverse(C: FunctionScopes)) {
2373 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Val: Scope)) {
2374 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2375 LSI->AfterParameterList) {
2376 // We have switched contexts due to template instantiation.
2377 // FIXME: We should swap out the FunctionScopes during code synthesis
2378 // so that we don't need to check for this.
2379 assert(!CodeSynthesisContexts.empty());
2380 return nullptr;
2381 }
2382 return LSI;
2383 }
2384 }
2385 return nullptr;
2386}
2387
2388LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2389 if (FunctionScopes.empty())
2390 return nullptr;
2391
2392 auto I = FunctionScopes.rbegin();
2393 if (IgnoreNonLambdaCapturingScope) {
2394 auto E = FunctionScopes.rend();
2395 while (I != E && isa<CapturingScopeInfo>(Val: *I) && !isa<LambdaScopeInfo>(Val: *I))
2396 ++I;
2397 if (I == E)
2398 return nullptr;
2399 }
2400 auto *CurLSI = dyn_cast<LambdaScopeInfo>(Val: *I);
2401 if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
2402 !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
2403 // We have switched contexts due to template instantiation.
2404 assert(!CodeSynthesisContexts.empty());
2405 return nullptr;
2406 }
2407
2408 return CurLSI;
2409}
2410
2411// We have a generic lambda if we parsed auto parameters, or we have
2412// an associated template parameter list.
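//
// For example, '[](auto x) { return x; }' has invented template parameters for
// its 'auto' parameter, and '[]<class T>(T x) { return x; }' (C++20) has an
// explicit template parameter list; both are generic lambdas.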
2413LambdaScopeInfo *Sema::getCurGenericLambda() {
2414 if (LambdaScopeInfo *LSI = getCurLambda()) {
2415 return (LSI->TemplateParams.size() ||
2416 LSI->GLTemplateParameterList) ? LSI : nullptr;
2417 }
2418 return nullptr;
2419}
2420
2421
2422void Sema::ActOnComment(SourceRange Comment) {
2423 if (!LangOpts.RetainCommentsFromSystemHeaders &&
2424 SourceMgr.isInSystemHeader(Loc: Comment.getBegin()))
2425 return;
2426 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
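  // An "almost trailing" documentation comment is one written as, e.g.,
  // 'int x; //< doc' or 'int x; /*< doc */'; the fixit below suggests the
  // Doxygen trailing forms '///<' and '/**<' instead.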
2427 if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
2428 SourceRange MagicMarkerRange(Comment.getBegin(),
2429 Comment.getBegin().getLocWithOffset(Offset: 3));
2430 StringRef MagicMarkerText;
2431 switch (RC.getKind()) {
2432 case RawComment::RCK_OrdinaryBCPL:
2433 MagicMarkerText = "///<";
2434 break;
2435 case RawComment::RCK_OrdinaryC:
2436 MagicMarkerText = "/**<";
2437 break;
2438 case RawComment::RCK_Invalid:
2439 // FIXME: are there other scenarios that could produce an invalid
2440 // raw comment here?
2441 Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
2442 return;
2443 default:
2444 llvm_unreachable("if this is an almost Doxygen comment, "
2445 "it should be ordinary");
2446 }
2447 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
2448 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
2449 }
2450 Context.addComment(RC);
2451}
2452
2453// Pin this vtable to this file.
2454ExternalSemaSource::~ExternalSemaSource() {}
2455char ExternalSemaSource::ID;
2456
2457void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
2458void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
2459
2460void ExternalSemaSource::ReadKnownNamespaces(
2461 SmallVectorImpl<NamespaceDecl *> &Namespaces) {
2462}
2463
2464void ExternalSemaSource::ReadUndefinedButUsed(
2465 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
2466
2467void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
2468 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2469
2470/// Figure out if an expression could be turned into a call.
2471///
2472/// Use this when trying to recover from an error where the programmer may have
2473/// written just the name of a function instead of actually calling it.
2474///
2475/// \param E - The expression to examine.
2476/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
2477/// with no arguments, this parameter is set to the type returned by such a
2478/// call; otherwise, it is set to an empty QualType.
2479/// \param OverloadSet - If the expression is an overloaded function
2480/// name, this parameter is populated with the decls of the various overloads.
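///
/// For example (hypothetical user code, for illustration only):
///   int getX();
///   int y = getX;   // 'getX' is callable with zero arguments, so
///                   // ZeroArgCallReturnTy is set to 'int' and the caller can
///                   // suggest appending '()'.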
2481bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
2482 UnresolvedSetImpl &OverloadSet) {
2483 ZeroArgCallReturnTy = QualType();
2484 OverloadSet.clear();
2485
2486 const OverloadExpr *Overloads = nullptr;
2487 bool IsMemExpr = false;
2488 if (E.getType() == Context.OverloadTy) {
2489 OverloadExpr::FindResult FR = OverloadExpr::find(E: const_cast<Expr*>(&E));
2490
2491 // Ignore overloads that are pointer-to-member constants.
2492 if (FR.HasFormOfMemberPointer)
2493 return false;
2494
2495 Overloads = FR.Expression;
2496 } else if (E.getType() == Context.BoundMemberTy) {
2497 Overloads = dyn_cast<UnresolvedMemberExpr>(Val: E.IgnoreParens());
2498 IsMemExpr = true;
2499 }
2500
2501 bool Ambiguous = false;
2502 bool IsMV = false;
2503
2504 if (Overloads) {
2505 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
2506 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
2507 OverloadSet.addDecl(D: *it);
2508
2509 // Check whether the function is a non-template, non-member which takes no
2510 // arguments.
2511 if (IsMemExpr)
2512 continue;
2513 if (const FunctionDecl *OverloadDecl
2514 = dyn_cast<FunctionDecl>(Val: (*it)->getUnderlyingDecl())) {
2515 if (OverloadDecl->getMinRequiredArguments() == 0) {
2516 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
2517 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
2518 OverloadDecl->isCPUSpecificMultiVersion()))) {
2519 ZeroArgCallReturnTy = QualType();
2520 Ambiguous = true;
2521 } else {
2522 ZeroArgCallReturnTy = OverloadDecl->getReturnType();
2523 IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
2524 OverloadDecl->isCPUSpecificMultiVersion();
2525 }
2526 }
2527 }
2528 }
2529
2530 // If it's not a member, use better machinery to try to resolve the call
2531 if (!IsMemExpr)
2532 return !ZeroArgCallReturnTy.isNull();
2533 }
2534
2535 // Attempt to call the member with no arguments - this will correctly handle
2536 // member templates with defaults/deduction of template arguments, overloads
2537 // with default arguments, etc.
2538 if (IsMemExpr && !E.isTypeDependent()) {
2539 Sema::TentativeAnalysisScope Trap(*this);
2540 ExprResult R = BuildCallToMemberFunction(S: nullptr, MemExpr: &E, LParenLoc: SourceLocation(),
2541 Args: std::nullopt, RParenLoc: SourceLocation());
2542 if (R.isUsable()) {
2543 ZeroArgCallReturnTy = R.get()->getType();
2544 return true;
2545 }
2546 return false;
2547 }
2548
2549 if (const auto *DeclRef = dyn_cast<DeclRefExpr>(Val: E.IgnoreParens())) {
2550 if (const auto *Fun = dyn_cast<FunctionDecl>(Val: DeclRef->getDecl())) {
2551 if (Fun->getMinRequiredArguments() == 0)
2552 ZeroArgCallReturnTy = Fun->getReturnType();
2553 return true;
2554 }
2555 }
2556
2557 // We don't have an expression that's convenient to get a FunctionDecl from,
2558 // but we can at least check if the type is "function of 0 arguments".
2559 QualType ExprTy = E.getType();
2560 const FunctionType *FunTy = nullptr;
2561 QualType PointeeTy = ExprTy->getPointeeType();
2562 if (!PointeeTy.isNull())
2563 FunTy = PointeeTy->getAs<FunctionType>();
2564 if (!FunTy)
2565 FunTy = ExprTy->getAs<FunctionType>();
2566
2567 if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(Val: FunTy)) {
2568 if (FPT->getNumParams() == 0)
2569 ZeroArgCallReturnTy = FunTy->getReturnType();
2570 return true;
2571 }
2572 return false;
2573}
2574
2575/// Give notes for a set of overloads.
2576///
2577/// A companion to tryExprAsCall. In cases when the name that the programmer
2578/// wrote was an overloaded function, we may be able to make some guesses about
2579/// plausible overloads based on their return types; such guesses can be handed
2580/// off to this method to be emitted as notes.
2581///
2582/// \param Overloads - The overloads to note.
2583/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2584/// -fshow-overloads=best, this is the location to attach to the note about too
2585/// many candidates. Typically this will be the location of the original
2586/// ill-formed expression.
2587static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2588 const SourceLocation FinalNoteLoc) {
2589 unsigned ShownOverloads = 0;
2590 unsigned SuppressedOverloads = 0;
2591 for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2592 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2593 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2594 ++SuppressedOverloads;
2595 continue;
2596 }
2597
2598 const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2599 // Don't print overloads for non-default multiversioned functions.
2600 if (const auto *FD = Fn->getAsFunction()) {
2601 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2602 !FD->getAttr<TargetAttr>()->isDefaultVersion())
2603 continue;
2604 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2605 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2606 continue;
2607 }
2608 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2609 ++ShownOverloads;
2610 }
2611
2612 S.Diags.overloadCandidatesShown(N: ShownOverloads);
2613
2614 if (SuppressedOverloads)
2615 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2616 << SuppressedOverloads;
2617}
2618
2619static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2620 const UnresolvedSetImpl &Overloads,
2621 bool (*IsPlausibleResult)(QualType)) {
2622 if (!IsPlausibleResult)
2623 return noteOverloads(S, Overloads, FinalNoteLoc: Loc);
2624
2625 UnresolvedSet<2> PlausibleOverloads;
2626 for (OverloadExpr::decls_iterator It = Overloads.begin(),
2627 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2628 const auto *OverloadDecl = cast<FunctionDecl>(Val: *It);
2629 QualType OverloadResultTy = OverloadDecl->getReturnType();
2630 if (IsPlausibleResult(OverloadResultTy))
2631 PlausibleOverloads.addDecl(D: It.getDecl());
2632 }
2633 noteOverloads(S, Overloads: PlausibleOverloads, FinalNoteLoc: Loc);
2634}
2635
2636/// Determine whether the given expression can be called by just
2637/// putting parentheses after it. Notably, expressions with unary
2638/// operators can't be, because the appended parentheses would bind to the
2639/// operand and leave the unary operator outside the call.
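///
/// For example, appending "()" to '&func' produces '&func()', which parses as
/// '&(func())' rather than '(&func)()', so the fix-it would not call the
/// expression the user wrote.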
2640static bool IsCallableWithAppend(const Expr *E) {
2641 E = E->IgnoreImplicit();
2642 return (!isa<CStyleCastExpr>(Val: E) &&
2643 !isa<UnaryOperator>(Val: E) &&
2644 !isa<BinaryOperator>(Val: E) &&
2645 !isa<CXXOperatorCallExpr>(Val: E));
2646}
2647
2648static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2649 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2650 E = UO->getSubExpr();
2651
2652 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(Val: E)) {
2653 if (ULE->getNumDecls() == 0)
2654 return false;
2655
2656 const NamedDecl *ND = *ULE->decls_begin();
2657 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2658 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2659 }
2660 return false;
2661}
2662
2663bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
2664 bool ForceComplain,
2665 bool (*IsPlausibleResult)(QualType)) {
2666 SourceLocation Loc = E.get()->getExprLoc();
2667 SourceRange Range = E.get()->getSourceRange();
2668 UnresolvedSet<4> Overloads;
2669
2670 // If this is a SFINAE context, don't try anything that might trigger ADL
2671 // prematurely.
2672 if (!isSFINAEContext()) {
2673 QualType ZeroArgCallTy;
2674 if (tryExprAsCall(E&: *E.get(), ZeroArgCallReturnTy&: ZeroArgCallTy, OverloadSet&: Overloads) &&
2675 !ZeroArgCallTy.isNull() &&
2676 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
2677 // At this point, we know E is potentially callable with 0
2678 // arguments and that it returns something of a reasonable type,
2679 // so we can emit a fixit and carry on pretending that E was
2680 // actually a CallExpr.
2681 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Loc: Range.getEnd());
2682 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
2683 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
2684 << (IsCallableWithAppend(E: E.get())
2685 ? FixItHint::CreateInsertion(InsertionLoc: ParenInsertionLoc,
2686 Code: "()")
2687 : FixItHint());
2688 if (!IsMV)
2689 notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);
2690
2691 // FIXME: Try this before emitting the fixit, and suppress diagnostics
2692 // while doing so.
2693 E = BuildCallExpr(S: nullptr, Fn: E.get(), LParenLoc: Range.getEnd(), ArgExprs: std::nullopt,
2694 RParenLoc: Range.getEnd().getLocWithOffset(Offset: 1));
2695 return true;
2696 }
2697 }
2698 if (!ForceComplain) return false;
2699
2700 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
2701 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
2702 if (!IsMV)
2703 notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);
2704 E = ExprError();
2705 return true;
2706}
2707
2708IdentifierInfo *Sema::getSuperIdentifier() const {
2709 if (!Ident_super)
2710 Ident_super = &Context.Idents.get(Name: "super");
2711 return Ident_super;
2712}
2713
2714void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2715 CapturedRegionKind K,
2716 unsigned OpenMPCaptureLevel) {
2717 auto *CSI = new CapturedRegionScopeInfo(
2718 getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2719 (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
2720 OpenMPCaptureLevel);
2721 CSI->ReturnType = Context.VoidTy;
2722 FunctionScopes.push_back(CSI);
2723 CapturingFunctionScopes++;
2724}
2725
2726CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2727 if (FunctionScopes.empty())
2728 return nullptr;
2729
2730 return dyn_cast<CapturedRegionScopeInfo>(Val: FunctionScopes.back());
2731}
2732
2733const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
2734Sema::getMismatchingDeleteExpressions() const {
2735 return DeleteExprs;
2736}
2737
2738Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
2739 : S(S), OldFPFeaturesState(S.CurFPFeatures),
2740 OldOverrides(S.FpPragmaStack.CurrentValue),
2741 OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
2742 OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2743
2744Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
2745 S.CurFPFeatures = OldFPFeaturesState;
2746 S.FpPragmaStack.CurrentValue = OldOverrides;
2747 S.PP.setCurrentFPEvalMethod(PragmaLoc: OldFPPragmaLocation, Val: OldEvalMethod);
2748}
2749
2750bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2751 assert(D.getCXXScopeSpec().isSet() &&
2752 "can only be called for qualified names");
2753
2754 auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2755 LookupOrdinaryName, forRedeclarationInCurContext());
2756 DeclContext *DC = computeDeclContext(SS: D.getCXXScopeSpec(),
2757 EnteringContext: !D.getDeclSpec().isFriendSpecified());
2758 if (!DC)
2759 return false;
2760
2761 LookupQualifiedName(R&: LR, LookupCtx: DC);
2762 bool Result = llvm::all_of(Range&: LR, P: [](Decl *Dcl) {
2763 if (NamedDecl *ND = dyn_cast<NamedDecl>(Val: Dcl)) {
2764 ND = ND->getUnderlyingDecl();
2765 return isa<FunctionDecl>(Val: ND) || isa<FunctionTemplateDecl>(Val: ND) ||
2766 isa<UsingDecl>(Val: ND);
2767 }
2768 return false;
2769 });
2770 return Result;
2771}
2772
