1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTStructuralEquivalence.h"
20#include "clang/AST/ASTTypeTraits.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/AttrIterator.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Comment.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/DeclBase.h"
27#include "clang/AST/DeclCXX.h"
28#include "clang/AST/DeclContextInternals.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/DeclOpenMP.h"
31#include "clang/AST/DeclTemplate.h"
32#include "clang/AST/DeclarationName.h"
33#include "clang/AST/DependenceFlags.h"
34#include "clang/AST/Expr.h"
35#include "clang/AST/ExprCXX.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/ExceptionSpecificationType.h"
54#include "clang/Basic/IdentifierTable.h"
55#include "clang/Basic/LLVM.h"
56#include "clang/Basic/LangOptions.h"
57#include "clang/Basic/Linkage.h"
58#include "clang/Basic/Module.h"
59#include "clang/Basic/NoSanitizeList.h"
60#include "clang/Basic/ObjCRuntime.h"
61#include "clang/Basic/ProfileList.h"
62#include "clang/Basic/SourceLocation.h"
63#include "clang/Basic/SourceManager.h"
64#include "clang/Basic/Specifiers.h"
65#include "clang/Basic/TargetCXXABI.h"
66#include "clang/Basic/TargetInfo.h"
67#include "clang/Basic/XRayLists.h"
68#include "llvm/ADT/APFixedPoint.h"
69#include "llvm/ADT/APInt.h"
70#include "llvm/ADT/APSInt.h"
71#include "llvm/ADT/ArrayRef.h"
72#include "llvm/ADT/DenseMap.h"
73#include "llvm/ADT/DenseSet.h"
74#include "llvm/ADT/FoldingSet.h"
75#include "llvm/ADT/PointerUnion.h"
76#include "llvm/ADT/STLExtras.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/StringExtras.h"
80#include "llvm/ADT/StringRef.h"
81#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82#include "llvm/Support/Capacity.h"
83#include "llvm/Support/Compiler.h"
84#include "llvm/Support/ErrorHandling.h"
85#include "llvm/Support/MD5.h"
86#include "llvm/Support/MathExtras.h"
87#include "llvm/Support/SipHash.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/TargetParser/AArch64TargetParser.h"
90#include "llvm/TargetParser/Triple.h"
91#include <algorithm>
92#include <cassert>
93#include <cstddef>
94#include <cstdint>
95#include <cstdlib>
96#include <map>
97#include <memory>
98#include <optional>
99#include <string>
100#include <tuple>
101#include <utility>
102
103using namespace clang;
104
/// Ranks of the builtin floating-point types, listed from lowest to highest
/// rank. NOTE(review): the enumerator order appears to encode the conversion
/// rank ordering used elsewhere — do not reorder without auditing users.
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};
115
/// DenseMap traits allowing llvm::FoldingSetNodeID to be used as a map key.
template <> struct llvm::DenseMapInfo<llvm::FoldingSetNodeID> {
  // A default-constructed (empty) profile serves as the empty key.
  static FoldingSetNodeID getEmptyKey() { return FoldingSetNodeID{}; }

  // The tombstone is an ID saturated with the maximum unsigned value.
  // NOTE(review): the iteration count is derived from
  // sizeof(FoldingSetNodeID)/sizeof(unsigned), which assumes the inline
  // storage dominates the object layout; confirm no real profile can collide.
  static FoldingSetNodeID getTombstoneKey() {
    FoldingSetNodeID id;
    for (size_t i = 0; i < sizeof(id) / sizeof(unsigned); ++i) {
      id.AddInteger(std::numeric_limits<unsigned>::max());
    }
    return id;
  }

  // Hash by the node ID's own profile hash.
  static unsigned getHashValue(const FoldingSetNodeID &Val) {
    return Val.ComputeHash();
  }

  static bool isEqual(const FoldingSetNodeID &LHS,
                      const FoldingSetNodeID &RHS) {
    return LHS == RHS;
  }
};
136
/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D. Returns an empty vector for declarations that cannot
/// carry user documentation (implicit decls, implicit instantiations, etc.).
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Likewise for implicitly-instantiated static data members.
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Undeclared or implicitly-instantiated class template specializations
  // cannot carry documentation either.
  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    // Common case: a plain file location; a single candidate suffices.
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}
235
/// Search \p CommentsInTheFile (keyed by file offset) for a doc comment
/// attached to \p D at \p RepresentativeLocForDecl. Accepts either a trailing
/// comment on the same line as the declaration, or the nearest preceding
/// non-trailing doc comment with nothing but whitespace between the comment
/// and the declaration. Returns null if no suitable comment is found.
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const FileIDAndOffset DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    // Trailing comments are only attached to declaration kinds where they
    // are conventional (fields, enumerators, variables, ObjC members).
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
313
/// Find a doc comment for \p D without consulting the decl-to-comment caches.
/// Tries each candidate search location in turn and returns the first comment
/// found, lazily loading comments from the external AST source on first use.
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    // Pull in comments from the external source (e.g. PCH/module) once.
    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}
346
/// Record a raw comment for later attachment to declarations. Callers must
/// not pass system-header comments unless RetainCommentsFromSystemHeaders is
/// enabled (asserted below).
void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}
352
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
/// Returns \p D unchanged when no adjustment applies.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      // Map to whichever the instantiation came from: the primary template
      // or a partial specialization.
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return isa<ClassTemplateDecl *>(PU)
                 ? *static_cast<const Decl *>(cast<ClassTemplateDecl *>(PU))
                 : *static_cast<const Decl *>(
                       cast<ClassTemplatePartialSpecializationDecl *>(PU));
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
422
423const RawComment *ASTContext::getRawCommentForAnyRedecl(
424 const Decl *D,
425 const Decl **OriginalDecl) const {
426 if (!D) {
427 if (OriginalDecl)
428 OriginalDecl = nullptr;
429 return nullptr;
430 }
431
432 D = &adjustDeclToTemplate(D: *D);
433
434 // Any comment directly attached to D?
435 {
436 auto DeclComment = DeclRawComments.find(Val: D);
437 if (DeclComment != DeclRawComments.end()) {
438 if (OriginalDecl)
439 *OriginalDecl = D;
440 return DeclComment->second;
441 }
442 }
443
444 // Any comment attached to any redeclaration of D?
445 const Decl *CanonicalD = D->getCanonicalDecl();
446 if (!CanonicalD)
447 return nullptr;
448
449 {
450 auto RedeclComment = RedeclChainComments.find(Val: CanonicalD);
451 if (RedeclComment != RedeclChainComments.end()) {
452 if (OriginalDecl)
453 *OriginalDecl = RedeclComment->second;
454 auto CommentAtRedecl = DeclRawComments.find(Val: RedeclComment->second);
455 assert(CommentAtRedecl != DeclRawComments.end() &&
456 "This decl is supposed to have comment attached.");
457 return CommentAtRedecl->second;
458 }
459 }
460
461 // Any redeclarations of D that we haven't checked for comments yet?
462 const Decl *LastCheckedRedecl = [&]() {
463 const Decl *LastChecked = CommentlessRedeclChains.lookup(Val: CanonicalD);
464 bool CanUseCommentlessCache = false;
465 if (LastChecked) {
466 for (auto *Redecl : CanonicalD->redecls()) {
467 if (Redecl == D) {
468 CanUseCommentlessCache = true;
469 break;
470 }
471 if (Redecl == LastChecked)
472 break;
473 }
474 }
475 // FIXME: This could be improved so that even if CanUseCommentlessCache
476 // is false, once we've traversed past CanonicalD we still skip ahead
477 // LastChecked.
478 return CanUseCommentlessCache ? LastChecked : nullptr;
479 }();
480
481 for (const Decl *Redecl : D->redecls()) {
482 assert(Redecl);
483 // Skip all redeclarations that have been checked previously.
484 if (LastCheckedRedecl) {
485 if (LastCheckedRedecl == Redecl) {
486 LastCheckedRedecl = nullptr;
487 }
488 continue;
489 }
490 const RawComment *RedeclComment = getRawCommentForDeclNoCache(D: Redecl);
491 if (RedeclComment) {
492 cacheRawCommentForDecl(OriginalD: *Redecl, Comment: *RedeclComment);
493 if (OriginalDecl)
494 *OriginalDecl = Redecl;
495 return RedeclComment;
496 }
497 CommentlessRedeclChains[CanonicalD] = Redecl;
498 }
499
500 if (OriginalDecl)
501 *OriginalDecl = nullptr;
502 return nullptr;
503}
504
/// Record in the caches that \p Comment is the documentation attached to
/// \p OriginalD, and drop the "searched without success" marker for its
/// redeclaration chain, since the chain now has a comment.
void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}
513
/// Append to \p Redeclared the redeclarations of \p ObjCMethod found in the
/// known class extensions of the interface that the method's enclosing
/// implementation belongs to. No-op when the method is not inside an
/// @implementation or the interface is unavailable.
static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}
530
/// Try to attach not-yet-attached comments to the just-parsed declarations
/// in \p Decls. Only the file of the first validly-located decl is searched,
/// and decls that already have a cached comment are skipped.
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  // Determine the file to search: the file of the first decl with a valid
  // location.
  FileID File;
  for (const Decl *D : Decls) {
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  // Bail out early if every comment in this file is already attached.
  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    // Skip decls that already have a comment cached.
    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      // Cache both the raw comment and its parsed form on first match.
      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}
592
/// Clone the parsed comment \p FC so it can be attached to a different
/// declaration \p D: a fresh DeclInfo is filled for \p D, then pointed back
/// at the comment's original decl, inheriting the original's template
/// parameter docs when \p D has none of its own.
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  // DeclInfo is allocated in the ASTContext's bump allocator (placement new).
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  // After filling from D, retarget CommentDecl at the comment's own decl.
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}
607
608comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
609 const RawComment *RC = getRawCommentForDeclNoCache(D);
610 return RC ? RC->parse(Context: *this, PP: nullptr, D) : nullptr;
611}
612
/// Return the parsed documentation for \p D, consulting (and populating) the
/// parsed-comment cache. When \p D has no comment of its own, documentation
/// is inherited from related declarations: overridden methods, the tag type
/// behind a typedef, Objective-C superclasses/category interfaces, and
/// public C++ base classes. Returns null when nothing applies.
comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    // Cache hit. If the cached comment was parsed for another redeclaration,
    // clone it so its DeclInfo matches D.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    // No comment anywhere on the redecl chain: try to inherit one.
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      // Property accessors inherit the property's documentation.
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      // Walk up the superclass chain looking for inherited documentation.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
719
720void
721ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
722 const ASTContext &C,
723 TemplateTemplateParmDecl *Parm) {
724 ID.AddInteger(I: Parm->getDepth());
725 ID.AddInteger(I: Parm->getPosition());
726 ID.AddBoolean(B: Parm->isParameterPack());
727
728 TemplateParameterList *Params = Parm->getTemplateParameters();
729 ID.AddInteger(I: Params->size());
730 for (TemplateParameterList::const_iterator P = Params->begin(),
731 PEnd = Params->end();
732 P != PEnd; ++P) {
733 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
734 ID.AddInteger(I: 0);
735 ID.AddBoolean(B: TTP->isParameterPack());
736 ID.AddInteger(
737 I: TTP->getNumExpansionParameters().toInternalRepresentation());
738 continue;
739 }
740
741 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
742 ID.AddInteger(I: 1);
743 ID.AddBoolean(B: NTTP->isParameterPack());
744 ID.AddPointer(Ptr: C.getUnconstrainedType(T: C.getCanonicalType(T: NTTP->getType()))
745 .getAsOpaquePtr());
746 if (NTTP->isExpandedParameterPack()) {
747 ID.AddBoolean(B: true);
748 ID.AddInteger(I: NTTP->getNumExpansionTypes());
749 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
750 QualType T = NTTP->getExpansionType(I);
751 ID.AddPointer(Ptr: T.getCanonicalType().getAsOpaquePtr());
752 }
753 } else
754 ID.AddBoolean(B: false);
755 continue;
756 }
757
758 auto *TTP = cast<TemplateTemplateParmDecl>(Val: *P);
759 ID.AddInteger(I: 2);
760 Profile(ID, C, Parm: TTP);
761 }
762}
763
/// Compute (and cache in a folding set) the canonical form of a template
/// template parameter: same depth/position/packness, with an equivalent
/// template parameter list rebuilt with names, source locations, and
/// constraints stripped, and all types canonicalized.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->getNumExpansionParameters());
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      // Canonicalize the parameter's type, dropping any constraints.
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        // Expanded packs carry one canonical type per expansion.
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr, /*Typename=*/false,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}
847
848TemplateTemplateParmDecl *
849ASTContext::findCanonicalTemplateTemplateParmDeclInternal(
850 TemplateTemplateParmDecl *TTP) const {
851 llvm::FoldingSetNodeID ID;
852 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
853 void *InsertPos = nullptr;
854 CanonicalTemplateTemplateParm *Canonical =
855 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
856 return Canonical ? Canonical->getParam() : nullptr;
857}
858
/// Insert \p CanonTTP into the canonical template template parameter cache.
/// If an equivalent entry already exists, the existing parameter is returned
/// instead and \p CanonTTP is not inserted.
TemplateTemplateParmDecl *
ASTContext::insertCanonicalTemplateTemplateParmDeclInternal(
    TemplateTemplateParmDecl *CanonTTP) const {
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, CanonTTP);
  void *InsertPos = nullptr;
  // Probe first so an already-cached equivalent wins over the new node.
  if (auto *Existing =
          CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos))
    return Existing->getParam();
  CanonTemplateTemplateParms.InsertNode(
      new (*this) CanonicalTemplateTemplateParm(CanonTTP), InsertPos);
  return CanonTTP;
}
872
873/// Check if a type can have its sanitizer instrumentation elided based on its
874/// presence within an ignorelist.
875bool ASTContext::isTypeIgnoredBySanitizer(const SanitizerMask &Mask,
876 const QualType &Ty) const {
877 std::string TyName = Ty.getUnqualifiedType().getAsString(Policy: getPrintingPolicy());
878 return NoSanitizeL->containsType(Mask, MangledTypeName: TyName);
879}
880
881TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
882 auto Kind = getTargetInfo().getCXXABI().getKind();
883 return getLangOpts().CXXABI.value_or(u&: Kind);
884}
885
886CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
887 if (!LangOpts.CPlusPlus) return nullptr;
888
889 switch (getCXXABIKind()) {
890 case TargetCXXABI::AppleARM64:
891 case TargetCXXABI::Fuchsia:
892 case TargetCXXABI::GenericARM: // Same as Itanium at this level
893 case TargetCXXABI::iOS:
894 case TargetCXXABI::WatchOS:
895 case TargetCXXABI::GenericAArch64:
896 case TargetCXXABI::GenericMIPS:
897 case TargetCXXABI::GenericItanium:
898 case TargetCXXABI::WebAssembly:
899 case TargetCXXABI::XL:
900 return CreateItaniumCXXABI(Ctx&: *this);
901 case TargetCXXABI::Microsoft:
902 return CreateMicrosoftCXXABI(Ctx&: *this);
903 }
904 llvm_unreachable("Invalid CXXABI type!");
905}
906
907interp::Context &ASTContext::getInterpContext() {
908 if (!InterpContext) {
909 InterpContext.reset(p: new interp::Context(*this));
910 }
911 return *InterpContext;
912}
913
914ParentMapContext &ASTContext::getParentMapContext() {
915 if (!ParentMapCtx)
916 ParentMapCtx.reset(p: new ParentMapContext(*this));
917 return *ParentMapCtx;
918}
919
920static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
921 const LangOptions &LangOpts) {
922 switch (LangOpts.getAddressSpaceMapMangling()) {
923 case LangOptions::ASMM_Target:
924 return TI.useAddressSpaceMapMangling();
925 case LangOptions::ASMM_On:
926 return true;
927 case LangOptions::ASMM_Off:
928 return false;
929 }
930 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
931}
932
/// Construct an ASTContext.
///
/// Most of the type/decl side tables are folding sets parented to this
/// context (via this_()). The sanitizer ignorelist, XRay filter, and profile
/// list are built from the corresponding file lists in \p LOpts. The body
/// creates the implicit TranslationUnitDecl; builtin types are initialized
/// separately by InitBuiltinTypes() once the target is known.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      DeducedTemplates(this_()), ArrayParameterTypes(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  // Every context owns exactly one implicit translation-unit decl.
  addTranslationUnitDecl();
}
958
/// Tear down context-owned side structures. Called from the destructor and
/// may also be invoked explicitly; each step clears its container so the
/// function is safe to run more than once.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCInterfaceDecl *,
                      const ASTRecordLayout *>::iterator
           I = ObjCLayouts.begin(),
           E = ObjCLayouts.end();
       I != E;)
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  }
  ASTRecordLayouts.clear();

  // Attribute vectors were placement-constructed in context memory, so only
  // their destructors need to run here.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                    AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  // Same pattern: destroy without deallocating (memory is bump-allocated).
  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}
999
1000ASTContext::~ASTContext() { cleanup(); }
1001
1002void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
1003 TraversalScope = TopLevelDecls;
1004 getParentMapContext().clear();
1005}
1006
1007void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
1008 Deallocations.push_back(Elt: {Callback, Data});
1009}
1010
1011void
1012ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
1013 ExternalSource = std::move(Source);
1014}
1015
/// Dump statistics about this context to llvm::errs(): per-class type counts
/// and sizes, implicit special-member counts, external-source stats, and
/// bump-allocator usage.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  // One counter slot per concrete Type class, generated from TypeNodes.inc;
  // the trailing 0 absorbs the final comma from the expansion.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  // Walk the counters in the same TypeNodes.inc order they were declared in,
  // printing each non-zero class and accumulating the byte total.
  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                        \
  if (counts[Idx])                                                \
    llvm::errs() << "    " << counts[Idx] << " " << #Name         \
                 << " types, " << sizeof(Name##Type) << " each "  \
                 << "(" << counts[Idx] * sizeof(Name##Type)       \
                 << " bytes)\n";                                  \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                 \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  // Move operations only exist in C++ mode.
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
1076
/// Record that module \p M contains a merged definition of \p ND, optionally
/// notifying the AST mutation listener when a hidden definition is being
/// redefined.
void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener();
        Listener && !ND->isUnconditionallyVisible())
      Listener->RedefinedHiddenDefinition(D: ND, M);

  // Key on the canonical declaration so every redeclaration shares one list.
  MergedDefModules[cast<NamedDecl>(Val: ND->getCanonicalDecl())].push_back(NewVal: M);
}
1086
1087void ASTContext::deduplicateMergedDefinitionsFor(NamedDecl *ND) {
1088 auto It = MergedDefModules.find(Val: cast<NamedDecl>(Val: ND->getCanonicalDecl()));
1089 if (It == MergedDefModules.end())
1090 return;
1091
1092 auto &Merged = It->second;
1093 llvm::DenseSet<Module*> Found;
1094 for (Module *&M : Merged)
1095 if (!Found.insert(V: M).second)
1096 M = nullptr;
1097 llvm::erase(C&: Merged, V: nullptr);
1098}
1099
1100ArrayRef<Module *>
1101ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1102 auto MergedIt =
1103 MergedDefModules.find(Val: cast<NamedDecl>(Val: Def->getCanonicalDecl()));
1104 if (MergedIt == MergedDefModules.end())
1105 return {};
1106 return MergedIt->second;
1107}
1108
/// Materialize any lazily-recorded initializer decl IDs for this module by
/// deserializing them from the external AST source.
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  // Move the IDs aside before deserializing: the assert below guards against
  // GetExternalDecl adding further lazy initializers to this object while we
  // iterate.
  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Elt: Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}
1125
/// Record \p D as an initializer for module \p M.
///
/// \param M the module whose initializer list receives the decl.
/// \param D the initializer; an ImportDecl may be collapsed to the imported
///        module's sole initializer (see below).
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(Val: D)) {
    auto It = ModuleInitializers.find(Val: ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      // Force lazy IDs to decls so Initializers.front() is meaningful.
      Imported.resolve(Ctx&: *this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(Val: OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Create the per-module initializer list on first use (bump-allocated;
  // destroyed in cleanup()).
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(Elt: D);
}
1151
1152void ASTContext::addLazyModuleInitializers(Module *M,
1153 ArrayRef<GlobalDeclID> IDs) {
1154 auto *&Inits = ModuleInitializers[M];
1155 if (!Inits)
1156 Inits = new (*this) PerModuleInitializers;
1157 Inits->LazyInitializers.insert(I: Inits->LazyInitializers.end(),
1158 From: IDs.begin(), To: IDs.end());
1159}
1160
1161ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1162 auto It = ModuleInitializers.find(Val: M);
1163 if (It == ModuleInitializers.end())
1164 return {};
1165
1166 auto *Inits = It->second;
1167 Inits->resolve(Ctx&: *this);
1168 return Inits->Initializers;
1169}
1170
/// Set the C++ named module this translation unit belongs to. May be called
/// at most once per context, and only with a named module.
void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}
1177
/// Whether two modules are part of the same top-level (primary) module.
/// Exactly one null argument yields false; both null trips the assert below.
bool ASTContext::isInSameModule(const Module *M1, const Module *M2) const {
  // Different nullness means different modules.
  if (!M1 != !M2)
    return false;

  /// Get the representative module for M. The representative module is the
  /// first module unit for a specific primary module name. So that the module
  /// units have the same representative module belongs to the same module.
  ///
  /// The process is helpful to reduce the expensive string operations.
  auto GetRepresentativeModule = [this](const Module *M) {
    // Fast path: a previous query already resolved this module unit.
    auto Iter = SameModuleLookupSet.find(Val: M);
    if (Iter != SameModuleLookupSet.end())
      return Iter->second;

    // First unit seen for this primary module name becomes the
    // representative; cache the mapping for subsequent lookups.
    const Module *RepresentativeModule =
        PrimaryModuleNameMap.try_emplace(Key: M->getPrimaryModuleInterfaceName(), Args&: M)
            .first->second;
    SameModuleLookupSet[M] = RepresentativeModule;
    return RepresentativeModule;
  };

  assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
  return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
}
1202
1203ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1204 if (!ExternCContext)
1205 ExternCContext = ExternCContextDecl::Create(C: *this, TU: getTranslationUnitDecl());
1206
1207 return ExternCContext;
1208}
1209
/// Create the implicit BuiltinTemplateDecl for kind \p BTK named \p II and
/// register it in the translation unit.
BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(C: *this, DC: getTranslationUnitDecl(), Name: II, BTK);
  // Builtin templates have no written source; mark implicit and attach to TU.
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(D: BuiltinTemplate);

  return BuiltinTemplate;
}
1220
// Define one lazily-caching getter (ASTContext::get<Name>Decl) for each
// builtin template listed in BuiltinTemplates.inc.
#define BuiltinTemplate(BTName)                                                \
  BuiltinTemplateDecl *ASTContext::get##BTName##Decl() const {                 \
    if (!Decl##BTName)                                                         \
      Decl##BTName =                                                           \
          buildBuiltinTemplateDecl(BTK##BTName, get##BTName##Name());          \
    return Decl##BTName;                                                       \
  }
#include "clang/Basic/BuiltinTemplates.inc"
1229
/// Create an implicit record declaration named \p Name in the translation
/// unit, using CXXRecordDecl in C++ mode and RecordDecl otherwise. The record
/// gets default type visibility and is NOT added to the TU's decl list; the
/// caller decides whether to register it.
RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc,
                                    IdLoc: Loc, Id: &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc, IdLoc: Loc,
                                 Id: &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(A: TypeVisibilityAttr::CreateImplicit(
      Ctx&: const_cast<ASTContext &>(*this), Visibility: TypeVisibilityAttr::Default));
  return NewDecl;
}
1245
/// Create an implicit typedef of \p T named \p Name in the translation unit.
/// Not added to the TU's decl list; callers register it if needed.
TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      C&: const_cast<ASTContext &>(*this), DC: getTranslationUnitDecl(),
      StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}
1255
1256TypedefDecl *ASTContext::getInt128Decl() const {
1257 if (!Int128Decl)
1258 Int128Decl = buildImplicitTypedef(T: Int128Ty, Name: "__int128_t");
1259 return Int128Decl;
1260}
1261
1262TypedefDecl *ASTContext::getUInt128Decl() const {
1263 if (!UInt128Decl)
1264 UInt128Decl = buildImplicitTypedef(T: UnsignedInt128Ty, Name: "__uint128_t");
1265 return UInt128Decl;
1266}
1267
/// Allocate the BuiltinType node of kind \p K in this context, register it in
/// the Types list, and bind the canonical reference \p R to it.
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  // Builtin types are their own canonical form with no qualifiers.
  R = CanQualType::CreateUnsafe(Other: QualType(Ty, 0));
  Types.push_back(Elt: Ty);
}
1273
/// Create all builtin type singletons for the given (and optional auxiliary)
/// target. Must run exactly once per context, after the target is known;
/// also installs the C++ ABI object and derives language/target-dependent
/// types (wchar_t, char16_t, void*, etc.).
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  // The ABI object and mangling mode depend on the target, so they are set
  // up here rather than in the constructor.
  ABI.reset(p: createCXXABI(T: Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(TI: Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(R&: VoidTy, K: BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(R&: BoolTy, K: BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_S);
  else
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(R&: SignedCharTy, K: BuiltinType::SChar);
  InitBuiltinType(R&: ShortTy, K: BuiltinType::Short);
  InitBuiltinType(R&: IntTy, K: BuiltinType::Int);
  InitBuiltinType(R&: LongTy, K: BuiltinType::Long);
  InitBuiltinType(R&: LongLongTy, K: BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(R&: UnsignedCharTy, K: BuiltinType::UChar);
  InitBuiltinType(R&: UnsignedShortTy, K: BuiltinType::UShort);
  InitBuiltinType(R&: UnsignedIntTy, K: BuiltinType::UInt);
  InitBuiltinType(R&: UnsignedLongTy, K: BuiltinType::ULong);
  InitBuiltinType(R&: UnsignedLongLongTy, K: BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(R&: FloatTy, K: BuiltinType::Float);
  InitBuiltinType(R&: DoubleTy, K: BuiltinType::Double);
  InitBuiltinType(R&: LongDoubleTy, K: BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(R&: Float128Ty, K: BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(R&: Ibm128Ty, K: BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(R&: Float16Ty, K: BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(R&: ShortAccumTy, K: BuiltinType::ShortAccum);
  InitBuiltinType(R&: AccumTy, K: BuiltinType::Accum);
  InitBuiltinType(R&: LongAccumTy, K: BuiltinType::LongAccum);
  InitBuiltinType(R&: UnsignedShortAccumTy, K: BuiltinType::UShortAccum);
  InitBuiltinType(R&: UnsignedAccumTy, K: BuiltinType::UAccum);
  InitBuiltinType(R&: UnsignedLongAccumTy, K: BuiltinType::ULongAccum);
  InitBuiltinType(R&: ShortFractTy, K: BuiltinType::ShortFract);
  InitBuiltinType(R&: FractTy, K: BuiltinType::Fract);
  InitBuiltinType(R&: LongFractTy, K: BuiltinType::LongFract);
  InitBuiltinType(R&: UnsignedShortFractTy, K: BuiltinType::UShortFract);
  InitBuiltinType(R&: UnsignedFractTy, K: BuiltinType::UFract);
  InitBuiltinType(R&: UnsignedLongFractTy, K: BuiltinType::ULongFract);
  InitBuiltinType(R&: SatShortAccumTy, K: BuiltinType::SatShortAccum);
  InitBuiltinType(R&: SatAccumTy, K: BuiltinType::SatAccum);
  InitBuiltinType(R&: SatLongAccumTy, K: BuiltinType::SatLongAccum);
  InitBuiltinType(R&: SatUnsignedShortAccumTy, K: BuiltinType::SatUShortAccum);
  InitBuiltinType(R&: SatUnsignedAccumTy, K: BuiltinType::SatUAccum);
  InitBuiltinType(R&: SatUnsignedLongAccumTy, K: BuiltinType::SatULongAccum);
  InitBuiltinType(R&: SatShortFractTy, K: BuiltinType::SatShortFract);
  InitBuiltinType(R&: SatFractTy, K: BuiltinType::SatFract);
  InitBuiltinType(R&: SatLongFractTy, K: BuiltinType::SatLongFract);
  InitBuiltinType(R&: SatUnsignedShortFractTy, K: BuiltinType::SatUShortFract);
  InitBuiltinType(R&: SatUnsignedFractTy, K: BuiltinType::SatUFract);
  InitBuiltinType(R&: SatUnsignedLongFractTy, K: BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(R&: Int128Ty, K: BuiltinType::Int128);
  InitBuiltinType(R&: UnsignedInt128Ty, K: BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(T: Target.getWCharType()))
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Type: Target.getWCharType());
  }

  WIntTy = getFromTargetType(Type: Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(R&: Char8Ty, K: BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char16Ty, K: BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Type: Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char32Ty, K: BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Type: Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(R&: DependentTy, K: BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(R&: OverloadTy, K: BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(R&: BoundMemberTy, K: BuiltinType::BoundMember);

  // Placeholder type for unresolved templates.
  InitBuiltinType(R&: UnresolvedTemplateTy, K: BuiltinType::UnresolvedTemplate);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(R&: PseudoObjectTy, K: BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(R&: UnknownAnyTy, K: BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(R&: ARCUnbridgedCastTy, K: BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(R&: BuiltinFnTy, K: BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
    InitBuiltinType(R&: OMPArrayShapingTy, K: BuiltinType::OMPArrayShaping);
    InitBuiltinType(R&: OMPIteratorTy, K: BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
  // don't bother, as we're just using the same type as OMP.
  if (LangOpts.OpenACC && !LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(R&: IncompleteMatrixIdxTy, K: BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(R&: ObjCBuiltinIdTy, K: BuiltinType::ObjCId);
  InitBuiltinType(R&: ObjCBuiltinClassTy, K: BuiltinType::ObjCClass);
  InitBuiltinType(R&: ObjCBuiltinSelTy, K: BuiltinType::ObjCSel);

  // Target- and language-specific singleton types below are generated from
  // their respective .def/.inc tables.
  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(R&: OCLSamplerTy, K: BuiltinType::OCLSampler);
    InitBuiltinType(R&: OCLEventTy, K: BuiltinType::OCLEvent);
    InitBuiltinType(R&: OCLClkEventTy, K: BuiltinType::OCLClkEvent);
    InitBuiltinType(R&: OCLQueueTy, K: BuiltinType::OCLQueue);
    InitBuiltinType(R&: OCLReserveIDTy, K: BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (LangOpts.HLSL) {
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/HLSLIntangibleTypes.def"
  }

  if (Target.hasAArch64ACLETypes() ||
      (AuxTarget && AuxTarget->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64ACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Target.getTriple().isAMDGPU() ||
      (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(T: getCanonicalType(
        T: getQualifiedType(T: VoidTy.getUnqualifiedType(), Qs: Q)));
  } else {
    VoidPtrTy = getPointerType(T: VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(R&: NullPtrTy, K: BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(R&: HalfTy, K: BuiltinType::Half);

  InitBuiltinType(R&: BFloat16Ty, K: BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord(Name: "_GUID");
    getTranslationUnitDecl()->addDecl(D: MSGuidTagDecl);
  }
}
1520
1521DiagnosticsEngine &ASTContext::getDiagnostics() const {
1522 return SourceMgr.getDiagnostics();
1523}
1524
1525AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1526 AttrVec *&Result = DeclAttrs[D];
1527 if (!Result) {
1528 void *Mem = Allocate(Size: sizeof(AttrVec));
1529 Result = new (Mem) AttrVec;
1530 }
1531
1532 return *Result;
1533}
1534
1535/// Erase the attributes corresponding to the given declaration.
1536void ASTContext::eraseDeclAttrs(const Decl *D) {
1537 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(Val: D);
1538 if (Pos != DeclAttrs.end()) {
1539 Pos->second->~AttrVec();
1540 DeclAttrs.erase(I: Pos);
1541 }
1542}
1543
// FIXME: Remove ?
/// Return the member-specialization info recording what this static data
/// member was instantiated from, or null if none was recorded.
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}
1551
1552ASTContext::TemplateOrSpecializationInfo
1553ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1554 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1555 TemplateOrInstantiation.find(Val: Var);
1556 if (Pos == TemplateOrInstantiation.end())
1557 return {};
1558
1559 return Pos->second;
1560}
1561
/// Note that static data member \p Inst was instantiated from \p Tmpl with
/// kind \p TSK at \p PointOfInstantiation.
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                           TemplateSpecializationKind TSK,
                                       SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  // The MemberSpecializationInfo node is allocated in context memory.
  setTemplateOrSpecializationInfo(Inst, TSI: new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}
1571
/// Record \p TSI as the template-or-specialization info for \p Inst. Each
/// variable may be registered at most once.
void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}
1579
1580NamedDecl *
1581ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1582 return InstantiatedFromUsingDecl.lookup(Val: UUD);
1583}
1584
/// Record that using declaration \p Inst was instantiated from pattern
/// \p Pattern. Both must be one of the three using-decl flavors, and a
/// pattern may be recorded at most once per instantiation.
void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}
1598
1599UsingEnumDecl *
1600ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
1601 return InstantiatedFromUsingEnumDecl.lookup(Val: UUD);
1602}
1603
/// Record that using-enum declaration \p Inst was instantiated from
/// \p Pattern; may be recorded at most once per instantiation.
void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}
1609
1610UsingShadowDecl *
1611ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1612 return InstantiatedFromUsingShadowDecl.lookup(Val: Inst);
1613}
1614
/// Record that shadow declaration \p Inst was instantiated from \p Pattern;
/// may be recorded at most once per instantiation.
void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}
1621
1622FieldDecl *
1623ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) const {
1624 return InstantiatedFromUnnamedFieldDecl.lookup(Val: Field);
1625}
1626
1627void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1628 FieldDecl *Tmpl) {
1629 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1630 "Instantiated field decl is not unnamed");
1631 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1632 "Template field decl is not unnamed");
1633 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1634 "Already noted what unnamed field was instantiated from");
1635
1636 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1637}
1638
1639ASTContext::overridden_cxx_method_iterator
1640ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1641 return overridden_methods(Method).begin();
1642}
1643
1644ASTContext::overridden_cxx_method_iterator
1645ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1646 return overridden_methods(Method).end();
1647}
1648
1649unsigned
1650ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1651 auto Range = overridden_methods(Method);
1652 return Range.end() - Range.begin();
1653}
1654
1655ASTContext::overridden_method_range
1656ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1657 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1658 OverriddenMethods.find(Val: Method->getCanonicalDecl());
1659 if (Pos == OverriddenMethods.end())
1660 return overridden_method_range(nullptr, nullptr);
1661 return overridden_method_range(Pos->second.begin(), Pos->second.end());
1662}
1663
/// Record that \p Method overrides \p Overridden. Both must already be
/// canonical declarations, matching the lookup in overridden_methods().
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(NewVal: Overridden);
}
1669
1670void ASTContext::getOverriddenMethods(
1671 const NamedDecl *D,
1672 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1673 assert(D);
1674
1675 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(Val: D)) {
1676 Overridden.append(in_start: overridden_methods_begin(Method: CXXMethod),
1677 in_end: overridden_methods_end(Method: CXXMethod));
1678 return;
1679 }
1680
1681 const auto *Method = dyn_cast<ObjCMethodDecl>(Val: D);
1682 if (!Method)
1683 return;
1684
1685 SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1686 Method->getOverriddenMethods(Overridden&: OverDecls);
1687 Overridden.append(in_start: OverDecls.begin(), in_end: OverDecls.end());
1688}
1689
1690std::optional<ASTContext::CXXRecordDeclRelocationInfo>
1691ASTContext::getRelocationInfoForCXXRecord(const CXXRecordDecl *RD) const {
1692 assert(RD);
1693 CXXRecordDecl *D = RD->getDefinition();
1694 auto it = RelocatableClasses.find(Val: D);
1695 if (it != RelocatableClasses.end())
1696 return it->getSecond();
1697 return std::nullopt;
1698}
1699
1700void ASTContext::setRelocationInfoForCXXRecord(
1701 const CXXRecordDecl *RD, CXXRecordDeclRelocationInfo Info) {
1702 assert(RD);
1703 CXXRecordDecl *D = RD->getDefinition();
1704 assert(RelocatableClasses.find(D) == RelocatableClasses.end());
1705 RelocatableClasses.insert(KV: {D, Info});
1706}
1707
/// Returns true if the vtable pointer of \p Class should be signed with
/// address discrimination. The decision is taken from an explicit
/// VTablePointerAuthenticationAttr on the class anchoring \p Class's vtable
/// authentication (per ASTContext::baseForVTableAuthentication), falling back
/// to the global PointerAuthVTPtrAddressDiscrimination language option.
///
/// NOTE(review): "Hase" in the name looks like a typo for "Has"; renaming it
/// requires updating its caller (findPointerAuthContent) in the same change.
static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
    ASTContext &Context, const CXXRecordDecl *Class) {
  // Only polymorphic classes carry a vtable pointer to authenticate.
  if (!Class->isPolymorphic())
    return false;
  const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(ThisClass: Class);
  using AuthAttr = VTablePointerAuthenticationAttr;
  const AuthAttr *ExplicitAuth = BaseType->getAttr<AuthAttr>();
  // No explicit attribute: the command-line default decides.
  if (!ExplicitAuth)
    return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
  AuthAttr::AddressDiscriminationMode AddressDiscrimination =
      ExplicitAuth->getAddressDiscrimination();
  // An explicit "default" mode likewise defers to the command-line setting.
  if (AddressDiscrimination == AuthAttr::DefaultAddressDiscrimination)
    return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
  return AddressDiscrimination == AuthAttr::AddressDiscrimination;
}
1723
/// Determine the strongest kind of address-discriminated pointer
/// authentication reachable through type \p T: an address-discriminated
/// qualifier on T itself, or (for record types) an address-discriminated
/// vtable pointer or any such content in bases/fields, searched recursively.
/// Results for records are memoized in
/// RecordContainsAddressDiscriminatedPointerAuth.
ASTContext::PointerAuthContent ASTContext::findPointerAuthContent(QualType T) {
  assert(isPointerAuthenticationAvailable());

  T = T.getCanonicalType();
  if (T->isDependentType())
    return PointerAuthContent::None;

  // A qualifier on T itself is the strongest possible answer; no need to
  // look inside.
  if (T.hasAddressDiscriminatedPointerAuth())
    return PointerAuthContent::AddressDiscriminatedData;
  const RecordDecl *RD = T->getAsRecordDecl();
  if (!RD)
    return PointerAuthContent::None;

  // Return the memoized result for this record if we have one.
  if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(Val: RD);
      Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
    return Existing->second;

  PointerAuthContent Result = PointerAuthContent::None;

  // Every exit path below goes through this helper so the computed Result is
  // always cached exactly once for RD.
  auto SaveResultAndReturn = [&]() -> PointerAuthContent {
    auto [ResultIter, DidAdd] =
        RecordContainsAddressDiscriminatedPointerAuth.try_emplace(Key: RD, Args&: Result);
    (void)ResultIter;
    (void)DidAdd;
    assert(DidAdd);
    return Result;
  };
  // Fold NewResult into Result (the enum is ordered by strength) and report
  // whether a stronger answer is still possible; once we reach
  // AddressDiscriminatedData the search can stop early.
  auto ShouldContinueAfterUpdate = [&](PointerAuthContent NewResult) {
    static_assert(PointerAuthContent::None <
                  PointerAuthContent::AddressDiscriminatedVTable);
    static_assert(PointerAuthContent::AddressDiscriminatedVTable <
                  PointerAuthContent::AddressDiscriminatedData);
    if (NewResult > Result)
      Result = NewResult;
    return Result != PointerAuthContent::AddressDiscriminatedData;
  };
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // Check the vtable pointer, then recurse into base classes.
    if (primaryBaseHaseAddressDiscriminatedVTableAuthentication(Context&: *this, Class: CXXRD) &&
        !ShouldContinueAfterUpdate(
            PointerAuthContent::AddressDiscriminatedVTable))
      return SaveResultAndReturn();
    for (auto Base : CXXRD->bases()) {
      if (!ShouldContinueAfterUpdate(findPointerAuthContent(T: Base.getType())))
        return SaveResultAndReturn();
    }
  }
  // Recurse into the record's fields (C and C++ alike).
  for (auto *FieldDecl : RD->fields()) {
    if (!ShouldContinueAfterUpdate(
            findPointerAuthContent(T: FieldDecl->getType())))
      return SaveResultAndReturn();
  }
  return SaveResultAndReturn();
}
1777
1778void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1779 assert(!Import->getNextLocalImport() &&
1780 "Import declaration already in the chain");
1781 assert(!Import->isFromASTFile() && "Non-local import declaration");
1782 if (!FirstLocalImport) {
1783 FirstLocalImport = Import;
1784 LastLocalImport = Import;
1785 return;
1786 }
1787
1788 LastLocalImport->setNextLocalImport(Import);
1789 LastLocalImport = Import;
1790}
1791
1792//===----------------------------------------------------------------------===//
1793// Type Sizing and Analysis
1794//===----------------------------------------------------------------------===//
1795
1796/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1797/// scalar floating point type.
1798const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1799 switch (T->castAs<BuiltinType>()->getKind()) {
1800 default:
1801 llvm_unreachable("Not a floating point type!");
1802 case BuiltinType::BFloat16:
1803 return Target->getBFloat16Format();
1804 case BuiltinType::Float16:
1805 return Target->getHalfFormat();
1806 case BuiltinType::Half:
1807 return Target->getHalfFormat();
1808 case BuiltinType::Float: return Target->getFloatFormat();
1809 case BuiltinType::Double: return Target->getDoubleFormat();
1810 case BuiltinType::Ibm128:
1811 return Target->getIbm128Format();
1812 case BuiltinType::LongDouble:
1813 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1814 return AuxTarget->getLongDoubleFormat();
1815 return Target->getLongDoubleFormat();
1816 case BuiltinType::Float128:
1817 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1818 return AuxTarget->getFloat128Format();
1819 return Target->getFloat128Format();
1820 }
1821}
1822
/// Compute the alignment of declaration \p D, either as reported by
/// __alignof (\p ForAlignof true) or as used when laying the entity out.
/// Combines, in order: aligned attributes, the declared type's (preferred)
/// alignment, target large-array adjustments, global-variable minimum
/// alignment, field packing/offset constraints, and target caps on the
/// aligned attribute for statics. Works in bits internally; returns
/// CharUnits.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  // Start from the minimum possible alignment: one character.
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(Val: D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      // __alignof on a reference reports the referent's alignment; for
      // layout purposes a reference is represented as a pointer.
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(T: RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(QT: T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T: T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          // VLAs always count as "large"; constant arrays only when at
          // least MinWidth bits wide.
          if (isa<VariableArrayType>(Val: arrayType))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(Val: arrayType) &&
                   MinWidth <= getTypeSize(T: cast<ConstantArrayType>(Val: arrayType)))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
        }
      }
      Align = std::max(a: Align, b: getPreferredTypeAlign(T: T.getTypePtr()));
      // The __unaligned qualifier forces character alignment.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(Val: D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T: T.getTypePtr()) : 0;
        Align = std::max(a: Align, b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(Val: VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(D: Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(CharSize: Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(FieldNo: Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(a: Align, b: FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(Val: D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(a: Align, b: MaxAlignedAttr);

  return toCharUnitsFromBits(BitSize: Align);
}
1921
1922CharUnits ASTContext::getExnObjectAlignment() const {
1923 return toCharUnitsFromBits(BitSize: Target->getExnObjectAlignment());
1924}
1925
1926// getTypeInfoDataSizeInChars - Return the size of a type, in
1927// chars. If the type is a record, its data size is returned. This is
1928// the size of the memcpy that's performed when assigning this type
1929// using a trivial copy/move assignment operator.
1930TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1931 TypeInfoChars Info = getTypeInfoInChars(T);
1932
1933 // In C++, objects can sometimes be allocated into the tail padding
1934 // of a base-class subobject. We decide whether that's possible
1935 // during class layout, so here we can just trust the layout results.
1936 if (getLangOpts().CPlusPlus) {
1937 if (const auto *RT = T->getAs<RecordType>();
1938 RT && !RT->getDecl()->isInvalidDecl()) {
1939 const ASTRecordLayout &layout = getASTRecordLayout(D: RT->getDecl());
1940 Info.Width = layout.getDataSize();
1941 }
1942 }
1943
1944 return Info;
1945}
1946
1947/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1948/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1949TypeInfoChars
1950static getConstantArrayInfoInChars(const ASTContext &Context,
1951 const ConstantArrayType *CAT) {
1952 TypeInfoChars EltInfo = Context.getTypeInfoInChars(T: CAT->getElementType());
1953 uint64_t Size = CAT->getZExtSize();
1954 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1955 (uint64_t)(-1)/Size) &&
1956 "Overflow in array type char size evaluation");
1957 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1958 unsigned Align = EltInfo.Align.getQuantity();
1959 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1960 Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
1961 Width = llvm::alignTo(Value: Width, Align);
1962 return TypeInfoChars(CharUnits::fromQuantity(Quantity: Width),
1963 CharUnits::fromQuantity(Quantity: Align),
1964 EltInfo.AlignRequirement);
1965}
1966
1967TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
1968 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
1969 return getConstantArrayInfoInChars(Context: *this, CAT);
1970 TypeInfo Info = getTypeInfo(T);
1971 return TypeInfoChars(toCharUnitsFromBits(BitSize: Info.Width),
1972 toCharUnitsFromBits(BitSize: Info.Align), Info.AlignRequirement);
1973}
1974
1975TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
1976 return getTypeInfoInChars(T: T.getTypePtr());
1977}
1978
1979bool ASTContext::isPromotableIntegerType(QualType T) const {
1980 // HLSL doesn't promote all small integer types to int, it
1981 // just uses the rank-based promotion rules for all types.
1982 if (getLangOpts().HLSL)
1983 return false;
1984
1985 if (const auto *BT = T->getAs<BuiltinType>())
1986 switch (BT->getKind()) {
1987 case BuiltinType::Bool:
1988 case BuiltinType::Char_S:
1989 case BuiltinType::Char_U:
1990 case BuiltinType::SChar:
1991 case BuiltinType::UChar:
1992 case BuiltinType::Short:
1993 case BuiltinType::UShort:
1994 case BuiltinType::WChar_S:
1995 case BuiltinType::WChar_U:
1996 case BuiltinType::Char8:
1997 case BuiltinType::Char16:
1998 case BuiltinType::Char32:
1999 return true;
2000 default:
2001 return false;
2002 }
2003
2004 // Enumerated types are promotable to their compatible integer types
2005 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
2006 if (const auto *ET = T->getAs<EnumType>()) {
2007 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
2008 ET->getDecl()->isScoped())
2009 return false;
2010
2011 return true;
2012 }
2013
2014 return false;
2015}
2016
2017bool ASTContext::isAlignmentRequired(const Type *T) const {
2018 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
2019}
2020
2021bool ASTContext::isAlignmentRequired(QualType T) const {
2022 return isAlignmentRequired(T: T.getTypePtr());
2023}
2024
2025unsigned ASTContext::getTypeAlignIfKnown(QualType T,
2026 bool NeedsPreferredAlignment) const {
2027 // An alignment on a typedef overrides anything else.
2028 if (const auto *TT = T->getAs<TypedefType>())
2029 if (unsigned Align = TT->getDecl()->getMaxAlignment())
2030 return Align;
2031
2032 // If we have an (array of) complete type, we're done.
2033 T = getBaseElementType(QT: T);
2034 if (!T->isIncompleteType())
2035 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
2036
2037 // If we had an array type, its element type might be a typedef
2038 // type with an alignment attribute.
2039 if (const auto *TT = T->getAs<TypedefType>())
2040 if (unsigned Align = TT->getDecl()->getMaxAlignment())
2041 return Align;
2042
2043 // Otherwise, see if the declaration of the type had an attribute.
2044 if (const auto *TT = T->getAs<TagType>())
2045 return TT->getDecl()->getMaxAlignment();
2046
2047 return 0;
2048}
2049
2050TypeInfo ASTContext::getTypeInfo(const Type *T) const {
2051 TypeInfoMap::iterator I = MemoizedTypeInfo.find(Val: T);
2052 if (I != MemoizedTypeInfo.end())
2053 return I->second;
2054
2055 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
2056 TypeInfo TI = getTypeInfoImpl(T);
2057 MemoizedTypeInfo[T] = TI;
2058 return TI;
2059}
2060
2061/// getTypeInfoImpl - Return the size of the specified type, in bits. This
2062/// method does not work on incomplete types.
2063///
2064/// FIXME: Pointers into different addr spaces could have different sizes and
2065/// alignment requirements: getPointerInfo should take an AddrSpace, this
2066/// should take a QualType, &c.
2067TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
2068 uint64_t Width = 0;
2069 unsigned Align = 8;
2070 AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
2071 LangAS AS = LangAS::Default;
2072 switch (T->getTypeClass()) {
2073#define TYPE(Class, Base)
2074#define ABSTRACT_TYPE(Class, Base)
2075#define NON_CANONICAL_TYPE(Class, Base)
2076#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2077#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
2078 case Type::Class: \
2079 assert(!T->isDependentType() && "should not see dependent types here"); \
2080 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
2081#include "clang/AST/TypeNodes.inc"
2082 llvm_unreachable("Should not see dependent types");
2083
2084 case Type::FunctionNoProto:
2085 case Type::FunctionProto:
2086 // GCC extension: alignof(function) = 32 bits
2087 Width = 0;
2088 Align = 32;
2089 break;
2090
2091 case Type::IncompleteArray:
2092 case Type::VariableArray:
2093 case Type::ConstantArray:
2094 case Type::ArrayParameter: {
2095 // Model non-constant sized arrays as size zero, but track the alignment.
2096 uint64_t Size = 0;
2097 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
2098 Size = CAT->getZExtSize();
2099
2100 TypeInfo EltInfo = getTypeInfo(T: cast<ArrayType>(Val: T)->getElementType());
2101 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
2102 "Overflow in array type bit size evaluation");
2103 Width = EltInfo.Width * Size;
2104 Align = EltInfo.Align;
2105 AlignRequirement = EltInfo.AlignRequirement;
2106 if (!getTargetInfo().getCXXABI().isMicrosoft() ||
2107 getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
2108 Width = llvm::alignTo(Value: Width, Align);
2109 break;
2110 }
2111
2112 case Type::ExtVector:
2113 case Type::Vector: {
2114 const auto *VT = cast<VectorType>(Val: T);
2115 TypeInfo EltInfo = getTypeInfo(T: VT->getElementType());
2116 Width = VT->isPackedVectorBoolType(ctx: *this)
2117 ? VT->getNumElements()
2118 : EltInfo.Width * VT->getNumElements();
2119 // Enforce at least byte size and alignment.
2120 Width = std::max<unsigned>(a: 8, b: Width);
2121 Align = std::max<unsigned>(a: 8, b: Width);
2122
2123 // If the alignment is not a power of 2, round up to the next power of 2.
2124 // This happens for non-power-of-2 length vectors.
2125 if (Align & (Align-1)) {
2126 Align = llvm::bit_ceil(Value: Align);
2127 Width = llvm::alignTo(Value: Width, Align);
2128 }
2129 // Adjust the alignment based on the target max.
2130 uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
2131 if (TargetVectorAlign && TargetVectorAlign < Align)
2132 Align = TargetVectorAlign;
2133 if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
2134 // Adjust the alignment for fixed-length SVE vectors. This is important
2135 // for non-power-of-2 vector lengths.
2136 Align = 128;
2137 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
2138 // Adjust the alignment for fixed-length SVE predicates.
2139 Align = 16;
2140 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
2141 VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
2142 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
2143 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
2144 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
2145 // Adjust the alignment for fixed-length RVV vectors.
2146 Align = std::min<unsigned>(a: 64, b: Width);
2147 break;
2148 }
2149
2150 case Type::ConstantMatrix: {
2151 const auto *MT = cast<ConstantMatrixType>(Val: T);
2152 TypeInfo ElementInfo = getTypeInfo(T: MT->getElementType());
2153 // The internal layout of a matrix value is implementation defined.
2154 // Initially be ABI compatible with arrays with respect to alignment and
2155 // size.
2156 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
2157 Align = ElementInfo.Align;
2158 break;
2159 }
2160
2161 case Type::Builtin:
2162 switch (cast<BuiltinType>(Val: T)->getKind()) {
2163 default: llvm_unreachable("Unknown builtin type!");
2164 case BuiltinType::Void:
2165 // GCC extension: alignof(void) = 8 bits.
2166 Width = 0;
2167 Align = 8;
2168 break;
2169 case BuiltinType::Bool:
2170 Width = Target->getBoolWidth();
2171 Align = Target->getBoolAlign();
2172 break;
2173 case BuiltinType::Char_S:
2174 case BuiltinType::Char_U:
2175 case BuiltinType::UChar:
2176 case BuiltinType::SChar:
2177 case BuiltinType::Char8:
2178 Width = Target->getCharWidth();
2179 Align = Target->getCharAlign();
2180 break;
2181 case BuiltinType::WChar_S:
2182 case BuiltinType::WChar_U:
2183 Width = Target->getWCharWidth();
2184 Align = Target->getWCharAlign();
2185 break;
2186 case BuiltinType::Char16:
2187 Width = Target->getChar16Width();
2188 Align = Target->getChar16Align();
2189 break;
2190 case BuiltinType::Char32:
2191 Width = Target->getChar32Width();
2192 Align = Target->getChar32Align();
2193 break;
2194 case BuiltinType::UShort:
2195 case BuiltinType::Short:
2196 Width = Target->getShortWidth();
2197 Align = Target->getShortAlign();
2198 break;
2199 case BuiltinType::UInt:
2200 case BuiltinType::Int:
2201 Width = Target->getIntWidth();
2202 Align = Target->getIntAlign();
2203 break;
2204 case BuiltinType::ULong:
2205 case BuiltinType::Long:
2206 Width = Target->getLongWidth();
2207 Align = Target->getLongAlign();
2208 break;
2209 case BuiltinType::ULongLong:
2210 case BuiltinType::LongLong:
2211 Width = Target->getLongLongWidth();
2212 Align = Target->getLongLongAlign();
2213 break;
2214 case BuiltinType::Int128:
2215 case BuiltinType::UInt128:
2216 Width = 128;
2217 Align = Target->getInt128Align();
2218 break;
2219 case BuiltinType::ShortAccum:
2220 case BuiltinType::UShortAccum:
2221 case BuiltinType::SatShortAccum:
2222 case BuiltinType::SatUShortAccum:
2223 Width = Target->getShortAccumWidth();
2224 Align = Target->getShortAccumAlign();
2225 break;
2226 case BuiltinType::Accum:
2227 case BuiltinType::UAccum:
2228 case BuiltinType::SatAccum:
2229 case BuiltinType::SatUAccum:
2230 Width = Target->getAccumWidth();
2231 Align = Target->getAccumAlign();
2232 break;
2233 case BuiltinType::LongAccum:
2234 case BuiltinType::ULongAccum:
2235 case BuiltinType::SatLongAccum:
2236 case BuiltinType::SatULongAccum:
2237 Width = Target->getLongAccumWidth();
2238 Align = Target->getLongAccumAlign();
2239 break;
2240 case BuiltinType::ShortFract:
2241 case BuiltinType::UShortFract:
2242 case BuiltinType::SatShortFract:
2243 case BuiltinType::SatUShortFract:
2244 Width = Target->getShortFractWidth();
2245 Align = Target->getShortFractAlign();
2246 break;
2247 case BuiltinType::Fract:
2248 case BuiltinType::UFract:
2249 case BuiltinType::SatFract:
2250 case BuiltinType::SatUFract:
2251 Width = Target->getFractWidth();
2252 Align = Target->getFractAlign();
2253 break;
2254 case BuiltinType::LongFract:
2255 case BuiltinType::ULongFract:
2256 case BuiltinType::SatLongFract:
2257 case BuiltinType::SatULongFract:
2258 Width = Target->getLongFractWidth();
2259 Align = Target->getLongFractAlign();
2260 break;
2261 case BuiltinType::BFloat16:
2262 if (Target->hasBFloat16Type()) {
2263 Width = Target->getBFloat16Width();
2264 Align = Target->getBFloat16Align();
2265 } else if ((getLangOpts().SYCLIsDevice ||
2266 (getLangOpts().OpenMP &&
2267 getLangOpts().OpenMPIsTargetDevice)) &&
2268 AuxTarget->hasBFloat16Type()) {
2269 Width = AuxTarget->getBFloat16Width();
2270 Align = AuxTarget->getBFloat16Align();
2271 }
2272 break;
2273 case BuiltinType::Float16:
2274 case BuiltinType::Half:
2275 if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2276 !getLangOpts().OpenMPIsTargetDevice) {
2277 Width = Target->getHalfWidth();
2278 Align = Target->getHalfAlign();
2279 } else {
2280 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2281 "Expected OpenMP device compilation.");
2282 Width = AuxTarget->getHalfWidth();
2283 Align = AuxTarget->getHalfAlign();
2284 }
2285 break;
2286 case BuiltinType::Float:
2287 Width = Target->getFloatWidth();
2288 Align = Target->getFloatAlign();
2289 break;
2290 case BuiltinType::Double:
2291 Width = Target->getDoubleWidth();
2292 Align = Target->getDoubleAlign();
2293 break;
2294 case BuiltinType::Ibm128:
2295 Width = Target->getIbm128Width();
2296 Align = Target->getIbm128Align();
2297 break;
2298 case BuiltinType::LongDouble:
2299 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2300 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2301 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2302 Width = AuxTarget->getLongDoubleWidth();
2303 Align = AuxTarget->getLongDoubleAlign();
2304 } else {
2305 Width = Target->getLongDoubleWidth();
2306 Align = Target->getLongDoubleAlign();
2307 }
2308 break;
2309 case BuiltinType::Float128:
2310 if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2311 !getLangOpts().OpenMPIsTargetDevice) {
2312 Width = Target->getFloat128Width();
2313 Align = Target->getFloat128Align();
2314 } else {
2315 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2316 "Expected OpenMP device compilation.");
2317 Width = AuxTarget->getFloat128Width();
2318 Align = AuxTarget->getFloat128Align();
2319 }
2320 break;
2321 case BuiltinType::NullPtr:
2322 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
2323 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2324 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2325 break;
2326 case BuiltinType::ObjCId:
2327 case BuiltinType::ObjCClass:
2328 case BuiltinType::ObjCSel:
2329 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2330 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2331 break;
2332 case BuiltinType::OCLSampler:
2333 case BuiltinType::OCLEvent:
2334 case BuiltinType::OCLClkEvent:
2335 case BuiltinType::OCLQueue:
2336 case BuiltinType::OCLReserveID:
2337#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2338 case BuiltinType::Id:
2339#include "clang/Basic/OpenCLImageTypes.def"
2340#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2341 case BuiltinType::Id:
2342#include "clang/Basic/OpenCLExtensionTypes.def"
2343 AS = Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
2344 Width = Target->getPointerWidth(AddrSpace: AS);
2345 Align = Target->getPointerAlign(AddrSpace: AS);
2346 break;
2347 // The SVE types are effectively target-specific. The length of an
2348 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2349 // of 128 bits. There is one predicate bit for each vector byte, so the
2350 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2351 //
2352 // Because the length is only known at runtime, we use a dummy value
2353 // of 0 for the static length. The alignment values are those defined
2354 // by the Procedure Call Standard for the Arm Architecture.
2355#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
2356 case BuiltinType::Id: \
2357 Width = 0; \
2358 Align = 128; \
2359 break;
2360#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
2361 case BuiltinType::Id: \
2362 Width = 0; \
2363 Align = 16; \
2364 break;
2365#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
2366 case BuiltinType::Id: \
2367 Width = 0; \
2368 Align = 16; \
2369 break;
2370#define SVE_SCALAR_TYPE(Name, MangledName, Id, SingletonId, Bits) \
2371 case BuiltinType::Id: \
2372 Width = Bits; \
2373 Align = Bits; \
2374 break;
2375#include "clang/Basic/AArch64ACLETypes.def"
2376#define PPC_VECTOR_TYPE(Name, Id, Size) \
2377 case BuiltinType::Id: \
2378 Width = Size; \
2379 Align = Size; \
2380 break;
2381#include "clang/Basic/PPCTypes.def"
2382#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2383 IsFP, IsBF) \
2384 case BuiltinType::Id: \
2385 Width = 0; \
2386 Align = ElBits; \
2387 break;
2388#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2389 case BuiltinType::Id: \
2390 Width = 0; \
2391 Align = 8; \
2392 break;
2393#include "clang/Basic/RISCVVTypes.def"
2394#define WASM_TYPE(Name, Id, SingletonId) \
2395 case BuiltinType::Id: \
2396 Width = 0; \
2397 Align = 8; \
2398 break;
2399#include "clang/Basic/WebAssemblyReferenceTypes.def"
2400#define AMDGPU_TYPE(NAME, ID, SINGLETONID, WIDTH, ALIGN) \
2401 case BuiltinType::ID: \
2402 Width = WIDTH; \
2403 Align = ALIGN; \
2404 break;
2405#include "clang/Basic/AMDGPUTypes.def"
2406#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
2407#include "clang/Basic/HLSLIntangibleTypes.def"
2408 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2409 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2410 break;
2411 }
2412 break;
2413 case Type::ObjCObjectPointer:
2414 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2415 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2416 break;
2417 case Type::BlockPointer:
2418 AS = cast<BlockPointerType>(Val: T)->getPointeeType().getAddressSpace();
2419 Width = Target->getPointerWidth(AddrSpace: AS);
2420 Align = Target->getPointerAlign(AddrSpace: AS);
2421 break;
2422 case Type::LValueReference:
2423 case Type::RValueReference:
2424 // alignof and sizeof should never enter this code path here, so we go
2425 // the pointer route.
2426 AS = cast<ReferenceType>(Val: T)->getPointeeType().getAddressSpace();
2427 Width = Target->getPointerWidth(AddrSpace: AS);
2428 Align = Target->getPointerAlign(AddrSpace: AS);
2429 break;
2430 case Type::Pointer:
2431 AS = cast<PointerType>(Val: T)->getPointeeType().getAddressSpace();
2432 Width = Target->getPointerWidth(AddrSpace: AS);
2433 Align = Target->getPointerAlign(AddrSpace: AS);
2434 break;
2435 case Type::MemberPointer: {
2436 const auto *MPT = cast<MemberPointerType>(Val: T);
2437 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2438 Width = MPI.Width;
2439 Align = MPI.Align;
2440 break;
2441 }
2442 case Type::Complex: {
2443 // Complex types have the same alignment as their elements, but twice the
2444 // size.
2445 TypeInfo EltInfo = getTypeInfo(T: cast<ComplexType>(Val: T)->getElementType());
2446 Width = EltInfo.Width * 2;
2447 Align = EltInfo.Align;
2448 break;
2449 }
2450 case Type::ObjCObject:
2451 return getTypeInfo(T: cast<ObjCObjectType>(Val: T)->getBaseType().getTypePtr());
2452 case Type::Adjusted:
2453 case Type::Decayed:
2454 return getTypeInfo(T: cast<AdjustedType>(Val: T)->getAdjustedType().getTypePtr());
2455 case Type::ObjCInterface: {
2456 const auto *ObjCI = cast<ObjCInterfaceType>(Val: T);
2457 if (ObjCI->getDecl()->isInvalidDecl()) {
2458 Width = 8;
2459 Align = 8;
2460 break;
2461 }
2462 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2463 Width = toBits(CharSize: Layout.getSize());
2464 Align = toBits(CharSize: Layout.getAlignment());
2465 break;
2466 }
2467 case Type::BitInt: {
2468 const auto *EIT = cast<BitIntType>(Val: T);
2469 Align = Target->getBitIntAlign(NumBits: EIT->getNumBits());
2470 Width = Target->getBitIntWidth(NumBits: EIT->getNumBits());
2471 break;
2472 }
2473 case Type::Record:
2474 case Type::Enum: {
2475 const auto *TT = cast<TagType>(Val: T);
2476
2477 if (TT->getDecl()->isInvalidDecl()) {
2478 Width = 8;
2479 Align = 8;
2480 break;
2481 }
2482
2483 if (const auto *ET = dyn_cast<EnumType>(Val: TT)) {
2484 const EnumDecl *ED = ET->getDecl();
2485 TypeInfo Info =
2486 getTypeInfo(T: ED->getIntegerType()->getUnqualifiedDesugaredType());
2487 if (unsigned AttrAlign = ED->getMaxAlignment()) {
2488 Info.Align = AttrAlign;
2489 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
2490 }
2491 return Info;
2492 }
2493
2494 const auto *RT = cast<RecordType>(Val: TT);
2495 const RecordDecl *RD = RT->getDecl();
2496 const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
2497 Width = toBits(CharSize: Layout.getSize());
2498 Align = toBits(CharSize: Layout.getAlignment());
2499 AlignRequirement = RD->hasAttr<AlignedAttr>()
2500 ? AlignRequirementKind::RequiredByRecord
2501 : AlignRequirementKind::None;
2502 break;
2503 }
2504
2505 case Type::SubstTemplateTypeParm:
2506 return getTypeInfo(T: cast<SubstTemplateTypeParmType>(Val: T)->
2507 getReplacementType().getTypePtr());
2508
2509 case Type::Auto:
2510 case Type::DeducedTemplateSpecialization: {
2511 const auto *A = cast<DeducedType>(Val: T);
2512 assert(!A->getDeducedType().isNull() &&
2513 "cannot request the size of an undeduced or dependent auto type");
2514 return getTypeInfo(T: A->getDeducedType().getTypePtr());
2515 }
2516
2517 case Type::Paren:
2518 return getTypeInfo(T: cast<ParenType>(Val: T)->getInnerType().getTypePtr());
2519
2520 case Type::MacroQualified:
2521 return getTypeInfo(
2522 T: cast<MacroQualifiedType>(Val: T)->getUnderlyingType().getTypePtr());
2523
2524 case Type::ObjCTypeParam:
2525 return getTypeInfo(T: cast<ObjCTypeParamType>(Val: T)->desugar().getTypePtr());
2526
2527 case Type::Using:
2528 return getTypeInfo(T: cast<UsingType>(Val: T)->desugar().getTypePtr());
2529
2530 case Type::Typedef: {
2531 const auto *TT = cast<TypedefType>(Val: T);
2532 TypeInfo Info = getTypeInfo(T: TT->desugar().getTypePtr());
2533 // If the typedef has an aligned attribute on it, it overrides any computed
2534 // alignment we have. This violates the GCC documentation (which says that
2535 // attribute(aligned) can only round up) but matches its implementation.
2536 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2537 Align = AttrAlign;
2538 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2539 } else {
2540 Align = Info.Align;
2541 AlignRequirement = Info.AlignRequirement;
2542 }
2543 Width = Info.Width;
2544 break;
2545 }
2546
2547 case Type::Elaborated:
2548 return getTypeInfo(T: cast<ElaboratedType>(Val: T)->getNamedType().getTypePtr());
2549
2550 case Type::Attributed:
2551 return getTypeInfo(
2552 T: cast<AttributedType>(Val: T)->getEquivalentType().getTypePtr());
2553
2554 case Type::CountAttributed:
2555 return getTypeInfo(T: cast<CountAttributedType>(Val: T)->desugar().getTypePtr());
2556
2557 case Type::BTFTagAttributed:
2558 return getTypeInfo(
2559 T: cast<BTFTagAttributedType>(Val: T)->getWrappedType().getTypePtr());
2560
2561 case Type::HLSLAttributedResource:
2562 return getTypeInfo(
2563 T: cast<HLSLAttributedResourceType>(Val: T)->getWrappedType().getTypePtr());
2564
2565 case Type::HLSLInlineSpirv: {
2566 const auto *ST = cast<HLSLInlineSpirvType>(Val: T);
2567 // Size is specified in bytes, convert to bits
2568 Width = ST->getSize() * 8;
2569 Align = ST->getAlignment();
2570 if (Width == 0 && Align == 0) {
2571 // We are defaulting to laying out opaque SPIR-V types as 32-bit ints.
2572 Width = 32;
2573 Align = 32;
2574 }
2575 break;
2576 }
2577
2578 case Type::Atomic: {
2579 // Start with the base type information.
2580 TypeInfo Info = getTypeInfo(T: cast<AtomicType>(Val: T)->getValueType());
2581 Width = Info.Width;
2582 Align = Info.Align;
2583
2584 if (!Width) {
2585 // An otherwise zero-sized type should still generate an
2586 // atomic operation.
2587 Width = Target->getCharWidth();
2588 assert(Align);
2589 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2590 // If the size of the type doesn't exceed the platform's max
2591 // atomic promotion width, make the size and alignment more
2592 // favorable to atomic operations:
2593
2594 // Round the size up to a power of 2.
2595 Width = llvm::bit_ceil(Value: Width);
2596
2597 // Set the alignment equal to the size.
2598 Align = static_cast<unsigned>(Width);
2599 }
2600 }
2601 break;
2602
2603 case Type::Pipe:
2604 Width = Target->getPointerWidth(AddrSpace: LangAS::opencl_global);
2605 Align = Target->getPointerAlign(AddrSpace: LangAS::opencl_global);
2606 break;
2607 }
2608
2609 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2610 return TypeInfo(Width, Align, AlignRequirement);
2611}
2612
2613unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2614 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(Val: T);
2615 if (I != MemoizedUnadjustedAlign.end())
2616 return I->second;
2617
2618 unsigned UnadjustedAlign;
2619 if (const auto *RT = T->getAs<RecordType>()) {
2620 const RecordDecl *RD = RT->getDecl();
2621 const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
2622 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2623 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2624 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2625 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2626 } else {
2627 UnadjustedAlign = getTypeAlign(T: T->getUnqualifiedDesugaredType());
2628 }
2629
2630 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2631 return UnadjustedAlign;
2632}
2633
2634unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2635 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2636 TargetTriple: getTargetInfo().getTriple(), Features: Target->getTargetOpts().FeatureMap);
2637 return SimdAlign;
2638}
2639
/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
/// Note: truncating division; the caller is responsible for any rounding.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  const int64_t BitsPerChar = getCharWidth();
  return CharUnits::fromQuantity(BitSize / BitsPerChar);
}
2644
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2649
2650/// getTypeSizeInChars - Return the size of the specified type, in characters.
2651/// This method does not work on incomplete types.
2652CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2653 return getTypeInfoInChars(T).Width;
2654}
2655CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2656 return getTypeInfoInChars(T).Width;
2657}
2658
2659/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2660/// characters. This method does not work on incomplete types.
2661CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2662 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2663}
2664CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2665 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2666}
2667
2668/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2669/// type, in characters, before alignment adjustments. This method does
2670/// not work on incomplete types.
2671CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2672 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2673}
2674CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2675 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2676}
2677
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // For arrays, derive the preferred alignment from the element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(T: getPointerDiffType().getTypePtr());

  // Some targets never overalign beyond the ABI alignment.
  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    // Records carry a precomputed preferred alignment in their layout.
    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(CharSize: getASTRecordLayout(D: RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(K: BuiltinType::Double) ||
      T->isSpecificBuiltinType(K: BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(K: BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(a: ABIAlign, b: (unsigned)getTypeSize(T));

  return ABIAlign;
}
2733
2734/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2735/// for __attribute__((aligned)) on this target, to be used if no alignment
2736/// value is specified.
2737unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2738 return getTargetInfo().getDefaultAlignForAttributeAligned();
2739}
2740
/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type.
unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
  // Never drop below the target's minimum alignment for globals of this size.
  uint64_t SizeInBits = getTypeSize(T.getTypePtr());
  unsigned MinAlign = getMinGlobalAlignOfVar(SizeInBits, VD);
  unsigned PrefAlign = getPreferredTypeAlign(T);
  return std::max(PrefAlign, MinAlign);
}
2748
2749/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2750/// should be given to a global variable of the specified type.
2751CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
2752 const VarDecl *VD) const {
2753 return toCharUnitsFromBits(BitSize: getAlignOfGlobalVar(T, VD));
2754}
2755
/// Return the target-imposed minimum alignment for a global of the given
/// size; a null \p VD is treated as a non-weak definition in this TU.
unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
                                            const VarDecl *VD) const {
  // Only a variable that is weak, or lacks a definition here, fails the
  // "non-weak definition" test; the default (no decl) passes it.
  bool IsWeakOrUndefined = VD && (!VD->hasDefinition() || VD->isWeak());
  return getTargetInfo().getMinGlobalAlign(Size,
                                           /*HasNonWeakDef=*/!IsWeakOrUndefined);
}
2763
2764CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2765 CharUnits Offset = CharUnits::Zero();
2766 const ASTRecordLayout *Layout = &getASTRecordLayout(D: RD);
2767 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2768 Offset += Layout->getBaseClassOffset(Base);
2769 Layout = &getASTRecordLayout(D: Base);
2770 }
2771 return Offset;
2772}
2773
/// Accumulate the base-class offsets along a member pointer's inheritance
/// path to produce the adjustment that must be applied to 'this'.
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  // Start from the class that actually declares the member.
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: MPD->getDeclContext());
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    // For a pointer to a member of a derived class the path is walked in the
    // opposite direction, so the roles of base and derived swap.
    if (DerivedMember)
      std::swap(a&: Base, b&: Derived);
    ThisAdjustment += getASTRecordLayout(D: Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  // Derived-to-base adjustment is the negation of the accumulated offset.
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}
2792
2793/// DeepCollectObjCIvars -
2794/// This routine first collects all declared, but not synthesized, ivars in
2795/// super class and then collects all ivars, including those synthesized for
2796/// current class. This routine is used for implementation of current class
2797/// when all ivars, declared and synthesized are known.
2798void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2799 bool leafClass,
2800 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2801 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2802 DeepCollectObjCIvars(OI: SuperClass, leafClass: false, Ivars);
2803 if (!leafClass) {
2804 llvm::append_range(C&: Ivars, R: OI->ivars());
2805 } else {
2806 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2807 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2808 Iv= Iv->getNextIvar())
2809 Ivars.push_back(Elt: Iv);
2810 }
2811}
2812
/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
    llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(Val: CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (auto *Proto : OI->all_referenced_protocols()) {
      CollectInheritedProtocols(CDecl: Proto, Protocols);
    }

    // Categories of this Interface.
    for (const auto *Cat : OI->visible_categories())
      CollectInheritedProtocols(CDecl: Cat, Protocols);

    // Walk the whole superclass chain; each superclass contributes its own
    // protocols (and, recursively, its categories' protocols).
    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(CDecl: SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(Val: CDecl)) {
    // A category contributes exactly its listed protocols.
    for (auto *Proto : OC->protocols()) {
      CollectInheritedProtocols(CDecl: Proto, Protocols);
    }
  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(Val: CDecl)) {
    // Insert the protocol. If it was already in the set, its inherited
    // protocols were collected too, so stop here (also breaks cycles).
    if (!Protocols.insert(
          Ptr: const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
      return;

    for (auto *Proto : OP->protocols())
      CollectInheritedProtocols(CDecl: Proto, Protocols);
  }
}
2847
/// A union has a unique object representation iff it has at least one field
/// and every field is itself uniquely represented and exactly fills the union.
static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
                                                const RecordDecl *RD,
                                                bool CheckIfTriviallyCopyable) {
  assert(RD->isUnion() && "Must be union type");
  const CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());

  bool SawField = false;
  for (const auto *Field : RD->fields()) {
    if (!Context.hasUniqueObjectRepresentations(Field->getType(),
                                                CheckIfTriviallyCopyable))
      return false;
    // Any member smaller than the union would leave trailing padding.
    if (Context.getTypeSizeInChars(Field->getType()) != UnionSize)
      return false;
    SawField = true;
  }
  // An empty union cannot have a unique representation.
  return SawField;
}
2864
/// Bit offset of a field subobject. The layout parameter exists only to
/// mirror the base-class overload's signature.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  int64_t OffsetInBits = Context.getFieldOffset(Field);
  return OffsetInBits;
}
2870
/// Bit offset of a base-class subobject within the enclosing record's layout.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  CharUnits BaseOffset = Layout.getBaseClassOffset(RD);
  return Context.toBits(BaseOffset);
}
2876
2877static std::optional<int64_t>
2878structHasUniqueObjectRepresentations(const ASTContext &Context,
2879 const RecordDecl *RD,
2880 bool CheckIfTriviallyCopyable);
2881
/// Return the number of bits a field subobject contributes to the object
/// representation, or std::nullopt if its representation is not unique.
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union records are handled structurally so interior padding is caught.
  if (Field->getType()->isRecordType()) {
    const RecordDecl *RD = Field->getType()->getAsRecordDecl();
    if (!RD->isUnion())
      return structHasUniqueObjectRepresentations(Context, RD,
                                                  CheckIfTriviallyCopyable);
  }

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(CharSize: Context.getTypeSizeInChars(T: Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    int64_t BitfieldSize = Field->getBitWidthValue();
    if (IsBitIntType) {
      // A bitfield wider than its _BitInt implies padding bits.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Val: Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      // An oversized bitfield carries padding, so not unique.
      return std::nullopt;
    }
    // Only the declared bitfield width participates in the representation.
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Ty: Field->getType(), CheckIfTriviallyCopyable)) {
    // Non-bitfield _BitInt: the padding bits do matter, so re-check.
    return std::nullopt;
  }
  return FieldSizeInBits;
}
2923
/// Return the number of bits a base-class subobject contributes, or
/// std::nullopt if its representation is not unique. Forwards to the
/// struct-level check.
static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  return structHasUniqueObjectRepresentations(Context, RD,
                                              CheckIfTriviallyCopyable);
}
2930
/// Walk \p Subobjects in layout order, verifying each is uniquely represented
/// and that no padding appears between consecutive subobjects. Returns the
/// offset just past the last subobject, or std::nullopt on failure.
template <typename RangeT>
static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
    const RangeT &Subobjects, int64_t CurOffsetInBits,
    const ASTContext &Context, const clang::ASTRecordLayout &Layout,
    bool CheckIfTriviallyCopyable) {
  for (const auto *Subobject : Subobjects) {
    std::optional<int64_t> SizeInBits =
        getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
    if (!SizeInBits)
      return std::nullopt;
    // Zero-sized subobjects (e.g. empty bases, unnamed bitfields) occupy no
    // bits and may legitimately share an offset, so skip the padding check.
    if (*SizeInBits == 0)
      continue;
    if (getSubobjectOffset(Subobject, Context, Layout) != CurOffsetInBits)
      return std::nullopt;
    CurOffsetInBits += *SizeInBits;
  }
  return CurOffsetInBits;
}
2950
/// Return the total number of representation bits of a struct/class, or
/// std::nullopt if the representation is not unique (dynamic class, padding
/// between subobjects, or a non-unique subobject).
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(D: RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // A vtable/vbptr is not part of the value representation.
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Args: Base.getType()->getAsCXXRecordDecl());
    }

    // Bases must be checked in layout order, not declaration order.
    llvm::sort(C&: Bases, Comp: [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(Base: L) < Layout.getBaseClassOffset(Base: R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Subobjects: Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  // Fields follow the base-class subobjects.
  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          Subobjects: RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}
2992
/// Implements the core of std::has_unique_object_representations: true iff
/// any two objects of type \p Ty with the same value have the same object
/// representation (no padding bits).
bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where:
  //     - two objects of array or non-union class type are considered to have
  //       the same value if their respective sequences of direct subobjects
  //       have the same values, and
  //     - two objects of union type are considered to have the same value if
  //       they have the same active member and the corresponding members have
  //       the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the condition
  //   does not hold; otherwise, the condition holds true for unsigned integral
  //   types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(Ty: getBaseElementType(QT: Ty),
                                          CheckIfTriviallyCopyable);

  assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
         "hasUniqueObjectRepresentations should not be called with an "
         "incomplete type");

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(Context: *this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Address discriminated integer types are not unique.
    if (Ty.hasAddressDiscriminatedPointerAuth())
      return false;
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(T: BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return !Ty.hasAddressDiscriminatedPointerAuth();

  // Member pointers are unique only if the ABI says they carry no padding.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  if (Ty->isRecordType()) {
    const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();

    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(Context: *this, RD: Record,
                                                 CheckIfTriviallyCopyable);

    // A struct is unique iff its subobjects tile the full size with no gaps.
    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        Context: *this, RD: Record, CheckIfTriviallyCopyable);

    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(T: Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}
3074
3075unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
3076 unsigned count = 0;
3077 // Count ivars declared in class extension.
3078 for (const auto *Ext : OI->known_extensions())
3079 count += Ext->ivar_size();
3080
3081 // Count ivar defined in this class's implementation. This
3082 // includes synthesized ivars.
3083 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
3084 count += ImplDecl->ivar_size();
3085
3086 return count;
3087}
3088
3089bool ASTContext::isSentinelNullExpr(const Expr *E) {
3090 if (!E)
3091 return false;
3092
3093 // nullptr_t is always treated as null.
3094 if (E->getType()->isNullPtrType()) return true;
3095
3096 if (E->getType()->isAnyPointerType() &&
3097 E->IgnoreParenCasts()->isNullPointerConstant(Ctx&: *this,
3098 NPC: Expr::NPC_ValueDependentIsNull))
3099 return true;
3100
3101 // Unfortunately, __null has type 'int'.
3102 if (isa<GNUNullExpr>(Val: E)) return true;
3103
3104 return false;
3105}
3106
3107/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
3108/// exists.
3109ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
3110 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3111 I = ObjCImpls.find(Val: D);
3112 if (I != ObjCImpls.end())
3113 return cast<ObjCImplementationDecl>(Val: I->second);
3114 return nullptr;
3115}
3116
3117/// Get the implementation of ObjCCategoryDecl, or nullptr if none
3118/// exists.
3119ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
3120 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3121 I = ObjCImpls.find(Val: D);
3122 if (I != ObjCImpls.end())
3123 return cast<ObjCCategoryImplDecl>(Val: I->second);
3124 return nullptr;
3125}
3126
/// Set the implementation of ObjCInterfaceDecl. Overwrites any previously
/// recorded implementation for \p IFaceD.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}
3133
/// Set the implementation of ObjCCategoryDecl. Overwrites any previously
/// recorded implementation for \p CatD.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}
3140
3141const ObjCMethodDecl *
3142ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
3143 return ObjCMethodRedecls.lookup(Val: MD);
3144}
3145
/// Record \p Redecl as the redeclaration of \p MD. Each method may have at
/// most one recorded redeclaration.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}
3151
3152const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
3153 const NamedDecl *ND) const {
3154 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(Val: ND->getDeclContext()))
3155 return ID;
3156 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: ND->getDeclContext()))
3157 return CD->getClassInterface();
3158 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: ND->getDeclContext()))
3159 return IMD->getClassInterface();
3160
3161 return nullptr;
3162}
3163
/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  auto It = BlockVarCopyInits.find(VD);
  if (It == BlockVarCopyInits.end())
    return {nullptr, false};
  return It->second;
}
3175
/// Set the copy initialization expression of a block var decl.
/// \p CanThrow records whether running the copy expression can throw.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  auto &Entry = BlockVarCopyInits[VD];
  Entry.setExprAndFlag(CopyExpr, CanThrow);
}
3184
/// Allocate a TypeSourceInfo for \p T. \p DataSize may be zero (the full
/// TypeLoc data size for T is then computed) or must match that size exactly.
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  // The TypeLoc data is laid out immediately after the TypeSourceInfo object.
  void *Mem = BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  return new (Mem) TypeSourceInfo(T, DataSize);
}
3198
3199TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
3200 SourceLocation L) const {
3201 TypeSourceInfo *DI = CreateTypeSourceInfo(T);
3202 DI->getTypeLoc().initialize(Context&: const_cast<ASTContext &>(*this), Loc: L);
3203 return DI;
3204}
3205
/// Return the layout of the given ObjC interface; thin wrapper over
/// getObjCLayout.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D);
}
3210
3211static auto getCanonicalTemplateArguments(const ASTContext &C,
3212 ArrayRef<TemplateArgument> Args,
3213 bool &AnyNonCanonArgs) {
3214 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3215 AnyNonCanonArgs |= C.canonicalizeTemplateArguments(Args: CanonArgs);
3216 return CanonArgs;
3217}
3218
3219bool ASTContext::canonicalizeTemplateArguments(
3220 MutableArrayRef<TemplateArgument> Args) const {
3221 bool AnyNonCanonArgs = false;
3222 for (auto &Arg : Args) {
3223 TemplateArgument OrigArg = Arg;
3224 Arg = getCanonicalTemplateArgument(Arg);
3225 AnyNonCanonArgs |= !Arg.structurallyEquals(Other: OrigArg);
3226 }
3227 return AnyNonCanonArgs;
3228}
3229
3230//===----------------------------------------------------------------------===//
3231// Type creation/memoization methods
3232//===----------------------------------------------------------------------===//
3233
/// Return the (uniqued) extended-qualifier type wrapping \p baseType with
/// \p quals. Fast qualifiers are returned in the QualType's pointer bits
/// rather than stored in the ExtQuals node.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  // Split off the fast qualifiers; only the remaining (extended) qualifiers
  // go into the ExtQuals node.
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, BaseType: baseType, Quals: quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(qs: quals);
    canon = getExtQualType(baseType: canonSplit.Ty, quals: canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have inserted
    // nodes and invalidated the position computed earlier.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(N: eq, InsertPos: insertPos);
  return QualType(eq, fastQuals);
}
3263
/// Return \p T qualified with \p AddressSpace. \p T must not already carry a
/// (different) address space.
QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  // Nothing to do if T is already in the requested address space.
  if (getCanonicalType(T).getAddressSpace() == AddressSpace)
    return T;

  // Strip existing extended qualifiers so they can be merged with the new
  // address space into a single ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // A type may live in at most one address space.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}
3283
/// Return \p T with its address-space qualifier removed, stripping through
/// sugar (and array element types) as needed to find it.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  QualifierCollector Quals;
  const Type *TypeNode;
  // For arrays, strip the qualifier off the element type, then reconstruct the
  // array type
  if (T.getTypePtr()->isArrayType()) {
    T = getUnqualifiedArrayType(T, Quals);
    TypeNode = T.getTypePtr();
  } else {
    // If we are composing extended qualifiers together, merge together
    // into one ExtQuals node.
    // Loop invariant: T.hasAddressSpace() held on entry, so the loop body
    // runs at least once and TypeNode is always assigned before use.
    while (T.hasAddressSpace()) {
      TypeNode = Quals.strip(type: T);

      // If the type no longer has an address space after stripping qualifiers,
      // jump out.
      if (!QualType(TypeNode, 0).hasAddressSpace())
        break;

      // There might be sugar in the way. Strip it and try again.
      T = T.getSingleStepDesugaredType(Context: *this);
    }
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(baseType: TypeNode, quals: Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3323
/// Compute the pointer-auth discriminator for \p RD's vtable pointer from the
/// mangled vtable name, so distinct classes get distinct but stable values.
uint16_t
ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
  assert(RD->isPolymorphic() &&
         "Attempted to get vtable pointer discriminator on a monomorphic type");
  std::unique_ptr<MangleContext> MC(createMangleContext());
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Stream(Buffer);
  MC->mangleCXXVTable(RD, Stream);
  return llvm::getPointerAuthStableSipHash(Buffer);
}
3334
/// Encode a function type for use in the discriminator of a function pointer
/// type. We can't use the itanium scheme for this since C has quite permissive
/// rules for type compatibility that we need to be compatible with.
///
/// Formally, this function associates every function pointer type T with an
/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
/// compatibility requires equivalent treatment under the ABI, so
/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
/// a subset of ~. Crucially, however, it must be a proper subset because
/// CCompatible is not an equivalence relation: for example, int[] is compatible
/// with both int[1] and int[2], but the latter are not compatible with each
/// other. Therefore this encoding function must be careful to only distinguish
/// types if there is no third type with which they are both required to be
/// compatible.
static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
                                             raw_ostream &OS, QualType QT) {
  // FIXME: Consider address space qualifiers.
  // The encoding works on the canonical type, so sugar never affects E(T).
  const Type *T = QT.getCanonicalType().getTypePtr();

  // FIXME: Consider using the C++ type mangling when we encounter a construct
  // that is incompatible with C.

  switch (T->getTypeClass()) {
  case Type::Atomic:
    // _Atomic is transparent for the purposes of this encoding.
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<AtomicType>(Val: T)->getValueType());

  case Type::LValueReference:
    OS << "R";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;
  case Type::RValueReference:
    OS << "O";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;

  case Type::Pointer:
    // C11 6.7.6.1p2:
    //   For two pointer types to be compatible, both shall be identically
    //   qualified and both shall be pointers to compatible types.
    // FIXME: we should also consider pointee types.
    OS << "P";
    return;

  case Type::ObjCObjectPointer:
  case Type::BlockPointer:
    OS << "P";
    return;

  case Type::Complex:
    OS << "C";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ComplexType>(Val: T)->getElementType());

  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::ArrayParameter:
    // C11 6.7.6.2p6:
    //   For two array types to be compatible, both shall have compatible
    //   element types, and if both size specifiers are present, and are integer
    //   constant expressions, then both size specifiers shall have the same
    //   constant value [...]
    //
    // So since ElemType[N] has to be compatible ElemType[], we can't encode the
    // width of the array.
    OS << "A";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ArrayType>(Val: T)->getElementType());

  case Type::ObjCInterface:
  case Type::ObjCObject:
    OS << "<objc_object>";
    return;

  case Type::Enum: {
    // C11 6.7.2.2p4:
    //   Each enumerated type shall be compatible with char, a signed integer
    //   type, or an unsigned integer type.
    //
    // So we have to treat enum types as integers.
    // An enum with no determined underlying type (e.g. an incomplete enum)
    // encodes as int.
    QualType UnderlyingType = cast<EnumType>(Val: T)->getDecl()->getIntegerType();
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // C11 6.7.6.3p15:
    //   For two function types to be compatible, both shall specify compatible
    //   return types. Moreover, the parameter type lists, if both are present,
    //   shall agree in the number of parameters and in the use of the ellipsis
    //   terminator; corresponding parameters shall have compatible types.
    //
    // That paragraph goes on to describe how unprototyped functions are to be
    // handled, which we ignore here. Unprototyped function pointers are hashed
    // as though they were prototyped nullary functions since thats probably
    // what the user meant. This behavior is non-conforming.
    // FIXME: If we add a "custom discriminator" function type attribute we
    // should encode functions as their discriminators.
    OS << "F";
    const auto *FuncType = cast<FunctionType>(Val: T);
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: FuncType->getReturnType());
    if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FuncType)) {
      for (QualType Param : FPT->param_types()) {
        // Parameters are encoded after the standard adjustments (decay etc.).
        Param = Ctx.getSignatureParameterType(T: Param);
        encodeTypeForFunctionPointerAuth(Ctx, OS, QT: Param);
      }
      if (FPT->isVariadic())
        OS << "z";
    }
    OS << "E";
    return;
  }

  case Type::MemberPointer: {
    // Encode both the class (qualifier) type and the pointee type.
    OS << "M";
    const auto *MPT = T->castAs<MemberPointerType>();
    encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: QualType(MPT->getQualifier()->getAsType(), 0));
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: MPT->getPointeeType());
    return;
  }
  case Type::ExtVector:
  case Type::Vector:
    // Vectors are discriminated only by their total size in bytes, not by
    // element type or element count.
    OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
    // Note: 'break' rather than 'return' — control falls out of the switch
    // and nothing further is appended.
    break;

  // Don't bother discriminating based on these types.
  case Type::Pipe:
  case Type::BitInt:
  case Type::ConstantMatrix:
    OS << "?";
    return;

  case Type::Builtin: {
    const auto *BTy = T->castAs<BuiltinType>();
    switch (BTy->getKind()) {
    // Both signed and unsigned integer types encode as "i", so signedness
    // (and width) never affects the discriminator.
#define SIGNED_TYPE(Id, SingletonId)                                           \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define UNSIGNED_TYPE(Id, SingletonId)                                         \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#define BUILTIN_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("placeholder types should not appear here.");

    case BuiltinType::Half:
      OS << "Dh";
      return;
    case BuiltinType::Float:
      OS << "f";
      return;
    case BuiltinType::Double:
      OS << "d";
      return;
    case BuiltinType::LongDouble:
      OS << "e";
      return;
    case BuiltinType::Float16:
      OS << "DF16_";
      return;
    case BuiltinType::Float128:
      OS << "g";
      return;

    case BuiltinType::Void:
      OS << "v";
      return;

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
    case BuiltinType::NullPtr:
      OS << "P";
      return;

    // Don't bother discriminating based on OpenCL types.
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::BFloat16:
    case BuiltinType::VectorQuad:
    case BuiltinType::VectorPair:
    case BuiltinType::DMR1024:
      OS << "?";
      return;

    // Don't bother discriminating based on these seldom-used types.
    case BuiltinType::Ibm128:
      return;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLExtensionTypes.def"
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/AArch64ACLETypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId)                            \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/HLSLIntangibleTypes.def"
    case BuiltinType::Dependent:
      llvm_unreachable("should never get here");
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
    case BuiltinType::WasmExternRef:
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      llvm_unreachable("not yet implemented");
    }
    llvm_unreachable("should never get here");
  }
  case Type::Record: {
    const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
    const IdentifierInfo *II = RD->getIdentifier();

    // In C++, an immediate typedef of an anonymous struct or union
    // is considered to name it for ODR purposes, but C's specification
    // of type compatibility does not have a similar rule.  Using the typedef
    // name in function type discriminators anyway, as we do here,
    // therefore technically violates the C standard: two function pointer
    // types defined in terms of two typedef'd anonymous structs with
    // different names are formally still compatible, but we are assigning
    // them different discriminators and therefore incompatible ABIs.
    //
    // This is a relatively minor violation that significantly improves
    // discrimination in some cases and has not caused problems in
    // practice.  Regardless, it is now part of the ABI in places where
    // function type discrimination is used, and it can no longer be
    // changed except on new platforms.

    if (!II)
      if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
        II = Typedef->getDeclName().getAsIdentifierInfo();

    if (!II) {
      OS << "<anonymous_record>";
      return;
    }
    // Length-prefixed name, as in Itanium mangling.
    OS << II->getLength() << II->getName();
    return;
  }
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("should never get here");
    break;
  case Type::DeducedTemplateSpecialization:
  case Type::Auto:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define ABSTRACT_TYPE(Class, Base)
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("unexpected non-canonical or dependent type!");
    return;
  }
}
3608
/// Compute a 16-bit stable hash of \p T's encoding for use as a pointer-auth
/// type discriminator.  Function types use the C-compatible custom encoding;
/// everything else uses the canonical C++ mangled name.
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
  assert(!T->isDependentType() &&
         "cannot compute type discriminator of a dependent type");

  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);

  // Function pointers/references are discriminated by their pointee
  // function type.
  if (T->isFunctionPointerType() || T->isFunctionReferenceType())
    T = T->getPointeeType();

  if (T->isFunctionType()) {
    encodeTypeForFunctionPointerAuth(Ctx: *this, OS&: Out, QT: T);
  } else {
    T = T.getUnqualifiedType();
    // Calls to member function pointers don't need to worry about
    // language interop or the laxness of the C type compatibility rules.
    // We just mangle the member pointer type directly, which is
    // implicitly much stricter about type matching. However, we do
    // strip any top-level exception specification before this mangling.
    // C++23 requires calls to work when the function type is convertible
    // to the pointer type by a function pointer conversion, which can
    // change the exception specification. This does not technically
    // require the exception specification to not affect representation,
    // because the function pointer conversion is still always a direct
    // value conversion and therefore an opportunity to resign the
    // pointer. (This is in contrast to e.g. qualification conversions,
    // which can be applied in nested pointer positions, effectively
    // requiring qualified and unqualified representations to match.)
    // However, it is pragmatic to ignore exception specifications
    // because it allows a certain amount of `noexcept` mismatching
    // to not become a visible ODR problem. This also leaves some
    // room for the committee to add laxness to function pointer
    // conversions in future standards.
    if (auto *MPT = T->getAs<MemberPointerType>())
      if (MPT->isMemberFunctionPointer()) {
        QualType PointeeType = MPT->getPointeeType();
        if (PointeeType->castAs<FunctionProtoType>()->getExceptionSpecType() !=
            EST_None) {
          // Rebuild the member pointer with the exception spec removed.
          QualType FT = getFunctionTypeWithExceptionSpec(Orig: PointeeType, ESI: EST_None);
          T = getMemberPointerType(T: FT, Qualifier: MPT->getQualifier(),
                                   Cls: MPT->getMostRecentCXXRecordDecl());
        }
      }
    std::unique_ptr<MangleContext> MC(createMangleContext());
    MC->mangleCanonicalTypeName(T, Out);
  }

  return llvm::getPointerAuthStableSipHash(S: Str);
}
3658
3659QualType ASTContext::getObjCGCQualType(QualType T,
3660 Qualifiers::GC GCAttr) const {
3661 QualType CanT = getCanonicalType(T);
3662 if (CanT.getObjCGCAttr() == GCAttr)
3663 return T;
3664
3665 if (const auto *ptr = T->getAs<PointerType>()) {
3666 QualType Pointee = ptr->getPointeeType();
3667 if (Pointee->isAnyPointerType()) {
3668 QualType ResultType = getObjCGCQualType(T: Pointee, GCAttr);
3669 return getPointerType(T: ResultType);
3670 }
3671 }
3672
3673 // If we are composing extended qualifiers together, merge together
3674 // into one ExtQuals node.
3675 QualifierCollector Quals;
3676 const Type *TypeNode = Quals.strip(type: T);
3677
3678 // If this type already has an ObjCGC specified, it cannot get
3679 // another one.
3680 assert(!Quals.hasObjCGCAttr() &&
3681 "Type cannot have multiple ObjCGCs!");
3682 Quals.addObjCGCAttr(type: GCAttr);
3683
3684 return getExtQualType(baseType: TypeNode, quals: Quals);
3685}
3686
3687QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3688 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3689 QualType Pointee = Ptr->getPointeeType();
3690 if (isPtrSizeAddressSpace(AS: Pointee.getAddressSpace())) {
3691 return getPointerType(T: removeAddrSpaceQualType(T: Pointee));
3692 }
3693 }
3694 return T;
3695}
3696
/// Return the uniqued CountAttributedType wrapping \p WrappedTy with the
/// given count expression, flags, and dependent-declaration list.
QualType ASTContext::getCountAttributedType(
    QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
    ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
  assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());

  // Unique on the wrapped type, count expression, and flags; the dependent
  // decls are trailing data and do not participate in the profile.
  llvm::FoldingSetNodeID ID;
  CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, Nullable: OrNull);

  void *InsertPos = nullptr;
  CountAttributedType *CATy =
      CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (CATy)
    return QualType(CATy, 0);

  QualType CanonTy = getCanonicalType(T: WrappedTy);
  // The dependent decls are stored as trailing objects, so compute the full
  // allocation size and placement-new into context-allocated storage.
  size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
      Counts: DependentDecls.size());
  CATy = (CountAttributedType *)Allocate(Size, Align: TypeAlignment);
  new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
                                 OrNull, DependentDecls);
  Types.push_back(Elt: CATy);
  CountAttributedTypes.InsertNode(N: CATy, InsertPos);

  return QualType(CATy, 0);
}
3722
/// Rebuild the type-sugar wrappers of \p Orig (attributes, elaboration,
/// parens, adjustments, macro qualifiers) around the result of recursively
/// adjusting the wrapped type; \p Adjust is applied to the first type that is
/// not one of the handled sugar forms.
QualType
ASTContext::adjustType(QualType Orig,
                       llvm::function_ref<QualType(QualType)> Adjust) const {
  switch (Orig->getTypeClass()) {
  case Type::Attributed: {
    const auto *AT = cast<AttributedType>(Val&: Orig);
    return getAttributedType(attrKind: AT->getAttrKind(),
                             modifiedType: adjustType(Orig: AT->getModifiedType(), Adjust),
                             equivalentType: adjustType(Orig: AT->getEquivalentType(), Adjust),
                             attr: AT->getAttr());
  }

  case Type::BTFTagAttributed: {
    const auto *BTFT = dyn_cast<BTFTagAttributedType>(Val&: Orig);
    return getBTFTagAttributedType(BTFAttr: BTFT->getAttr(),
                                   Wrapped: adjustType(Orig: BTFT->getWrappedType(), Adjust));
  }

  case Type::Elaborated: {
    const auto *ET = cast<ElaboratedType>(Val&: Orig);
    return getElaboratedType(Keyword: ET->getKeyword(), NNS: ET->getQualifier(),
                             NamedType: adjustType(Orig: ET->getNamedType(), Adjust));
  }

  case Type::Paren:
    return getParenType(
        NamedType: adjustType(Orig: cast<ParenType>(Val&: Orig)->getInnerType(), Adjust));

  case Type::Adjusted: {
    const auto *AT = cast<AdjustedType>(Val&: Orig);
    return getAdjustedType(Orig: AT->getOriginalType(),
                           New: adjustType(Orig: AT->getAdjustedType(), Adjust));
  }

  case Type::MacroQualified: {
    const auto *MQT = cast<MacroQualifiedType>(Val&: Orig);
    return getMacroQualifiedType(UnderlyingTy: adjustType(Orig: MQT->getUnderlyingType(), Adjust),
                                 MacroII: MQT->getMacroIdentifier());
  }

  default:
    // Not a handled sugar node: apply the adjustment here.
    return Adjust(Orig);
  }
}
3767
3768const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3769 FunctionType::ExtInfo Info) {
3770 if (T->getExtInfo() == Info)
3771 return T;
3772
3773 QualType Result;
3774 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(Val: T)) {
3775 Result = getFunctionNoProtoType(ResultTy: FNPT->getReturnType(), Info);
3776 } else {
3777 const auto *FPT = cast<FunctionProtoType>(Val: T);
3778 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3779 EPI.ExtInfo = Info;
3780 Result = getFunctionType(ResultTy: FPT->getReturnType(), Args: FPT->getParamTypes(), EPI);
3781 }
3782
3783 return cast<FunctionType>(Val: Result.getTypePtr());
3784}
3785
3786QualType ASTContext::adjustFunctionResultType(QualType FunctionType,
3787 QualType ResultType) {
3788 return adjustType(Orig: FunctionType, Adjust: [&](QualType Orig) {
3789 if (const auto *FNPT = Orig->getAs<FunctionNoProtoType>())
3790 return getFunctionNoProtoType(ResultTy: ResultType, Info: FNPT->getExtInfo());
3791
3792 const auto *FPT = Orig->castAs<FunctionProtoType>();
3793 return getFunctionType(ResultTy: ResultType, Args: FPT->getParamTypes(),
3794 EPI: FPT->getExtProtoInfo());
3795 });
3796}
3797
3798void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3799 QualType ResultType) {
3800 FD = FD->getMostRecentDecl();
3801 while (true) {
3802 FD->setType(adjustFunctionResultType(FunctionType: FD->getType(), ResultType));
3803 if (FunctionDecl *Next = FD->getPreviousDecl())
3804 FD = Next;
3805 else
3806 break;
3807 }
3808 if (ASTMutationListener *L = getASTMutationListener())
3809 L->DeducedReturnType(FD, ReturnType: ResultType);
3810}
3811
3812/// Get a function type and produce the equivalent function type with the
3813/// specified exception specification. Type sugar that can be present on a
3814/// declaration of a function with an exception specification is permitted
3815/// and preserved. Other type sugar (for instance, typedefs) is not.
3816QualType ASTContext::getFunctionTypeWithExceptionSpec(
3817 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3818 return adjustType(Orig, Adjust: [&](QualType Ty) {
3819 const auto *Proto = Ty->castAs<FunctionProtoType>();
3820 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->getParamTypes(),
3821 EPI: Proto->getExtProtoInfo().withExceptionSpec(ESI));
3822 });
3823}
3824
3825bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3826 QualType U) const {
3827 return hasSameType(T1: T, T2: U) ||
3828 (getLangOpts().CPlusPlus17 &&
3829 hasSameType(T1: getFunctionTypeWithExceptionSpec(Orig: T, ESI: EST_None),
3830 T2: getFunctionTypeWithExceptionSpec(Orig: U, ESI: EST_None)));
3831}
3832
3833QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3834 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3835 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3836 SmallVector<QualType, 16> Args(Proto->param_types().size());
3837 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3838 Args[i] = removePtrSizeAddrSpace(T: Proto->param_types()[i]);
3839 return getFunctionType(ResultTy: RetTy, Args, EPI: Proto->getExtProtoInfo());
3840 }
3841
3842 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3843 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3844 return getFunctionNoProtoType(ResultTy: RetTy, Info: Proto->getExtInfo());
3845 }
3846
3847 return T;
3848}
3849
3850bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3851 return hasSameType(T1: T, T2: U) ||
3852 hasSameType(T1: getFunctionTypeWithoutPtrSizes(T),
3853 T2: getFunctionTypeWithoutPtrSizes(T: U));
3854}
3855
3856QualType ASTContext::getFunctionTypeWithoutParamABIs(QualType T) const {
3857 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3858 FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
3859 EPI.ExtParameterInfos = nullptr;
3860 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->param_types(), EPI);
3861 }
3862 return T;
3863}
3864
3865bool ASTContext::hasSameFunctionTypeIgnoringParamABI(QualType T,
3866 QualType U) const {
3867 return hasSameType(T1: T, T2: U) || hasSameType(T1: getFunctionTypeWithoutParamABIs(T),
3868 T2: getFunctionTypeWithoutParamABIs(T: U));
3869}
3870
/// Replace the exception specification on \p FD's type with \p ESI; when
/// \p AsWritten is set, also patch the type-as-written in the declaration's
/// TypeSourceInfo.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(Orig: FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(Orig: TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    // The override below is only valid if the TypeLoc layout is unchanged,
    // which the assert verifies.
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(T: Updated);
  }
}
3898
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, Element: T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have modified the folding set, which
    // invalidates the insert position computed earlier.)
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
  Types.push_back(Elt: New);
  ComplexTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
3926
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, Pointee: T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have modified the folding set, which
    // invalidates the insert position computed earlier.)
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
  Types.push_back(Elt: New);
  PointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
3954
/// Return the uniqued AdjustedType recording that \p Orig was adjusted to
/// \p New (the canonical type is that of \p New).
QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(T: New);

  // Get the new insert position for the node we care about.
  // (getCanonicalType may have modified the folding set, which invalidates
  // the insert position computed earlier.)
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(AdjustedType))
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(Elt: AT);
  AdjustedTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
3975
/// Return the uniqued DecayedType recording that \p Orig decayed to
/// \p Decayed.  DecayedType nodes share the AdjustedTypes folding set.
QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New: Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(T: Decayed);

  // Get the new insert position for the node we care about.
  // (getCanonicalType may have modified the folding set, which invalidates
  // the insert position computed earlier.)
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(Elt: AT);
  AdjustedTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
3995
3996QualType ASTContext::getDecayedType(QualType T) const {
3997 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3998
3999 QualType Decayed;
4000
4001 // C99 6.7.5.3p7:
4002 // A declaration of a parameter as "array of type" shall be
4003 // adjusted to "qualified pointer to type", where the type
4004 // qualifiers (if any) are those specified within the [ and ] of
4005 // the array type derivation.
4006 if (T->isArrayType())
4007 Decayed = getArrayDecayedType(T);
4008
4009 // C99 6.7.5.3p8:
4010 // A declaration of a parameter as "function returning type"
4011 // shall be adjusted to "pointer to function returning type", as
4012 // in 6.3.2.1.
4013 if (T->isFunctionType())
4014 Decayed = getPointerType(T);
4015
4016 return getDecayedType(Orig: T, Decayed);
4017}
4018
/// Return the uniqued ArrayParameterType for a constant array type \p Ty,
/// i.e. the form an array takes when used as a function parameter.
QualType ASTContext::getArrayParameterType(QualType Ty) const {
  // Already an array-parameter type: nothing to do.
  if (Ty->isArrayParameterType())
    return Ty;
  assert(Ty->isConstantArrayType() && "Ty must be an array type.");
  // Desugar so we profile on the underlying ConstantArrayType.
  QualType DTy = Ty.getDesugaredType(Context: *this);
  const auto *ATy = cast<ConstantArrayType>(Val&: DTy);
  llvm::FoldingSetNodeID ID;
  ATy->Profile(ID, Ctx: *this, ET: ATy->getElementType(), ArraySize: ATy->getZExtSize(),
               SizeExpr: ATy->getSizeExpr(), SizeMod: ATy->getSizeModifier(),
               TypeQuals: ATy->getIndexTypeQualifiers().getAsOpaqueValue());
  void *InsertPos = nullptr;
  ArrayParameterType *AT =
      ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // If the array type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!DTy.isCanonical()) {
    Canonical = getArrayParameterType(Ty: getCanonicalType(T: Ty));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have modified the folding set, which
    // invalidates the insert position computed earlier.)
    AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!AT && "Shouldn't be in the map!");
  }

  AT = new (*this, alignof(ArrayParameterType))
      ArrayParameterType(ATy, Canonical);
  Types.push_back(Elt: AT);
  ArrayParameterTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
4050
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, Pointee: T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have modified the folding set, which
    // invalidates the insert position computed earlier.)
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(Elt: New);
  BlockPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4082
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // Used for reference collapsing: a reference to a reference canonicalizes
  // on the innermost pointee type.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have modified the folding set, which
    // invalidates the insert position computed earlier.)
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(Elt: New);
  LValueReferenceTypes.InsertNode(N: New, InsertPos);

  return QualType(New, 0);
}
4123
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue: false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // Used for reference collapsing: a reference to a reference canonicalizes
  // on the innermost pointee type.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have modified the folding set, which
    // invalidates the insert position computed earlier.)
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(Elt: New);
  RValueReferenceTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4162
/// Return the uniqued member-pointer type for a pointee type \p T and a class
/// named by \p Qualifier and/or \p Cls; at least one of the two must be
/// non-null, and the missing one is derived from the other.
QualType ASTContext::getMemberPointerType(QualType T,
                                          NestedNameSpecifier *Qualifier,
                                          const CXXRecordDecl *Cls) const {
  if (!Qualifier) {
    assert(Cls && "At least one of Qualifier or Cls must be provided");
    Qualifier = NestedNameSpecifier::Create(Context: *this, /*Prefix=*/nullptr,
                                            T: getTypeDeclType(Decl: Cls).getTypePtr());
  } else if (!Cls) {
    Cls = Qualifier->getAsRecordDecl();
  }
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, Pointee: T, Qualifier, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // Canonical form of the qualifier: when the class is known, it is a
  // qualifier naming the canonical class declaration directly.
  NestedNameSpecifier *CanonicalQualifier = [&] {
    if (!Cls)
      return getCanonicalNestedNameSpecifier(NNS: Qualifier);
    NestedNameSpecifier *R = NestedNameSpecifier::Create(
        Context: *this, /*Prefix=*/nullptr, T: Cls->getCanonicalDecl()->getTypeForDecl());
    assert(R == getCanonicalNestedNameSpecifier(R));
    return R;
  }();
  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || Qualifier != CanonicalQualifier) {
    Canonical =
        getMemberPointerType(T: getCanonicalType(T), Qualifier: CanonicalQualifier, Cls);
    assert(!cast<MemberPointerType>(Canonical)->isSugared());
    // Get the new insert position for the node we care about.
    // The recursive call above may have invalidated the earlier InsertPos.
    [[maybe_unused]] MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Qualifier, Canonical);
  Types.push_back(Elt: New);
  MemberPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4209
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(width: Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, Ctx: *this, ET: EltTy, ArraySize: ArySize.getZExtValue(), SizeExpr,
                             SizeMod: ASM, TypeQuals: IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    // Qualifiers are hoisted off the element type and re-applied to the array
    // as a whole in the canonical form.
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getConstantArrayType(EltTy: QualType(canonSplit.Ty, 0), ArySizeIn: ArySize, SizeExpr: nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about.
    // The recursive call above may have invalidated the earlier InsertPos.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = ConstantArrayType::Create(Ctx: *this, ET: EltTy, Can: Canon, Sz: ArySize, SzExpr: SizeExpr,
                                        SzMod: ASM, Qual: IndexTypeQuals);
  ConstantArrayTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4262
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
///
/// The transformation walks through sugar-free type structure, rebuilding
/// each layer around a recursively-decayed inner type, and re-applies the
/// original's top-level qualifiers at the end.
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Strip sugar (and split off top-level qualifiers) before dispatching on
  // the canonical type class.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::PackIndexing:
  case Type::BitInt:
  case Type::DependentBitInt:
  case Type::ArrayParameter:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified.  All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(T: getVariableArrayDecayedType(
                              type: cast<PointerType>(Val: ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(Val: ty);
    result = getLValueReferenceType(
                 T: getVariableArrayDecayedType(type: lv->getPointeeType()),
                                    SpelledAsLValue: lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(Val: ty);
    result = getRValueReferenceType(
                 T: getVariableArrayDecayedType(type: lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(Val: ty);
    result = getAtomicType(T: getVariableArrayDecayedType(type: at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(Val: ty);
    result = getConstantArrayType(
                 EltTy: getVariableArrayDecayedType(type: cat->getElementType()),
                 ArySizeIn: cat->getSize(),
                 SizeExpr: cat->getSizeExpr(),
                 ASM: cat->getSizeModifier(),
                 IndexTypeQuals: cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(Val: ty);
    result = getDependentSizedArrayType(
        EltTy: getVariableArrayDecayedType(type: dat->getElementType()), NumElts: dat->getSizeExpr(),
        ASM: dat->getSizeModifier(), IndexTypeQuals: dat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: iat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Normal,
                             IndexTypeQuals: iat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: vat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Star,
                             IndexTypeQuals: vat->getIndexTypeCVRQualifiers());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(T: result, Qs: split.Quals);
}
4400
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
///
/// VLA types are never uniqued (no folding set), because the size expression
/// itself is not uniqued; every call produces a fresh node.
QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    // Canonical form: qualifiers hoisted from the element onto the array.
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getVariableArrayType(EltTy: QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals);
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
  }

  auto *New = new (*this, alignof(VariableArrayType))
      VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals);

  VariableArrayTypes.push_back(x: New);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4426
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType
ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements,
                                       ArraySizeModifier ASM,
                                       unsigned elementTypeQuals) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  SplitQualType canonElementType = getCanonicalType(T: elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // Note: with a size expression, profiling is done on the canonical
  // (unqualified) element type; without one, on the spelled element type.
  DependentSizedArrayType::Profile(
      ID, Context: *this, ET: numElements ? QualType(canonElementType.Ty, 0) : elementType,
      SizeMod: ASM, TypeQuals: elementTypeQuals, E: numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.
  if (!numElements) {
    if (canonTy)
      return QualType(canonTy, 0);

    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
    Types.push_back(Elt: newType);
    return QualType(newType, 0);
  }

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(T: QualType(canonTy,0),
                                    Qs: canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4492
/// Return the unique reference to the incomplete array type ('T[]') with the
/// given element type, size modifier and index-type qualifiers.
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, ET: elementType, SizeMod: ASM, TypeQuals: elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(T: elementType).split();
    canon = getIncompleteArrayType(elementType: QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(T: canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about.
    // The recursive call above may have invalidated the earlier insertPos.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, alignof(IncompleteArrayType))
      IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
  Types.push_back(Elt: newType);
  return QualType(newType, 0);
}
4529
/// Map a scalable builtin vector type (AArch64 SVE or RISC-V V) to its
/// element type, scalable element count, and number of vectors (NF).
/// The case bodies are generated from the target .def files below.
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
// NOTE(review): these two helper macros appear unused within this function as
// visible here — presumably retained for the .def expansions; confirm before
// removing.
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");

#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy),       \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {MFloat8Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), NF};
#include "clang/Basic/AArch64ACLETypes.def"

#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
4584
/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
///
/// Only valid when targeting Wasm with the 'reference-types' feature; any
/// other configuration is a programmer error (unreachable).
QualType ASTContext::getWebAssemblyExternrefType() const {
  if (Target->getTriple().isWasm() && Target->hasFeature(Feature: "reference-types")) {
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  if (BuiltinType::Id == BuiltinType::WasmExternRef)                           \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
4597
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
///
/// Matches (EltTy, NumElts, NumFields) against the target's scalable builtin
/// vector singletons (AArch64 ACLE or RISC-V V); returns a null QualType when
/// nothing matches.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  if (Target->hasAArch64ACLETypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);

#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() &&          \
      EltTy->hasSignedIntegerRepresentation() == IsSigned &&                   \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return SingletonId;                                                        \
  }
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&        \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return SingletonId;                                                        \
  }
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&         \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return SingletonId;                                                        \
  }
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->isMFloat8Type() && EltTySize == ElBits &&                         \
      NumElts == (NumEls * NF) && NumFields == 1) {                            \
    return SingletonId;                                                        \
  }
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1)    \
    return SingletonId;
#include "clang/Basic/AArch64ACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)             \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  return QualType();
}
4655
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorKind VecKind) const {
  assert(vecType->isBuiltinType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::Vector, VecKind);

  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(vecType: getCanonicalType(T: vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about.
    // The recursive call above may have invalidated the earlier InsertPos.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(VectorType))
      VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4689
/// Return a vector type whose element count is the dependent expression
/// \p SizeExpr. Nodes are uniqued on the canonical element type; a sugared
/// node pointing at the canonical one is built when \p VecType is not
/// canonical.
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // A canonical node already exists; build a (possibly sugared) node that
    // refers to it.
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(T: VecType);
    if (CanonVecTy == VecType) {
      // VecType is already canonical: this new node is itself the canonical
      // one, so insert it into the folding set.
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (and insert) the canonical node first, then the sugared node
      // referring to it.
      QualType CanonTy = getDependentVectorType(VecType: CanonVecTy, SizeExpr,
                                                AttrLoc: SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4727
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size.  VectorType must be a built-in type.
QualType ASTContext::getExtVectorType(QualType vecType,
                                      unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));

  // Check if we've already instantiated a vector of this type.
  // ExtVector shares the VectorTypes folding set; the Type::ExtVector class
  // tag in the profile keeps it distinct from plain vectors.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::ExtVector,
                      VecKind: VectorKind::Generic);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(vecType: getCanonicalType(T: vecType), NumElts);

    // Get the new insert position for the node we care about.
    // The recursive call above may have invalidated the earlier InsertPos.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ExtVectorType))
      ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4761
/// Return an ext_vector type whose element count is the dependent expression
/// \p SizeExpr. Uniqued on the canonical element type; a sugared node is
/// produced when \p vecType is not canonical.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(T: vecType);
    if (CanonVecTy == vecType) {
      // vecType is canonical, so this node is the canonical one; insert it.
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build the canonical node first, then a sugared node referring to it.
      QualType CanonExtTy = getDependentSizedExtVectorType(vecType: CanonVecTy, SizeExpr,
                                                           AttrLoc: SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4802
/// Return the unique reference to the matrix type of the specified element
/// type and constant dimensions.
QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
                                           unsigned NumColumns) const {
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementType: ElementTy, NumRows, NumColumns,
                              TypeClass: Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy) &&
         "need a valid element type");
  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
         ConstantMatrixType::isDimensionValid(NumColumns) &&
         "need valid matrix dimensions");
  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(ElementTy: getCanonicalType(T: ElementTy), NumRows, NumColumns);

    // Re-find the insert position: the recursive call above may have
    // invalidated the earlier InsertPos.
    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(ConstantMatrixType))
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4834
4835QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4836 Expr *RowExpr,
4837 Expr *ColumnExpr,
4838 SourceLocation AttrLoc) const {
4839 QualType CanonElementTy = getCanonicalType(T: ElementTy);
4840 llvm::FoldingSetNodeID ID;
4841 DependentSizedMatrixType::Profile(ID, Context: *this, ElementType: CanonElementTy, RowExpr,
4842 ColumnExpr);
4843
4844 void *InsertPos = nullptr;
4845 DependentSizedMatrixType *Canon =
4846 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4847
4848 if (!Canon) {
4849 Canon = new (*this, alignof(DependentSizedMatrixType))
4850 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4851 ColumnExpr, AttrLoc);
4852#ifndef NDEBUG
4853 DependentSizedMatrixType *CanonCheck =
4854 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4855 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4856#endif
4857 DependentSizedMatrixTypes.InsertNode(N: Canon, InsertPos);
4858 Types.push_back(Elt: Canon);
4859 }
4860
4861 // Already have a canonical version of the matrix type
4862 //
4863 // If it exactly matches the requested type, use it directly.
4864 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4865 Canon->getRowExpr() == ColumnExpr)
4866 return QualType(Canon, 0);
4867
4868 // Use Canon as the canonical type for newly-built type.
4869 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4870 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4871 ColumnExpr, AttrLoc);
4872 Types.push_back(Elt: New);
4873 return QualType(New, 0);
4874}
4875
/// Return a type qualified by a dependent address-space expression.
/// Uniqued on the canonical pointee type; a sugared node pointing at the
/// canonical one is built when the spelled pointee type differs.
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(T: PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, Context: *this, PointeeType: canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  if (!canonTy) {
    // No canonical node yet: build and insert it.
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // If the canonical node matches the request exactly, return it directly.
  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  // Otherwise build a sugared node following the spelling of the pointee.
  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4909
4910/// Determine whether \p T is canonical as the result type of a function.
4911static bool isCanonicalResultType(QualType T) {
4912 return T.isCanonical() &&
4913 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4914 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4915}
4916
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++).  Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultType: ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // If the result type isn't canonical (by function-result rules, which also
  // exclude ARC lifetime qualifiers), build the canonical node first.
  QualType Canonical;
  if (!isCanonicalResultType(T: ResultTy)) {
    Canonical =
      getFunctionNoProtoType(ResultTy: getCanonicalFunctionResultType(ResultType: ResultTy), Info);

    // Get the new insert position for the node we care about.
    // The recursive call above may have invalidated the earlier InsertPos.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(Elt: New);
  FunctionNoProtoTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4955
4956CanQualType
4957ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4958 CanQualType CanResultType = getCanonicalType(T: ResultType);
4959
4960 // Canonical result types do not have ARC lifetime qualifiers.
4961 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4962 Qualifiers Qs = CanResultType.getQualifiers();
4963 Qs.removeObjCLifetime();
4964 return CanQualType::CreateUnsafe(
4965 Other: getQualifiedType(T: CanResultType.getUnqualifiedType(), Qs));
4966 }
4967
4968 return CanResultType;
4969}
4970
4971static bool isCanonicalExceptionSpecification(
4972 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4973 if (ESI.Type == EST_None)
4974 return true;
4975 if (!NoexceptInType)
4976 return false;
4977
4978 // C++17 onwards: exception specification is part of the type, as a simple
4979 // boolean "can this function type throw".
4980 if (ESI.Type == EST_BasicNoexcept)
4981 return true;
4982
4983 // A noexcept(expr) specification is (possibly) canonical if expr is
4984 // value-dependent.
4985 if (ESI.Type == EST_DependentNoexcept)
4986 return true;
4987
4988 // A dynamic exception specification is canonical if it only contains pack
4989 // expansions (so we can't tell whether it's non-throwing) and all its
4990 // contained types are canonical.
4991 if (ESI.Type == EST_Dynamic) {
4992 bool AnyPackExpansions = false;
4993 for (QualType ET : ESI.Exceptions) {
4994 if (!ET.isCanonical())
4995 return false;
4996 if (ET->getAs<PackExpansionType>())
4997 AnyPackExpansions = true;
4998 }
4999 return AnyPackExpansions;
5000 }
5001
5002 return false;
5003}
5004
/// Build a uniqued FunctionProtoType for the given result type, parameter
/// types and extended prototype info. When \p OnlyWantCanonical is set, the
/// caller asserts all inputs are canonical (checked below) and the result is
/// the canonical function type itself; otherwise a sugared node referencing
/// its canonical form may be produced.
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
                             *this, true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(Existing);
    Unique = true;
  }

  // From C++17 the "can throw" boolean is part of the canonical type.
  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      // Fold the exception specification down to its canonical C++17 form.
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

      // A dynamic exception specification is almost always "not noexcept",
      // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(getCanonicalType(ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      // Pre-C++17: the exception specification is not part of the type at
      // all, so drop it entirely from the canonical form.
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
    Canonical =
        getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);

    // Get the new insert position for the node we care about: the recursive
    // call may have inserted into the folding set, invalidating InsertPos.
    FunctionProtoType *NewIP =
      FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
      Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
      FunctionEffect, EffectConditionExpr>(
      NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
      EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
      ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
      EPI.ExtParameterInfos ? NumArgs : 0,
      EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, EPI.FunctionEffects.size(),
      EPI.FunctionEffects.conditions().size());

  auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(FTP);
  // Sugar nodes created for distinct noexcept expressions (Unique == true)
  // are deliberately kept out of the folding set.
  if (!Unique)
    FunctionProtoTypes.InsertNode(FTP, InsertPos);
  if (!EPI.FunctionEffects.empty())
    AnyFunctionEffects = true;
  return QualType(FTP, 0);
}
5149
/// Return the unique pipe type with the given element type and read/write
/// direction.
QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
  llvm::FoldingSetNodeID ID;
  PipeType::Profile(ID, T, ReadOnly);

  void *InsertPos = nullptr;
  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pipe element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPipeType(getCanonicalType(T), ReadOnly);

    // Get the new insert position for the node we care about; the recursive
    // call above may have invalidated InsertPos.
    PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
  Types.push_back(New);
  PipeTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
5174
5175QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
5176 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
5177 return LangOpts.OpenCL ? getAddrSpaceQualType(T: Ty, AddressSpace: LangAS::opencl_constant)
5178 : Ty;
5179}
5180
/// Return the read-only pipe type with element type \p T.
QualType ASTContext::getReadPipeType(QualType T) const {
  return getPipeType(T, /*ReadOnly=*/true);
}
5184
/// Return the write-only pipe type with element type \p T.
QualType ASTContext::getWritePipeType(QualType T) const {
  return getPipeType(T, /*ReadOnly=*/false);
}
5188
5189QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
5190 llvm::FoldingSetNodeID ID;
5191 BitIntType::Profile(ID, IsUnsigned, NumBits);
5192
5193 void *InsertPos = nullptr;
5194 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5195 return QualType(EIT, 0);
5196
5197 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
5198 BitIntTypes.InsertNode(N: New, InsertPos);
5199 Types.push_back(Elt: New);
5200 return QualType(New, 0);
5201}
5202
/// Return the unique dependent _BitInt type whose bit width is given by the
/// instantiation-dependent expression \p NumBitsExpr.
QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
                                            Expr *NumBitsExpr) const {
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
  llvm::FoldingSetNodeID ID;
  DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);

  void *InsertPos = nullptr;
  if (DependentBitIntType *Existing =
          DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, alignof(DependentBitIntType))
      DependentBitIntType(IsUnsigned, NumBitsExpr);
  DependentBitIntTypes.InsertNode(New, InsertPos);

  Types.push_back(New);
  return QualType(New, 0);
}
5221
5222#ifndef NDEBUG
5223static bool NeedsInjectedClassNameType(const RecordDecl *D) {
5224 if (!isa<CXXRecordDecl>(D)) return false;
5225 const auto *RD = cast<CXXRecordDecl>(D);
5226 if (isa<ClassTemplatePartialSpecializationDecl>(RD))
5227 return true;
5228 if (RD->getDescribedClassTemplate() &&
5229 !isa<ClassTemplateSpecializationDecl>(RD))
5230 return true;
5231 return false;
5232}
5233#endif
5234
/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
                                              QualType TST) const {
  assert(NeedsInjectedClassNameType(Decl));
  if (Decl->TypeForDecl) {
    // Already built for this exact declaration.
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    // Redeclaration: share the node built for the earlier declaration.
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    // First declaration: create the node and cache it on the declaration.
    Type *newType = new (*this, alignof(InjectedClassNameType))
      InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}
5254
5255/// getTypeDeclType - Return the unique reference to the type for the
5256/// specified type declaration.
5257QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
5258 assert(Decl && "Passed null for Decl param");
5259 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
5260
5261 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Val: Decl))
5262 return getTypedefType(Decl: Typedef);
5263
5264 assert(!isa<TemplateTypeParmDecl>(Decl) &&
5265 "Template type parameter types are always available.");
5266
5267 if (const auto *Record = dyn_cast<RecordDecl>(Val: Decl)) {
5268 assert(Record->isFirstDecl() && "struct/union has previous declaration");
5269 assert(!NeedsInjectedClassNameType(Record));
5270 return getRecordType(Decl: Record);
5271 } else if (const auto *Enum = dyn_cast<EnumDecl>(Val: Decl)) {
5272 assert(Enum->isFirstDecl() && "enum has previous declaration");
5273 return getEnumType(Decl: Enum);
5274 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl)) {
5275 return getUnresolvedUsingType(Decl: Using);
5276 } else
5277 llvm_unreachable("TypeDecl without a type?");
5278
5279 return QualType(Decl->TypeForDecl, 0);
5280}
5281
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl. \p Underlying, when non-null, supplies an
/// underlying type that may be differently sugared from the declaration's
/// own (it must be the same type, see the assert below).
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                                    QualType Underlying) const {
  if (!Decl->TypeForDecl) {
    // First request for this declaration: build the node whose type matches
    // the declaration and cache it directly on the declaration.
    if (Underlying.isNull())
      Underlying = Decl->getUnderlyingType();
    auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
        Type::Typedef, Decl, Underlying, /*HasTypeDifferentFromDecl=*/false);
    Decl->TypeForDecl = NewType;
    Types.push_back(NewType);
    return QualType(NewType, 0);
  }
  if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
    return QualType(Decl->TypeForDecl, 0);
  // Divergent case: the caller's underlying type equals but is not identical
  // to the declaration's. Such nodes are uniqued in a folding set rather
  // than cached on the declaration.
  assert(hasSameType(Decl->getUnderlyingType(), Underlying));

  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Decl, Underlying);

  void *InsertPos = nullptr;
  if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    assert(!T->typeMatchesDecl() &&
           "non-divergent case should be handled with TypeDecl");
    return QualType(T, 0);
  }

  // Allocate with one trailing QualType slot to hold the divergent
  // underlying type.
  void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
                       alignof(TypedefType));
  auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
                                        /*HasTypeDifferentFromDecl=*/true);
  TypedefTypes.InsertNode(NewType, InsertPos);
  Types.push_back(NewType);
  return QualType(NewType, 0);
}
5317
/// Return the unique UsingType node for the given using-shadow declaration
/// and underlying type.
QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
                                  QualType Underlying) const {
  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Found, Underlying);

  void *InsertPos = nullptr;
  if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  const Type *TypeForDecl =
      cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();

  // The underlying type must be unqualified and canonically identical to the
  // target declaration's own type.
  assert(!Underlying.hasLocalQualifiers());
  QualType Canon = Underlying->getCanonicalTypeInternal();
  assert(TypeForDecl->getCanonicalTypeInternal() == Canon);

  // When the underlying type is exactly the target declaration's type, clear
  // it so the trailing QualType storage is omitted below.
  if (Underlying.getTypePtr() == TypeForDecl)
    Underlying = QualType();
  void *Mem =
      Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
               alignof(UsingType));
  UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
  Types.push_back(NewType);
  UsingTypes.InsertNode(NewType, InsertPos);
  return QualType(NewType, 0);
}
5344
5345QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
5346 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
5347
5348 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
5349 if (PrevDecl->TypeForDecl)
5350 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
5351
5352 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
5353 Decl->TypeForDecl = newType;
5354 Types.push_back(Elt: newType);
5355 return QualType(newType, 0);
5356}
5357
5358QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
5359 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
5360
5361 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
5362 if (PrevDecl->TypeForDecl)
5363 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
5364
5365 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
5366 Decl->TypeForDecl = newType;
5367 Types.push_back(Elt: newType);
5368 return QualType(newType, 0);
5369}
5370
/// Choose the smallest integer type (and its promotion type) able to hold an
/// enumeration needing \p NumNegativeBits to represent its most negative
/// enumerator and \p NumPositiveBits for its largest positive one. When
/// \p IsPacked, char/short are also considered. Returns true if even
/// 'long long' cannot represent every enumerator.
bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
                                      unsigned NumPositiveBits,
                                      QualType &BestType,
                                      QualType &BestPromotionType) {
  unsigned IntWidth = Target->getIntWidth();
  unsigned CharWidth = Target->getCharWidth();
  unsigned ShortWidth = Target->getShortWidth();
  bool EnumTooLarge = false;
  // BestWidth tracks the bit width of whatever BestType is set to.
  unsigned BestWidth;
  if (NumNegativeBits) {
    // If there is a negative value, figure out the smallest integer type (of
    // int/long/longlong) that fits.
    // If it's packed, check also if it fits a char or a short.
    // Note the strict '<' for positive bits: a signed type of width W holds
    // positives of at most W-1 bits.
    if (IsPacked && NumNegativeBits <= CharWidth &&
        NumPositiveBits < CharWidth) {
      BestType = SignedCharTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumNegativeBits <= ShortWidth &&
               NumPositiveBits < ShortWidth) {
      BestType = ShortTy;
      BestWidth = ShortWidth;
    } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
      BestType = IntTy;
      BestWidth = IntWidth;
    } else {
      BestWidth = Target->getLongWidth();

      if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
        BestType = LongTy;
      } else {
        BestWidth = Target->getLongLongWidth();

        if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
          EnumTooLarge = true;
        BestType = LongLongTy;
      }
    }
    // Types narrower than int promote to int.
    BestPromotionType = (BestWidth <= IntWidth ? IntTy : BestType);
  } else {
    // If there is no negative value, figure out the smallest type that fits
    // all of the enumerator values.
    // If it's packed, check also if it fits a char or a short.
    if (IsPacked && NumPositiveBits <= CharWidth) {
      BestType = UnsignedCharTy;
      BestPromotionType = IntTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumPositiveBits <= ShortWidth) {
      BestType = UnsignedShortTy;
      BestPromotionType = IntTy;
      BestWidth = ShortWidth;
    } else if (NumPositiveBits <= IntWidth) {
      BestType = UnsignedIntTy;
      BestWidth = IntWidth;
      // In C++ an unsigned type promotes to the signed type when every value
      // fits; when all bits are used, the promotion stays unsigned.
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedIntTy
                              : IntTy;
    } else if (NumPositiveBits <= (BestWidth = Target->getLongWidth())) {
      BestType = UnsignedLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongTy
                              : LongTy;
    } else {
      BestWidth = Target->getLongLongWidth();
      if (NumPositiveBits > BestWidth) {
        // This can happen with bit-precise integer types, but those are not
        // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
        // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
        // a 128-bit integer, we should consider doing the same.
        EnumTooLarge = true;
      }
      BestType = UnsignedLongLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongLongTy
                              : LongLongTy;
    }
  }
  return EnumTooLarge;
}
5449
5450bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
5451 assert((T->isIntegralType(*this) || T->isEnumeralType()) &&
5452 "Integral type required!");
5453 unsigned BitWidth = getIntWidth(T);
5454
5455 if (Value.isUnsigned() || Value.isNonNegative()) {
5456 if (T->isSignedIntegerOrEnumerationType())
5457 --BitWidth;
5458 return Value.getActiveBits() <= BitWidth;
5459 }
5460 return Value.getSignificantBits() <= BitWidth;
5461}
5462
/// Return the unique type node for an unresolved using-typename declaration,
/// sharing the node with the canonical declaration when available.
QualType ASTContext::getUnresolvedUsingType(
    const UnresolvedUsingTypenameDecl *Decl) const {
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // Reuse (and cache) the node built for the canonical declaration.
  if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
          Decl->getCanonicalDecl())
    if (CanonicalDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);

  Type *newType =
      new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}
5479
/// Return the uniqued AttributedType for the given attribute kind, modified
/// type and equivalent type. \p attr, when provided, is the attribute
/// instance itself and must agree with \p attrKind.
QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType,
                                       const Attr *attr) const {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType, attr);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  assert(!attr || attr->getKind() == attrKind);

  // An AttributedType is sugar: its canonical type is that of the
  // equivalent type.
  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, alignof(AttributedType))
      AttributedType(canon, attrKind, attr, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}
5502
5503QualType ASTContext::getAttributedType(const Attr *attr, QualType modifiedType,
5504 QualType equivalentType) const {
5505 return getAttributedType(attrKind: attr->getKind(), modifiedType, equivalentType, attr);
5506}
5507
5508QualType ASTContext::getAttributedType(NullabilityKind nullability,
5509 QualType modifiedType,
5510 QualType equivalentType) {
5511 switch (nullability) {
5512 case NullabilityKind::NonNull:
5513 return getAttributedType(attrKind: attr::TypeNonNull, modifiedType, equivalentType);
5514
5515 case NullabilityKind::Nullable:
5516 return getAttributedType(attrKind: attr::TypeNullable, modifiedType, equivalentType);
5517
5518 case NullabilityKind::NullableResult:
5519 return getAttributedType(attrKind: attr::TypeNullableResult, modifiedType,
5520 equivalentType);
5521
5522 case NullabilityKind::Unspecified:
5523 return getAttributedType(attrKind: attr::TypeNullUnspecified, modifiedType,
5524 equivalentType);
5525 }
5526
5527 llvm_unreachable("Unknown nullability kind");
5528}
5529
/// Return the uniqued BTFTagAttributedType wrapping \p Wrapped with the
/// given btf_type_tag attribute.
QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
                                             QualType Wrapped) const {
  llvm::FoldingSetNodeID ID;
  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);

  void *InsertPos = nullptr;
  BTFTagAttributedType *Ty =
      BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  // The node is sugar over the wrapped type; canonicalize through it.
  QualType Canon = getCanonicalType(Wrapped);
  Ty = new (*this, alignof(BTFTagAttributedType))
      BTFTagAttributedType(Canon, Wrapped, BTFAttr);

  Types.push_back(Ty);
  BTFTagAttributedTypes.InsertNode(Ty, InsertPos);

  return QualType(Ty, 0);
}
5550
5551QualType ASTContext::getHLSLAttributedResourceType(
5552 QualType Wrapped, QualType Contained,
5553 const HLSLAttributedResourceType::Attributes &Attrs) {
5554
5555 llvm::FoldingSetNodeID ID;
5556 HLSLAttributedResourceType::Profile(ID, Wrapped, Contained, Attrs);
5557
5558 void *InsertPos = nullptr;
5559 HLSLAttributedResourceType *Ty =
5560 HLSLAttributedResourceTypes.FindNodeOrInsertPos(ID, InsertPos);
5561 if (Ty)
5562 return QualType(Ty, 0);
5563
5564 Ty = new (*this, alignof(HLSLAttributedResourceType))
5565 HLSLAttributedResourceType(Wrapped, Contained, Attrs);
5566
5567 Types.push_back(Elt: Ty);
5568 HLSLAttributedResourceTypes.InsertNode(N: Ty, InsertPos);
5569
5570 return QualType(Ty, 0);
5571}
5572
/// Return the uniqued HLSL inline SPIR-V type for the given opcode, size,
/// alignment and operand list.
QualType ASTContext::getHLSLInlineSpirvType(uint32_t Opcode, uint32_t Size,
                                            uint32_t Alignment,
                                            ArrayRef<SpirvOperand> Operands) {
  llvm::FoldingSetNodeID ID;
  HLSLInlineSpirvType::Profile(ID, Opcode, Size, Alignment, Operands);

  void *InsertPos = nullptr;
  HLSLInlineSpirvType *Ty =
      HLSLInlineSpirvTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  // The operands are tail-allocated after the node itself.
  void *Mem = Allocate(
      HLSLInlineSpirvType::totalSizeToAlloc<SpirvOperand>(Operands.size()),
      alignof(HLSLInlineSpirvType));

  Ty = new (Mem) HLSLInlineSpirvType(Opcode, Size, Alignment, Operands);

  Types.push_back(Ty);
  HLSLInlineSpirvTypes.InsertNode(Ty, InsertPos);

  return QualType(Ty, 0);
}
5596
/// Retrieve a substitution-result type.
QualType ASTContext::getSubstTemplateTypeParmType(QualType Replacement,
                                                  Decl *AssociatedDecl,
                                                  unsigned Index,
                                                  UnsignedOrNone PackIndex,
                                                  bool Final) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
                                     PackIndex, Final);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    // A trailing QualType slot is allocated only when the replacement type
    // is not canonical.
    void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
                             !Replacement.isCanonical()),
                         alignof(SubstTemplateTypeParmType));
    SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
                                                    Index, PackIndex, Final);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}
5622
/// Retrieve a substituted template type parameter pack type for the given
/// argument pack; all pack elements must be type arguments (asserted below).
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
                                             unsigned Index, bool Final,
                                             const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  // If either the associated declaration or the argument pack is not
  // canonical, recursively build the canonical node first.
  QualType Canon;
  {
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(ArgPack)) {
      Canon = getSubstTemplateTypeParmPackType(
          AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack);
      // Refresh InsertPos after the recursive insertion; the canonical node
      // must not have the same profile as ours.
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
      SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
                                    ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
5661
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    // A node carrying the declaration is sugar over the declaration-less
    // canonical form, built by recursing without TTPDecl.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(Depth, Index, ParameterPack, TTPDecl, Canon);

    // The recursive call must not have created a node with our profile;
    // re-query to refresh InsertPos after the insertion.
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    // Declaration-less form: this node is its own canonical type.
    TypeParm = new (*this, alignof(TemplateTypeParmType)) TemplateTypeParmType(
        Depth, Index, ParameterPack, /*TTPDecl=*/nullptr, /*Canon=*/QualType());

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}
5695
/// Build a TypeSourceInfo for a template specialization written at
/// \p NameLoc with the given as-written arguments, copying argument location
/// information from \p SpecifiedArgs.
TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
    TemplateName Name, SourceLocation NameLoc,
    const TemplateArgumentListInfo &SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  QualType TST = getTemplateSpecializationType(Name, SpecifiedArgs.arguments(),
                                               CanonicalArgs, Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  // No 'template' keyword was written; the remaining locations come from the
  // argument list as written.
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(SpecifiedArgs.getLAngleLoc());
  TL.setRAngleLoc(SpecifiedArgs.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, SpecifiedArgs[i].getLocInfo());
  return DI;
}
5714
5715QualType ASTContext::getTemplateSpecializationType(
5716 TemplateName Template, ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
5717 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5718 SmallVector<TemplateArgument, 4> SpecifiedArgVec;
5719 SpecifiedArgVec.reserve(N: SpecifiedArgs.size());
5720 for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
5721 SpecifiedArgVec.push_back(Elt: Arg.getArgument());
5722
5723 return getTemplateSpecializationType(T: Template, SpecifiedArgs: SpecifiedArgVec, CanonicalArgs,
5724 Underlying);
5725}
5726
5727[[maybe_unused]] static bool
5728hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
5729 for (const TemplateArgument &Arg : Args)
5730 if (Arg.isPackExpansion())
5731 return true;
5732 return false;
5733}
5734
/// Build the uniqued canonical TemplateSpecializationType for an
/// already-canonical template name and argument list (both asserted); the
/// resulting node is asserted to be dependent.
QualType ASTContext::getCanonicalTemplateSpecializationType(
    TemplateName Template, ArrayRef<TemplateArgument> Args) const {
  assert(Template ==
         getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true));
  assert(!Args.empty());
#ifndef NDEBUG
  for (const auto &Arg : Args)
    assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif

  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, Template, Args, QualType(), *this);
  void *InsertPos = nullptr;
  if (auto *T = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // The arguments are stored in memory allocated directly after the node.
  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * Args.size(),
                       alignof(TemplateSpecializationType));
  auto *Spec = new (Mem)
      TemplateSpecializationType(Template, /*IsAlias=*/false, Args, QualType());
  assert(Spec->isDependentType() &&
         "canonical template specialization must be dependent");
  Types.push_back(Spec);
  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
  return QualType(Spec, 0);
}
5762
QualType ASTContext::getTemplateSpecializationType(
    TemplateName Template, ArrayRef<TemplateArgument> SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  assert(!Template.getUnderlying().getAsDependentTemplateName() &&
         "No dependent template names here!");

  const auto *TD = Template.getAsTemplateDecl(/*IgnoreDeduced=*/true);
  bool IsTypeAlias = TD && TD->isTypeAlias();
  // If the caller did not provide the underlying ("desugared") type, compute
  // the canonical specialization here. If the template name and all arguments
  // are already canonical, the canonical node itself can be returned directly
  // without building a sugar node.
  if (Underlying.isNull()) {
    TemplateName CanonTemplate =
        getCanonicalTemplateName(Name: Template, /*IgnoreDeduced=*/true);
    bool NonCanonical = Template != CanonTemplate;
    SmallVector<TemplateArgument, 4> CanonArgsVec;
    if (CanonicalArgs.empty()) {
      // No canonical arguments supplied: canonicalize the specified ones,
      // tracking whether anything actually changed.
      CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
      NonCanonical |= canonicalizeTemplateArguments(Args: CanonArgsVec);
      CanonicalArgs = CanonArgsVec;
    } else {
      // Canonical arguments supplied by the caller: the node is sugar iff
      // they differ structurally from the specified arguments.
      NonCanonical |= !llvm::equal(
          LRange&: SpecifiedArgs, RRange&: CanonicalArgs,
          P: [](const TemplateArgument &A, const TemplateArgument &B) {
            return A.structurallyEquals(Other: B);
          });
    }

    // We can get here with an alias template when the specialization
    // contains a pack expansion that does not match up with a parameter
    // pack, or a builtin template which cannot be resolved due to dependency.
    assert((!isa_and_nonnull<TypeAliasTemplateDecl>(TD) ||
            hasAnyPackExpansions(CanonicalArgs)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;

    Underlying =
        getCanonicalTemplateSpecializationType(Template: CanonTemplate, Args: CanonicalArgs);
    if (!NonCanonical)
      return Underlying;
  }
  // Non-canonical (sugar) node: trailing storage holds the as-written
  // arguments, plus one extra QualType slot for the aliased type when this is
  // a type-alias specialization.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * SpecifiedArgs.size() +
                           (IsTypeAlias ? sizeof(QualType) : 0),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec = new (Mem) TemplateSpecializationType(Template, IsTypeAlias,
                                                    SpecifiedArgs, Underlying);
  Types.push_back(Elt: Spec);
  return QualType(Spec, 0);
}
5810
QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
                                       NestedNameSpecifier *NNS,
                                       QualType NamedType,
                                       TagDecl *OwnedTagDecl) const {
  // Unique elaborated types through the folding set.
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);

  void *InsertPos = nullptr;
  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // The canonical form drops the elaboration sugar: it is simply the
  // canonical form of the named type.
  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(T: NamedType);
    // Canonicalization must not have created a node with our profile;
    // re-probe only to verify that the uniquing tables are consistent.
    ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Elaborated canonical type broken");
    (void)CheckT;
  }

  // Trailing storage holds the owned tag declaration, if any.
  void *Mem =
      Allocate(Size: ElaboratedType::totalSizeToAlloc<TagDecl *>(Counts: !!OwnedTagDecl),
               Align: alignof(ElaboratedType));
  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);

  Types.push_back(Elt: T);
  ElaboratedTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5840
// Returns the unique ParenType sugar node wrapping \p InnerType; the
// canonical type is that of the inner type (parentheses are pure sugar).
QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, Inner: InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(T: InnerType);
    // Canonicalization must not have created a node with our profile;
    // re-probe only to verify table consistency.
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
  Types.push_back(Elt: T);
  ParenTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5864
5865QualType
5866ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
5867 const IdentifierInfo *MacroII) const {
5868 QualType Canon = UnderlyingTy;
5869 if (!Canon.isCanonical())
5870 Canon = getCanonicalType(T: UnderlyingTy);
5871
5872 auto *newType = new (*this, alignof(MacroQualifiedType))
5873 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
5874 Types.push_back(Elt: newType);
5875 return QualType(newType, 0);
5876}
5877
5878static ElaboratedTypeKeyword
5879getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
5880 switch (Keyword) {
5881 // These are just themselves.
5882 case ElaboratedTypeKeyword::None:
5883 case ElaboratedTypeKeyword::Struct:
5884 case ElaboratedTypeKeyword::Union:
5885 case ElaboratedTypeKeyword::Enum:
5886 case ElaboratedTypeKeyword::Interface:
5887 return Keyword;
5888
5889 // These are equivalent.
5890 case ElaboratedTypeKeyword::Typename:
5891 return ElaboratedTypeKeyword::None;
5892
5893 // These are functionally equivalent, so relying on their equivalence is
5894 // IFNDR. By making them equivalent, we disallow overloading, which at least
5895 // can produce a diagnostic.
5896 case ElaboratedTypeKeyword::Class:
5897 return ElaboratedTypeKeyword::Struct;
5898 }
5899 llvm_unreachable("unexpected keyword kind");
5900}
5901
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier *NNS,
                                          const IdentifierInfo *Name) const {
  // Unique dependent name types through the folding set.
  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  if (DependentNameType *T =
          DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // Canonicalize the keyword and the nested-name-specifier; the identifier
  // itself is already canonical.
  ElaboratedTypeKeyword CanonKeyword =
      getCanonicalElaboratedTypeKeyword(Keyword);
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);

  // Only build a distinct canonical node if something changed; otherwise
  // Canon stays null and this node is its own canonical type.
  QualType Canon;
  if (CanonKeyword != Keyword || CanonNNS != NNS) {
    Canon = getDependentNameType(Keyword: CanonKeyword, NNS: CanonNNS, Name);
    // The recursive call may have grown the folding set; verify it did not
    // create a node with our profile (InsertPos is re-derived by the probe).
    [[maybe_unused]] DependentNameType *T =
        DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!T && "broken canonicalization");
    assert(Canon.isCanonical());
  }

  DependentNameType *T = new (*this, alignof(DependentNameType))
      DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(Elt: T);
  DependentNameTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5932
5933QualType ASTContext::getDependentTemplateSpecializationType(
5934 ElaboratedTypeKeyword Keyword, const DependentTemplateStorage &Name,
5935 ArrayRef<TemplateArgumentLoc> Args) const {
5936 // TODO: avoid this copy
5937 SmallVector<TemplateArgument, 16> ArgCopy;
5938 for (unsigned I = 0, E = Args.size(); I != E; ++I)
5939 ArgCopy.push_back(Elt: Args[I].getArgument());
5940 return getDependentTemplateSpecializationType(Keyword, Name, Args: ArgCopy);
5941}
5942
QualType ASTContext::getDependentTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, const DependentTemplateStorage &Name,
    ArrayRef<TemplateArgument> Args, bool IsCanonical) const {
  // Unique dependent template specializations through the folding set.
  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, Context: *this, Keyword, Name, Args);

  void *InsertPos = nullptr;
  if (auto *T = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(
          ID, InsertPos))
    return QualType(T, 0);

  NestedNameSpecifier *NNS = Name.getQualifier();

  QualType Canon;
  if (!IsCanonical) {
    // Canonicalize each component; the canonical form also always carries
    // the 'template' keyword.
    ElaboratedTypeKeyword CanonKeyword =
        getCanonicalElaboratedTypeKeyword(Keyword);
    NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
    bool AnyNonCanonArgs = false;
    auto CanonArgs =
        ::getCanonicalTemplateArguments(C: *this, Args, AnyNonCanonArgs);

    // Only build a distinct canonical node if some component changed;
    // otherwise Canon stays null and this node is its own canonical type.
    if (CanonKeyword != Keyword || AnyNonCanonArgs || CanonNNS != NNS ||
        !Name.hasTemplateKeyword()) {
      Canon = getDependentTemplateSpecializationType(
          Keyword: CanonKeyword, Name: {CanonNNS, Name.getName(), /*HasTemplateKeyword=*/true},
          Args: CanonArgs,
          /*IsCanonical=*/true);
      // Find the insert position again.
      [[maybe_unused]] auto *Nothing =
          DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID,
                                                                   InsertPos);
      assert(!Nothing && "canonical type broken");
    }
  } else {
    // Caller claims everything is already canonical; verify in debug builds.
    assert(Keyword == getCanonicalElaboratedTypeKeyword(Keyword));
    assert(Name.hasTemplateKeyword());
    assert(NNS == getCanonicalNestedNameSpecifier(NNS));
#ifndef NDEBUG
    for (const auto &Arg : Args)
      assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif
  }
  // Allocate with trailing storage for the argument list.
  void *Mem = Allocate(Size: (sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * Args.size()),
                       Align: alignof(DependentTemplateSpecializationType));
  auto *T =
      new (Mem) DependentTemplateSpecializationType(Keyword, Name, Args, Canon);
  Types.push_back(Elt: T);
  DependentTemplateSpecializationTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5995
// Builds the template argument with which a template parameter "names
// itself" inside its own template: a type for type parameters, a
// DeclRefExpr for non-type parameters, and a template name for template
// template parameters. Parameter packs are wrapped in a pack expansion and
// finally copied into a one-element argument pack.
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: Param)) {
    // Type parameter: the argument is the parameter's own type.
    QualType ArgType = getTypeDeclType(Decl: TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(Pattern: ArgType, NumExpansions: std::nullopt);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: Param)) {
    // Non-type parameter: the argument is an expression referencing the
    // parameter itself.
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(Context: *this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    ExprValueKind VK;
    if (T->isRecordType()) {
      // C++ [temp.param]p8: An id-expression naming a non-type
      // template-parameter of class type T denotes a static storage duration
      // object of type const T.
      T.addConst();
      VK = VK_LValue;
    } else {
      VK = Expr::getValueKindForType(T: NTTP->getType());
    }
    Expr *E = new (*this)
        DeclRefExpr(*this, NTTP, /*RefersToEnclosingVariableOrCapture=*/false,
                    T, VK, NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this) PackExpansionExpr(E, NTTP->getLocation(), std::nullopt);
    Arg = TemplateArgument(E, /*IsCanonical=*/false);
  } else {
    // Template template parameter: the argument is the (qualified) name of
    // the parameter itself.
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: Param);
    TemplateName Name = getQualifiedTemplateName(
        NNS: nullptr, /*TemplateKeyword=*/false, Template: TemplateName(TTP));
    if (TTP->isParameterPack())
      Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
    else
      Arg = TemplateArgument(Name);
  }

  // A parameter pack injects itself as an argument pack containing the
  // single expansion built above.
  if (Param->isTemplateParameterPack())
    Arg =
        TemplateArgument::CreatePackCopy(Context&: const_cast<ASTContext &>(*this), Args: Arg);

  return Arg;
}
6044
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          UnsignedOrNone NumExpansions,
                                          bool ExpectPackInType) const {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  // Unique pack expansion types through the folding set.
  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // If the pattern is non-canonical, first build the canonical expansion;
  // otherwise Canon stays null and this node is its own canonical type.
  QualType Canon;
  if (!Pattern.isCanonical()) {
    // The canonical pattern may no longer contain an unexpanded pack (e.g.
    // if the pack was sugar), so don't re-assert it here.
    Canon = getPackExpansionType(Pattern: getCanonicalType(T: Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, alignof(PackExpansionType))
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(Elt: T);
  PackExpansionTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6075
6076/// CmpProtocolNames - Comparison predicate for sorting protocols
6077/// alphabetically.
6078static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
6079 ObjCProtocolDecl *const *RHS) {
6080 return DeclarationName::compare(LHS: (*LHS)->getDeclName(), RHS: (*RHS)->getDeclName());
6081}
6082
6083static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
6084 if (Protocols.empty()) return true;
6085
6086 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
6087 return false;
6088
6089 for (unsigned i = 1; i != Protocols.size(); ++i)
6090 if (CmpProtocolNames(LHS: &Protocols[i - 1], RHS: &Protocols[i]) >= 0 ||
6091 Protocols[i]->getCanonicalDecl() != Protocols[i])
6092 return false;
6093 return true;
6094}
6095
6096static void
6097SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
6098 // Sort protocols, keyed by name.
6099 llvm::array_pod_sort(Start: Protocols.begin(), End: Protocols.end(), Compare: CmpProtocolNames);
6100
6101 // Canonicalize.
6102 for (ObjCProtocolDecl *&P : Protocols)
6103 P = P->getCanonicalDecl();
6104
6105 // Remove duplicates.
6106 auto ProtocolsEnd = llvm::unique(R&: Protocols);
6107 Protocols.erase(CS: ProtocolsEnd, CE: Protocols.end());
6108}
6109
6110QualType ASTContext::getObjCObjectType(QualType BaseType,
6111 ObjCProtocolDecl * const *Protocols,
6112 unsigned NumProtocols) const {
6113 return getObjCObjectType(Base: BaseType, typeArgs: {}, protocols: ArrayRef(Protocols, NumProtocols),
6114 /*isKindOf=*/false);
6115}
6116
// Builds (and uniques) an ObjCObjectType with the given base type, type
// arguments, protocol qualifiers, and __kindof flag.
QualType ASTContext::getObjCObjectType(
    QualType baseType,
    ArrayRef<QualType> typeArgs,
    ArrayRef<ObjCProtocolDecl *> protocols,
    bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(Val: baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, Base: baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized. If everything is already canonical, 'canonical' stays
  // null and this node becomes its own canonical type.
  QualType canonical;
  bool typeArgsAreCanonical = llvm::all_of(
      Range&: effectiveTypeArgs, P: [&](QualType type) { return type.isCanonical(); });
  bool protocolsSorted = areSortedAndUniqued(Protocols: protocols);
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(N: effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(Elt: getCanonicalType(T: typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    // Determine the canonical (sorted-and-uniqued) protocol list.
    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      SortAndUniqueProtocols(Protocols&: canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(baseType: getCanonicalType(T: baseType), typeArgs: canonTypeArgs,
                                  protocols: canonProtocols, isKindOf);

    // Regenerate InsertPos.
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Allocate with trailing storage for the type arguments and protocols.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCObjectTypeImpl));
  auto *T =
    new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                 isKindOf);

  Types.push_back(Elt: T);
  ObjCObjectTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6193
/// Apply Objective-C protocol qualifiers to the given type.
/// If this is for the canonical type of a type parameter, we can apply
/// protocol qualifiers on the ObjCObjectPointerType.
///
/// On failure (the type cannot carry protocol qualifiers), \p hasError is
/// set and the type is returned unchanged.
QualType
ASTContext::applyObjCProtocolQualifiers(QualType type,
                  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
                  bool allowOnPointerType) const {
  hasError = false;

  // Type parameter: rebuild the ObjCTypeParamType with the new protocols.
  if (const auto *objT = dyn_cast<ObjCTypeParamType>(Val: type.getTypePtr())) {
    return getObjCTypeParamType(Decl: objT->getDecl(), protocols);
  }

  // Apply protocol qualifiers to ObjCObjectPointerType.
  if (allowOnPointerType) {
    if (const auto *objPtr =
            dyn_cast<ObjCObjectPointerType>(Val: type.getTypePtr())) {
      const ObjCObjectType *objT = objPtr->getObjectType();
      // Merge protocol lists and construct ObjCObjectType.
      // Note: the local 'protocols' below deliberately shadows the parameter
      // with the merged (existing + new) list.
      SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
      protocolsVec.append(in_start: objT->qual_begin(),
                          in_end: objT->qual_end());
      protocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
      type = getObjCObjectType(
             baseType: objT->getBaseType(),
             typeArgs: objT->getTypeArgsAsWritten(),
             protocols,
             isKindOf: objT->isKindOfTypeAsWritten());
      return getObjCObjectPointerType(OIT: type);
    }
  }

  // Apply protocol qualifiers to ObjCObjectType.
  if (const auto *objT = dyn_cast<ObjCObjectType>(Val: type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.

    return getObjCObjectType(baseType: objT->getBaseType(),
                             typeArgs: objT->getTypeArgsAsWritten(),
                             protocols,
                             isKindOf: objT->isKindOfTypeAsWritten());
  }

  // If the canonical type is ObjCObjectType, ...
  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return getObjCObjectType(baseType: type, typeArgs: {}, protocols, isKindOf: false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinIdTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinClassTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // No case matched: the protocols cannot be applied to this type.
  hasError = true;
  return type;
}
6267
// Returns the unique ObjCTypeParamType for \p Decl qualified with
// \p protocols. The canonical type is the canonical underlying (bound) type
// with the protocol qualifiers applied.
QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, OTPDecl: Decl, CanonicalType: Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
      ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(T: Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifers.
    bool hasError;
    Canonical = getCanonicalType(T: applyObjCProtocolQualifiers(
        type: Canonical, protocols, hasError, allowOnPointerType: true /*allowOnPointerType*/));
    assert(!hasError && "Error when apply protocol qualifier to bound type");
  }

  // Allocate with trailing storage for the protocol list.
  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCTypeParamType));
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(Elt: newType);
  ObjCTypeParamTypes.InsertNode(N: newType, InsertPos);
  return QualType(newType, 0);
}
6298
6299void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
6300 ObjCTypeParamDecl *New) const {
6301 New->setTypeSourceInfo(getTrivialTypeSourceInfo(T: Orig->getUnderlyingType()));
6302 // Update TypeForDecl after updating TypeSourceInfo.
6303 auto NewTypeParamTy = cast<ObjCTypeParamType>(Val: New->getTypeForDecl());
6304 SmallVector<ObjCProtocolDecl *, 8> protocols;
6305 protocols.append(in_start: NewTypeParamTy->qual_begin(), in_end: NewTypeParamTy->qual_end());
6306 QualType UpdatedTy = getObjCTypeParamType(Decl: New, protocols);
6307 New->setTypeForDecl(UpdatedTy.getTypePtr());
6308}
6309
6310/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
6311/// protocol list adopt all protocols in QT's qualified-id protocol
6312/// list.
6313bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
6314 ObjCInterfaceDecl *IC) {
6315 if (!QT->isObjCQualifiedIdType())
6316 return false;
6317
6318 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
6319 // If both the right and left sides have qualifiers.
6320 for (auto *Proto : OPT->quals()) {
6321 if (!IC->ClassImplementsProtocol(lProto: Proto, lookupCategory: false))
6322 return false;
6323 }
6324 return true;
6325 }
6326 return false;
6327}
6328
/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(CDecl: IDecl, Protocols&: InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  // Note: 'Conforms' starts false, so an empty qualifier list falls through
  // to the second (stricter) check below.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(lProto: Proto, rProto: PI)) {
        Conforms = true;
        break;
      }
    }
    if (!Conforms)
      break;
  }
  if (Conforms)
    return true;

  // Fallback direction: every inherited protocol of IDecl must be adopted by
  // (compatible with) some protocol in QT's qualifier list.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(lProto: PI, rProto: Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}
6375
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  // Unique pointer types through the folding set.
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, T: ObjectT);

  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
        ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type. If ObjectT is already canonical,
  // 'Canonical' stays null and this node is its own canonical type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(ObjectT: getCanonicalType(T: ObjectT));

    // Regenerate InsertPos.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match.
  void *Mem =
      Allocate(Size: sizeof(ObjCObjectPointerType), Align: alignof(ObjCObjectPointerType));
  auto *QType =
    new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(Elt: QType);
  ObjCObjectPointerTypes.InsertNode(N: QType, InsertPos);
  return QualType(QType, 0);
}
6406
6407/// getObjCInterfaceType - Return the unique reference to the type for the
6408/// specified ObjC interface decl. The list of protocols is optional.
6409QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
6410 ObjCInterfaceDecl *PrevDecl) const {
6411 if (Decl->TypeForDecl)
6412 return QualType(Decl->TypeForDecl, 0);
6413
6414 if (PrevDecl) {
6415 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
6416 Decl->TypeForDecl = PrevDecl->TypeForDecl;
6417 return QualType(PrevDecl->TypeForDecl, 0);
6418 }
6419
6420 // Prefer the definition, if there is one.
6421 if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
6422 Decl = Def;
6423
6424 void *Mem = Allocate(Size: sizeof(ObjCInterfaceType), Align: alignof(ObjCInterfaceType));
6425 auto *T = new (Mem) ObjCInterfaceType(Decl);
6426 Decl->TypeForDecl = T;
6427 Types.push_back(Elt: T);
6428 return QualType(T, 0);
6429}
6430
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expression's are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't effect the type checker, since it operates
/// on canonical type's (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    // Dependent typeof(expr): profile on the expression itself (and the
    // qualified/unqualified flavor) so structurally-identical dependent
    // typeofs share a canonical node.
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, Context: *this, E: tofExpr,
                                     IsUnqual: Kind == TypeOfKind::Unqualified);

    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon =
        DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
          *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon = new (*this, alignof(DependentTypeOfExprType))
          DependentTypeOfExprType(*this, tofExpr, Kind);
      DependentTypeOfExprTypes.InsertNode(N: Canon, InsertPos);
      toe = Canon;
    }
  } else {
    // Non-dependent: the canonical type is just the canonical form of the
    // expression's type.
    QualType Canonical = getCanonicalType(T: tofExpr->getType());
    toe = new (*this, alignof(TypeOfExprType))
        TypeOfExprType(*this, tofExpr, Kind, Canonical);
  }
  Types.push_back(Elt: toe);
  return QualType(toe, 0);
}
6466
6467/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
6468/// TypeOfType nodes. The only motivation to unique these nodes would be
6469/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
6470/// an issue. This doesn't affect the type checker, since it operates
6471/// on canonical types (which are always unique).
6472QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
6473 QualType Canonical = getCanonicalType(T: tofType);
6474 auto *tot = new (*this, alignof(TypeOfType))
6475 TypeOfType(*this, tofType, Canonical, Kind);
6476 Types.push_back(Elt: tot);
6477 return QualType(tot, 0);
6478}
6479
6480/// getReferenceQualifiedType - Given an expr, will return the type for
6481/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
6482/// and class member access into account.
6483QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
6484 // C++11 [dcl.type.simple]p4:
6485 // [...]
6486 QualType T = E->getType();
6487 switch (E->getValueKind()) {
6488 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
6489 // type of e;
6490 case VK_XValue:
6491 return getRValueReferenceType(T);
6492 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
6493 // type of e;
6494 case VK_LValue:
6495 return getLValueReferenceType(T);
6496 // - otherwise, decltype(e) is the type of e.
6497 case VK_PRValue:
6498 return T;
6499 }
6500 llvm_unreachable("Unknown value kind");
6501}
6502
/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *E, QualType UnderlyingType) const {
  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  QualType CanonType;
  if (!E->isInstantiationDependent()) {
    // Non-dependent: canonicalize through the computed underlying type.
    CanonType = getCanonicalType(T: UnderlyingType);
  } else if (!UnderlyingType.isNull()) {
    // Dependent but with a known underlying type: the canonical form is the
    // underlying-type-free dependent decltype node (built recursively).
    CanonType = getDecltypeType(E, UnderlyingType: QualType());
  } else {
    // Fully dependent: unique on the expression via the folding set.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, Context: *this, E);

    void *InsertPos = nullptr;
    if (DependentDecltypeType *Canon =
            DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos))
      return QualType(Canon, 0);

    // Build a new, canonical decltype(expr) type.
    auto *DT =
        new (*this, alignof(DependentDecltypeType)) DependentDecltypeType(E);
    DependentDecltypeTypes.InsertNode(N: DT, InsertPos);
    Types.push_back(Elt: DT);
    return QualType(DT, 0);
  }
  auto *DT = new (*this, alignof(DecltypeType))
      DecltypeType(E, UnderlyingType, CanonType);
  Types.push_back(Elt: DT);
  return QualType(DT, 0);
}
6538
QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
                                         bool FullySubstituted,
                                         ArrayRef<QualType> Expansions,
                                         UnsignedOrNone Index) const {
  QualType Canonical;
  if (FullySubstituted && Index) {
    // The pack is fully substituted and the index is known: the canonical
    // type is just the canonical form of the selected expansion.
    Canonical = getCanonicalType(T: Expansions[*Index]);
  } else {
    // Otherwise build (or reuse) a dependent canonical node, uniqued on the
    // canonical pattern, the index expression, and the expansions.
    llvm::FoldingSetNodeID ID;
    PackIndexingType::Profile(ID, Context: *this, Pattern: Pattern.getCanonicalType(), E: IndexExpr,
                              FullySubstituted, Expansions);
    void *InsertPos = nullptr;
    PackIndexingType *Canon =
        DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // Allocate with trailing storage for the expansion list.
      void *Mem = Allocate(
          Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
          Align: TypeAlignment);
      Canon =
          new (Mem) PackIndexingType(QualType(), Pattern.getCanonicalType(),
                                     IndexExpr, FullySubstituted, Expansions);
      DependentPackIndexingTypes.InsertNode(N: Canon, InsertPos);
    }
    Canonical = QualType(Canon, 0);
  }

  // The sugar node itself is not uniqued.
  void *Mem =
      Allocate(Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
               Align: TypeAlignment);
  auto *T = new (Mem) PackIndexingType(Canonical, Pattern, IndexExpr,
                                       FullySubstituted, Expansions);
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6573
/// getUnaryTransformType - Return the unary transform type (e.g.
/// __underlying_type(T)) for the given base type and transform kind.
/// NOTE(review): contrary to the historical comment here claiming these are
/// not uniqued, they are uniqued in the UnaryTransformTypes folding set below.
QualType
ASTContext::getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
                                  UnaryTransformType::UTTKind Kind) const {

  llvm::FoldingSetNodeID ID;
  UnaryTransformType::Profile(ID, BaseType, UnderlyingType, UKind: Kind);

  void *InsertPos = nullptr;
  if (UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(UT, 0);

  QualType CanonType;
  if (!BaseType->isDependentType()) {
    // Non-dependent: the transform has already been applied, so the canonical
    // form of the result type is canonical for this node too.
    CanonType = UnderlyingType.getCanonicalType();
  } else {
    // Dependent: the underlying type is not known yet; canonicalize on the
    // canonical form of the base type instead.
    assert(UnderlyingType.isNull() || BaseType == UnderlyingType);
    UnderlyingType = QualType();
    if (QualType CanonBase = BaseType.getCanonicalType();
        BaseType != CanonBase) {
      CanonType = getUnaryTransformType(BaseType: CanonBase, UnderlyingType: QualType(), Kind);
      assert(CanonType.isCanonical());

      // Find the insertion position again.
      [[maybe_unused]] UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!UT && "broken canonicalization");
    }
  }

  auto *UT = new (*this, alignof(UnaryTransformType))
      UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
  UnaryTransformTypes.InsertNode(N: UT, InsertPos);
  Types.push_back(Elt: UT);
  return QualType(UT, 0);
}
6612
/// Build or retrieve the uniqued AutoType node with the given deduced type,
/// keyword, dependence, and optional type-constraint. \p IsCanon indicates
/// the caller is itself constructing the canonical form, so no further
/// canonicalization is attempted.
QualType ASTContext::getAutoTypeInternal(
    QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
    bool IsPack, ConceptDecl *TypeConstraintConcept,
    ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
  // The plain undeduced, unconstrained 'auto' is a lazily-created singleton.
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  bool IsDeducedDependent =
      !DeducedType.isNull() && DeducedType->isDependentType();
  AutoType::Profile(ID, Context: *this, Deduced: DeducedType, Keyword,
                    IsDependent: IsDependent || IsDeducedDependent, CD: TypeConstraintConcept,
                    Arguments: TypeConstraintArgs);
  if (auto const AT_iter = AutoTypes.find(Val: ID); AT_iter != AutoTypes.end())
    return QualType(AT_iter->getSecond(), 0);

  // Compute the canonical type: either the canonical deduced type, or (for a
  // constrained auto) a recursively-built AutoType using the canonical
  // concept declaration and canonical constraint arguments.
  QualType Canon;
  if (!IsCanon) {
    if (!DeducedType.isNull()) {
      Canon = DeducedType.getCanonicalType();
    } else if (TypeConstraintConcept) {
      bool AnyNonCanonArgs = false;
      ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl();
      auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
          C: *this, Args: TypeConstraintArgs, AnyNonCanonArgs);
      if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
        Canon = getAutoTypeInternal(DeducedType: QualType(), Keyword, IsDependent, IsPack,
                                    TypeConstraintConcept: CanonicalConcept, TypeConstraintArgs: CanonicalConceptArgs,
                                    /*IsCanon=*/true);
      }
    }
  }

  // The constraint arguments are stored in memory trailing the AutoType.
  void *Mem = Allocate(Size: sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       Align: alignof(AutoType));
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ? TypeDependence::DependentInstantiation
                   : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      Canon, TypeConstraintConcept, TypeConstraintArgs);
#ifndef NDEBUG
  // The constructed node must profile identically to the lookup key, or the
  // uniquing map would hand out distinct nodes for the same type.
  llvm::FoldingSetNodeID InsertedID;
  AT->Profile(InsertedID, *this);
  assert(InsertedID == ID && "ID does not match");
#endif
  Types.push_back(Elt: AT);
  AutoTypes.try_emplace(Key: ID, Args&: AT);
  return QualType(AT, 0);
}
6666
6667/// getAutoType - Return the uniqued reference to the 'auto' type which has been
6668/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
6669/// canonical deduced-but-dependent 'auto' type.
6670QualType
6671ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
6672 bool IsDependent, bool IsPack,
6673 ConceptDecl *TypeConstraintConcept,
6674 ArrayRef<TemplateArgument> TypeConstraintArgs) const {
6675 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
6676 assert((!IsDependent || DeducedType.isNull()) &&
6677 "A dependent auto should be undeduced");
6678 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
6679 TypeConstraintConcept, TypeConstraintArgs);
6680}
6681
/// Strip a top-level type-constraint from a constrained 'auto' /
/// 'decltype(auto)', preserving qualifiers. Returns \p T unchanged when it is
/// not a constrained auto type.
QualType ASTContext::getUnconstrainedType(QualType T) const {
  QualType CanonT = T.getNonPackExpansionType().getCanonicalType();

  // Remove a type-constraint from a top-level auto or decltype(auto).
  if (auto *AT = CanonT->getAs<AutoType>()) {
    if (!AT->isConstrained())
      return T;
    // Rebuild the auto type without its constraint, then re-apply the
    // original top-level qualifiers.
    return getQualifiedType(T: getAutoType(DeducedType: QualType(), Keyword: AT->getKeyword(),
                                         IsDependent: AT->isDependentType(),
                                         IsPack: AT->containsUnexpandedParameterPack()),
                            Qs: T.getQualifiers());
  }

  // FIXME: We only support constrained auto at the top level in the type of a
  // non-type template parameter at the moment. Once we lift that restriction,
  // we'll need to recursively build types containing auto here.
  assert(!CanonT->getContainedAutoType() ||
         !CanonT->getContainedAutoType()->isConstrained());
  return T;
}
6702
/// Build or retrieve a uniqued DeducedTemplateSpecializationType; \p Canon
/// supplies the canonical type when the node being built is itself
/// non-canonical.
QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
    TemplateName Template, QualType DeducedType, bool IsDependent,
    QualType Canon) const {
  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, Template, Deduced: DeducedType,
                                             IsDependent);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
      DeducedTemplateSpecializationType(Template, DeducedType, IsDependent,
                                        Canon);

#ifndef NDEBUG
  // The constructed node must profile identically to the lookup key.
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
#endif
  Types.push_back(Elt: DTST);
  DeducedTemplateSpecializationTypes.InsertNode(N: DTST, InsertPos);
  return QualType(DTST, 0);
}
6728
6729/// Return the uniqued reference to the deduced template specialization type
6730/// which has been deduced to the given type, or to the canonical undeduced
6731/// such type, or the canonical deduced-but-dependent such type.
6732QualType ASTContext::getDeducedTemplateSpecializationType(
6733 TemplateName Template, QualType DeducedType, bool IsDependent) const {
6734 QualType Canon = DeducedType.isNull()
6735 ? getDeducedTemplateSpecializationTypeInternal(
6736 Template: getCanonicalTemplateName(Name: Template), DeducedType: QualType(),
6737 IsDependent, Canon: QualType())
6738 : DeducedType.getCanonicalType();
6739 return getDeducedTemplateSpecializationTypeInternal(Template, DeducedType,
6740 IsDependent, Canon);
6741}
6742
/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique atomic types, to guarantee there is only one node for a particular
  // value type.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about, since the
    // recursive call may have invalidated the previous one.
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
  Types.push_back(Elt: New);
  AtomicTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
6770
6771/// getAutoDeductType - Get type pattern for deducing against 'auto'.
6772QualType ASTContext::getAutoDeductType() const {
6773 if (AutoDeductTy.isNull())
6774 AutoDeductTy = QualType(new (*this, alignof(AutoType))
6775 AutoType(QualType(), AutoTypeKeyword::Auto,
6776 TypeDependence::None, QualType(),
6777 /*concept*/ nullptr, /*args*/ {}),
6778 0);
6779 return AutoDeductTy;
6780}
6781
6782/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
6783QualType ASTContext::getAutoRRefDeductType() const {
6784 if (AutoRRefDeductTy.isNull())
6785 AutoRRefDeductTy = getRValueReferenceType(T: getAutoDeductType());
6786 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
6787 return AutoRRefDeductTy;
6788}
6789
6790/// getTagDeclType - Return the unique reference to the type for the
6791/// specified TagDecl (struct/union/class/enum) decl.
6792QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
6793 assert(Decl);
6794 // FIXME: What is the design on getTagDeclType when it requires casting
6795 // away const? mutable?
6796 return getTypeDeclType(Decl: const_cast<TagDecl*>(Decl));
6797}
6798
6799/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
6800/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
6801/// needs to agree with the definition in <stddef.h>.
6802CanQualType ASTContext::getSizeType() const {
6803 return getFromTargetType(Type: Target->getSizeType());
6804}
6805
6806/// Return the unique signed counterpart of the integer type
6807/// corresponding to size_t.
6808CanQualType ASTContext::getSignedSizeType() const {
6809 return getFromTargetType(Type: Target->getSignedSizeType());
6810}
6811
6812/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
6813CanQualType ASTContext::getIntMaxType() const {
6814 return getFromTargetType(Type: Target->getIntMaxType());
6815}
6816
6817/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
6818CanQualType ASTContext::getUIntMaxType() const {
6819 return getFromTargetType(Type: Target->getUIntMaxType());
6820}
6821
6822/// getSignedWCharType - Return the type of "signed wchar_t".
6823/// Used when in C++, as a GCC extension.
6824QualType ASTContext::getSignedWCharType() const {
6825 // FIXME: derive from "Target" ?
6826 return WCharTy;
6827}
6828
6829/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
6830/// Used when in C++, as a GCC extension.
6831QualType ASTContext::getUnsignedWCharType() const {
6832 // FIXME: derive from "Target" ?
6833 return UnsignedIntTy;
6834}
6835
6836QualType ASTContext::getIntPtrType() const {
6837 return getFromTargetType(Type: Target->getIntPtrType());
6838}
6839
6840QualType ASTContext::getUIntPtrType() const {
6841 return getCorrespondingUnsignedType(T: getIntPtrType());
6842}
6843
6844/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
6845/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
6846QualType ASTContext::getPointerDiffType() const {
6847 return getFromTargetType(Type: Target->getPtrDiffType(AddrSpace: LangAS::Default));
6848}
6849
6850/// Return the unique unsigned counterpart of "ptrdiff_t"
6851/// integer type. The standard (C11 7.21.6.1p7) refers to this type
6852/// in the definition of %tu format specifier.
6853QualType ASTContext::getUnsignedPointerDiffType() const {
6854 return getFromTargetType(Type: Target->getUnsignedPtrDiffType(AddrSpace: LangAS::Default));
6855}
6856
6857/// Return the unique type for "pid_t" defined in
6858/// <sys/types.h>. We need this to compute the correct type for vfork().
6859QualType ASTContext::getProcessIDType() const {
6860 return getFromTargetType(Type: Target->getProcessIDType());
6861}
6862
6863//===----------------------------------------------------------------------===//
6864// Type Operators
6865//===----------------------------------------------------------------------===//
6866
6867CanQualType ASTContext::getCanonicalParamType(QualType T) const {
6868 // Push qualifiers into arrays, and then discard any remaining
6869 // qualifiers.
6870 T = getCanonicalType(T);
6871 T = getVariableArrayDecayedType(type: T);
6872 const Type *Ty = T.getTypePtr();
6873 QualType Result;
6874 if (getLangOpts().HLSL && isa<ConstantArrayType>(Val: Ty)) {
6875 Result = getArrayParameterType(Ty: QualType(Ty, 0));
6876 } else if (isa<ArrayType>(Val: Ty)) {
6877 Result = getArrayDecayedType(T: QualType(Ty,0));
6878 } else if (isa<FunctionType>(Val: Ty)) {
6879 Result = getPointerType(T: QualType(Ty, 0));
6880 } else {
6881 Result = QualType(Ty, 0);
6882 }
6883
6884 return CanQualType::CreateUnsafe(Other: Result);
6885}
6886
/// Return \p type with qualifiers stripped, recursing through array element
/// types; the stripped qualifiers are accumulated into \p quals.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) const {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(Val: splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(type: elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(qs: splitType.Quals);

  // Rebuild the array type around the unqualified element type, preserving
  // the original size/modifier information for each array flavor.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
    return getConstantArrayType(EltTy: unqualElementType, ArySizeIn: CAT->getSize(),
                                SizeExpr: CAT->getSizeExpr(), ASM: CAT->getSizeModifier(), IndexTypeQuals: 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: AT)) {
    return getIncompleteArrayType(elementType: unqualElementType, ASM: IAT->getSizeModifier(), elementTypeQuals: 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(Val: AT)) {
    return getVariableArrayType(EltTy: unqualElementType, NumElts: VAT->getSizeExpr(),
                                ASM: VAT->getSizeModifier(),
                                IndexTypeQuals: VAT->getIndexTypeCVRQualifiers());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(Val: AT);
  return getDependentSizedArrayType(elementType: unqualElementType, numElements: DSAT->getSizeExpr(),
                                    ASM: DSAT->getSizeModifier(), elementTypeQuals: 0);
}
6939
/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
                                         bool AllowPiMismatch) const {
  // Peel matching array layers off both types in lock-step.
  while (true) {
    auto *AT1 = getAsArrayType(T: T1);
    if (!AT1)
      return;

    auto *AT2 = getAsArrayType(T: T2);
    if (!AT2)
      return;

    // If we don't have two array types with the same constant bound nor two
    // incomplete array types, we've unwrapped everything we can.
    // C++20 also permits one type to be a constant array type and the other
    // to be an incomplete array type.
    // FIXME: Consider also unwrapping array of unknown bound and VLA.
    if (auto *CAT1 = dyn_cast<ConstantArrayType>(Val: AT1)) {
      auto *CAT2 = dyn_cast<ConstantArrayType>(Val: AT2);
      if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<IncompleteArrayType>(Val: AT2))))
        return;
    } else if (isa<IncompleteArrayType>(Val: AT1)) {
      if (!(isa<IncompleteArrayType>(Val: AT2) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<ConstantArrayType>(Val: AT2))))
        return;
    } else {
      return;
    }

    // This layer matched: continue with the element types.
    T1 = AT1->getElementType();
    T2 = AT2->getElementType();
  }
}
6981
/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
///
/// If T1 and T2 are both pointer types of the same kind, or both array types
/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
///
/// This function will typically be called in a loop that successively
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
///         pair of types that can't be unwrapped further.
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) const {
  // First peel any matching array layers.
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  // Ordinary pointers.
  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  // Pointers to member: the class and the qualifier must also agree for the
  // layer to count as matching.
  if (const auto *T1MPType = T1->getAs<MemberPointerType>(),
                 *T2MPType = T2->getAs<MemberPointerType>();
      T1MPType && T2MPType) {
    if (auto *RD1 = T1MPType->getMostRecentCXXRecordDecl(),
             *RD2 = T2MPType->getMostRecentCXXRecordDecl();
        RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
      return false;
    if (getCanonicalNestedNameSpecifier(NNS: T1MPType->getQualifier()) !=
        getCanonicalNestedNameSpecifier(NNS: T2MPType->getQualifier()))
      return false;
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  // Objective-C object pointers, when ObjC is enabled.
  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}
7038
7039bool ASTContext::hasSimilarType(QualType T1, QualType T2) const {
7040 while (true) {
7041 Qualifiers Quals;
7042 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals);
7043 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals);
7044 if (hasSameType(T1, T2))
7045 return true;
7046 if (!UnwrapSimilarTypes(T1, T2))
7047 return false;
7048 }
7049}
7050
7051bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
7052 while (true) {
7053 Qualifiers Quals1, Quals2;
7054 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals1);
7055 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals2);
7056
7057 Quals1.removeCVRQualifiers();
7058 Quals2.removeCVRQualifiers();
7059 if (Quals1 != Quals2)
7060 return false;
7061
7062 if (hasSameType(T1, T2))
7063 return true;
7064
7065 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
7066 return false;
7067 }
7068}
7069
/// Compute the DeclarationNameInfo that names the given template at
/// \p NameLoc, dispatching on the template name's kind.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // All overloads share one name, so any element's name will do.
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    // A dependent template name is either an identifier or an overloaded
    // operator name.
    IdentifierOrOverloadedOperator TN = DTN->getName();
    DeclarationName DName;
    if (const IdentifierInfo *II = TN.getIdentifier()) {
      DName = DeclarationNames.getIdentifier(ID: II);
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(Op: TN.getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(Range: SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  case TemplateName::DeducedTemplate: {
    // Deduced names are named after their underlying template.
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    return getNameForTemplate(Name: DTS->getUnderlying(), NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}
7131
7132static const TemplateArgument *
7133getDefaultTemplateArgumentOrNone(const NamedDecl *P) {
7134 auto handleParam = [](auto *TP) -> const TemplateArgument * {
7135 if (!TP->hasDefaultArgument())
7136 return nullptr;
7137 return &TP->getDefaultArgument().getArgument();
7138 };
7139 switch (P->getKind()) {
7140 case NamedDecl::TemplateTypeParm:
7141 return handleParam(cast<TemplateTypeParmDecl>(Val: P));
7142 case NamedDecl::NonTypeTemplateParm:
7143 return handleParam(cast<NonTypeTemplateParmDecl>(Val: P));
7144 case NamedDecl::TemplateTemplateParm:
7145 return handleParam(cast<TemplateTemplateParmDecl>(Val: P));
7146 default:
7147 llvm_unreachable("Unexpected template parameter kind");
7148 }
7149}
7150
/// Produce the canonical form of the given template name. \p IgnoreDeduced
/// additionally treats DeducedTemplate nodes as sugar to strip.
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
                                                  bool IgnoreDeduced) const {
  // First strip all sugar layers that desugar() knows how to remove.
  while (std::optional<TemplateName> UnderlyingOrNone =
             Name.desugar(IgnoreDeduced))
    Name = *UnderlyingOrNone;

  switch (Name.getKind()) {
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Val: Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Val: Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    NestedNameSpecifier *Qualifier = DTN->getQualifier();
    NestedNameSpecifier *CanonQualifier =
        getCanonicalNestedNameSpecifier(NNS: Qualifier);
    // Rebuild (with a canonical qualifier and the 'template' keyword) unless
    // the name is already in that form.
    if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
      return getDependentTemplateName(Name: {CanonQualifier, DTN->getName(),
                                      /*HasTemplateKeyword=*/true});
    return Name;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(Arg: subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        ArgPack: canonArgPack, AssociatedDecl: subst->getAssociatedDecl()->getCanonicalDecl(),
        Index: subst->getIndex(), Final: subst->getFinal());
  }
  case TemplateName::DeducedTemplate: {
    // Only reached when IgnoreDeduced is false; otherwise desugar() above
    // already stripped this node.
    assert(IgnoreDeduced == false);
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    DefaultArguments DefArgs = DTS->getDefaultArguments();
    TemplateName Underlying = DTS->getUnderlying();

    TemplateName CanonUnderlying =
        getCanonicalTemplateName(Name: Underlying, /*IgnoreDeduced=*/true);
    bool NonCanonical = CanonUnderlying != Underlying;
    auto CanonArgs =
        getCanonicalTemplateArguments(C: *this, Args: DefArgs.Args, AnyNonCanonArgs&: NonCanonical);

    ArrayRef<NamedDecl *> Params =
        CanonUnderlying.getAsTemplateDecl()->getTemplateParameters()->asArray();
    assert(CanonArgs.size() <= Params.size());
    // A deduced template name which deduces the same default arguments already
    // declared in the underlying template is the same template as the
    // underlying template. We need to note any arguments which differ from
    // the corresponding declaration. If any argument differs, we must build a
    // deduced template name.
    for (int I = CanonArgs.size() - 1; I >= 0; --I) {
      const TemplateArgument *A = getDefaultTemplateArgumentOrNone(P: Params[I]);
      if (!A)
        break;
      auto CanonParamDefArg = getCanonicalTemplateArgument(Arg: *A);
      TemplateArgument &CanonDefArg = CanonArgs[I];
      if (CanonDefArg.structurallyEquals(Other: CanonParamDefArg))
        continue;
      // Keep popping from the back any default arguments which are the same.
      if (I == int(CanonArgs.size() - 1))
        CanonArgs.pop_back();
      NonCanonical = true;
    }
    return NonCanonical ? getDeducedTemplateName(
                              Underlying: CanonUnderlying,
                              /*DefaultArgs=*/{.StartPos: DefArgs.StartPos, .Args: CanonArgs})
                        : Name;
  }
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::SubstTemplateTemplateParm:
    llvm_unreachable("always sugar node");
  }

  llvm_unreachable("bad template name!");
}
7238
7239bool ASTContext::hasSameTemplateName(const TemplateName &X,
7240 const TemplateName &Y,
7241 bool IgnoreDeduced) const {
7242 return getCanonicalTemplateName(Name: X, IgnoreDeduced) ==
7243 getCanonicalTemplateName(Name: Y, IgnoreDeduced);
7244}
7245
7246bool ASTContext::isSameAssociatedConstraint(
7247 const AssociatedConstraint &ACX, const AssociatedConstraint &ACY) const {
7248 if (ACX.ArgPackSubstIndex != ACY.ArgPackSubstIndex)
7249 return false;
7250 if (!isSameConstraintExpr(XCE: ACX.ConstraintExpr, YCE: ACY.ConstraintExpr))
7251 return false;
7252 return true;
7253}
7254
7255bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
7256 if (!XCE != !YCE)
7257 return false;
7258
7259 if (!XCE)
7260 return true;
7261
7262 llvm::FoldingSetNodeID XCEID, YCEID;
7263 XCE->Profile(ID&: XCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7264 YCE->Profile(ID&: YCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7265 return XCEID == YCEID;
7266}
7267
/// Determine whether two type-constraints (e.g. on template type parameters)
/// are equivalent for the purpose of declaration matching.
bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
                                      const TypeConstraint *YTC) const {
  // Null only matches null.
  if (!XTC != !YTC)
    return false;

  if (!XTC)
    return true;

  // The named concepts must be the same entity.
  auto *NCX = XTC->getNamedConcept();
  auto *NCY = YTC->getNamedConcept();
  if (!NCX || !NCY || !isSameEntity(X: NCX, Y: NCY))
    return false;
  // Explicit template argument lists must agree in presence and arity before
  // the (more expensive) structural comparison below.
  if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
      YTC->getConceptReference()->hasExplicitTemplateArgs())
    return false;
  if (XTC->getConceptReference()->hasExplicitTemplateArgs())
    if (XTC->getConceptReference()
            ->getTemplateArgsAsWritten()
            ->NumTemplateArgs !=
        YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
      return false;

  // Compare slowly by profiling.
  //
  // We couldn't compare the profiling result for the template
  // args here. Consider the following example in different modules:
  //
  // template <__integer_like _Tp, C<_Tp> Sentinel>
  // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
  //   return __t;
  // }
  //
  // When we compare the profiling result for `C<_Tp>` in different
  // modules, it will compare the type of `_Tp` in different modules.
  // However, the type of `_Tp` in different modules refer to different
  // types here naturally. So we couldn't compare the profiling result
  // for the template args directly.
  return isSameConstraintExpr(XCE: XTC->getImmediatelyDeclaredConstraint(),
                              YCE: YTC->getImmediatelyDeclaredConstraint());
}
7308
/// Determine whether two template parameters are equivalent: same kind, same
/// pack-ness, and (per kind) matching constraints, types, or nested
/// parameter lists.
bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
                                         const NamedDecl *Y) const {
  if (X->getKind() != Y->getKind())
    return false;

  // Type parameter: compare pack-ness and type-constraints.
  if (auto *TX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
    auto *TY = cast<TemplateTypeParmDecl>(Val: Y);
    if (TX->isParameterPack() != TY->isParameterPack())
      return false;
    if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
      return false;
    return isSameTypeConstraint(XTC: TX->getTypeConstraint(),
                                YTC: TY->getTypeConstraint());
  }

  // Non-type parameter: compare pack-ness, types, and placeholder
  // constraints.
  if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
    auto *TY = cast<NonTypeTemplateParmDecl>(Val: Y);
    return TX->isParameterPack() == TY->isParameterPack() &&
           TX->getASTContext().hasSameType(T1: TX->getType(), T2: TY->getType()) &&
           isSameConstraintExpr(XCE: TX->getPlaceholderTypeConstraint(),
                                YCE: TY->getPlaceholderTypeConstraint());
  }

  // Template template parameter: compare pack-ness and the nested parameter
  // lists recursively.
  auto *TX = cast<TemplateTemplateParmDecl>(Val: X);
  auto *TY = cast<TemplateTemplateParmDecl>(Val: Y);
  return TX->isParameterPack() == TY->isParameterPack() &&
         isSameTemplateParameterList(X: TX->getTemplateParameters(),
                                     Y: TY->getTemplateParameters());
}
7338
7339bool ASTContext::isSameTemplateParameterList(
7340 const TemplateParameterList *X, const TemplateParameterList *Y) const {
7341 if (X->size() != Y->size())
7342 return false;
7343
7344 for (unsigned I = 0, N = X->size(); I != N; ++I)
7345 if (!isSameTemplateParameter(X: X->getParam(Idx: I), Y: Y->getParam(Idx: I)))
7346 return false;
7347
7348 return isSameConstraintExpr(XCE: X->getRequiresClause(), YCE: Y->getRequiresClause());
7349}
7350
/// Determine whether two template parameters declare equivalent default
/// arguments. Parameters that are not themselves equivalent, or where either
/// side lacks a default, compare unequal.
bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
                                               const NamedDecl *Y) const {
  // If the type parameter isn't the same already, we don't need to check the
  // default argument further.
  if (!isSameTemplateParameter(X, Y))
    return false;

  // Type parameter defaults: compare the default types.
  if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
    auto *TTPY = cast<TemplateTypeParmDecl>(Val: Y);
    if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
      return false;

    return hasSameType(T1: TTPX->getDefaultArgument().getArgument().getAsType(),
                       T2: TTPY->getDefaultArgument().getArgument().getAsType());
  }

  // Non-type parameter defaults: compare canonical profiles of the default
  // expressions, ignoring implicit casts.
  if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
    auto *NTTPY = cast<NonTypeTemplateParmDecl>(Val: Y);
    if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
      return false;

    Expr *DefaultArgumentX =
        NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
    Expr *DefaultArgumentY =
        NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
    llvm::FoldingSetNodeID XID, YID;
    DefaultArgumentX->Profile(ID&: XID, Context: *this, /*Canonical=*/true);
    DefaultArgumentY->Profile(ID&: YID, Context: *this, /*Canonical=*/true);
    return XID == YID;
  }

  // Template template parameter defaults: compare the default template names.
  auto *TTPX = cast<TemplateTemplateParmDecl>(Val: X);
  auto *TTPY = cast<TemplateTemplateParmDecl>(Val: Y);

  if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
    return false;

  const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
  const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
  return hasSameTemplateName(X: TAX.getAsTemplate(), Y: TAY.getAsTemplate());
}
7392
7393static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
7394 if (auto *NS = X->getAsNamespace())
7395 return NS;
7396 if (auto *NAS = X->getAsNamespaceAlias())
7397 return NAS->getNamespace();
7398 return nullptr;
7399}
7400
/// Determine whether two nested-name-specifiers name the same sequence of
/// entities, component by component (including any prefix).
static bool isSameQualifier(const NestedNameSpecifier *X,
                            const NestedNameSpecifier *Y) {
  // Namespaces (and namespace aliases) compare by canonical declaration, so
  // an alias and the namespace it names are considered the same qualifier.
  if (auto *NSX = getNamespace(X)) {
    auto *NSY = getNamespace(X: Y);
    if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
      return false;
  } else if (X->getKind() != Y->getKind())
    return false;

  // FIXME: For namespaces and types, we're permitted to check that the entity
  // is named via the same tokens. We should probably do so.
  switch (X->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Dependent identifier components must be spelled identically.
    if (X->getAsIdentifier() != Y->getAsIdentifier())
      return false;
    break;
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    // We've already checked that we named the same namespace.
    break;
  case NestedNameSpecifier::TypeSpec:
    // Type components compare by canonical type.
    if (X->getAsType()->getCanonicalTypeInternal() !=
        Y->getAsType()->getCanonicalTypeInternal())
      return false;
    break;
  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // '::' and '__super' have no prefix; nothing further to compare.
    return true;
  }

  // Recurse into earlier portion of NNS, if any.
  auto *PX = X->getPrefix();
  auto *PY = Y->getPrefix();
  if (PX && PY)
    return isSameQualifier(X: PX, Y: PY);
  // Either both have no prefix, or exactly one does (mismatch).
  return !PX && !PY;
}
7438
7439static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
7440 if (!A->getASTContext().getLangOpts().CUDA)
7441 return true; // Target attributes are overloadable in CUDA compilation only.
7442 if (A->hasAttr<CUDADeviceAttr>() != B->hasAttr<CUDADeviceAttr>())
7443 return false;
7444 if (A->hasAttr<CUDADeviceAttr>() && B->hasAttr<CUDADeviceAttr>())
7445 return A->hasAttr<CUDAHostAttr>() == B->hasAttr<CUDAHostAttr>();
7446 return true; // unattributed and __host__ functions are the same.
7447}
7448
/// Determine whether the attributes we can overload on are identical for A and
/// B. Will ignore any overloadable attrs represented in the type of A and B.
static bool hasSameOverloadableAttrs(const FunctionDecl *A,
                                     const FunctionDecl *B) {
  // Note that pass_object_size attributes are represented in the function's
  // ExtParameterInfo, so we don't need to check them here.

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  // Walk both enable_if lists in lockstep; zip_longest yields std::nullopt
  // for the shorter side, which detects a count mismatch.
  for (auto Pair : zip_longest(t&: AEnableIfAttrs, u&: BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(t&: Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(t&: Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    // Reuse the profile IDs across iterations; clear stale state first.
    Cand1ID.clear();
    Cand2ID.clear();

    // Compare the conditions structurally, in canonical form.
    (*Cand1A)->getCond()->Profile(ID&: Cand1ID, Context: A->getASTContext(), Canonical: true);
    (*Cand2A)->getCond()->Profile(ID&: Cand2ID, Context: B->getASTContext(), Canonical: true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  // enable_if lists match; CUDA target attributes are the remaining
  // overloadable attributes to check.
  return hasSameCudaAttrs(A, B);
}
7481
// Determine whether two declarations (typically from different modules or
// redeclaration chains) declare the same entity, for the purpose of merging
// them during deserialization.
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(D1: cast<Decl>(Val: X->getDeclContext()->getRedeclContext()),
                          D2: cast<Decl>(Val: Y->getDeclContext()->getRedeclContext())))
    return false;

  // If either X or Y are local to the owning module, they are only possible to
  // be the same entity if they are in the same module.
  if (X->isModuleLocal() || Y->isModuleLocal())
    if (!isInSameModule(M1: X->getOwningModule(), M2: Y->getOwningModule()))
      return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(Val: X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Val: Y))
      return hasSameType(T1: TypedefX->getUnderlyingType(),
                         T2: TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(Val: X) || isa<ObjCProtocolDecl>(Val: X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(Val: X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match.
  if (const auto *TagX = dyn_cast<TagDecl>(Val: X)) {
    const auto *TagY = cast<TagDecl>(Val: Y);
    // struct/class/interface are mutually compatible tag kinds; enum and
    // union only match themselves.
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TagTypeKind::Struct ||
             TagX->getTagKind() == TagTypeKind::Class ||
             TagX->getTagKind() == TagTypeKind::Interface) &&
            (TagY->getTagKind() == TagTypeKind::Struct ||
             TagY->getTagKind() == TagTypeKind::Class ||
             TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(Val: X)) {
    const auto *FuncY = cast<FunctionDecl>(Val: Y);
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(Val: X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Val: Y);
      // Inheriting constructors must inherit from the same base constructor.
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(X: CtorX->getInheritedConstructor().getConstructor(),
                        Y: CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            DC: FuncY->getLexicalDeclContext())) {
      return false;
    }

    // Trailing requires-clauses must be equivalent constraints.
    if (!isSameAssociatedConstraint(ACX: FuncX->getTrailingRequiresClause(),
                                    ACY: FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(T1: XT, T2: YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(ESpecType: XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(ESpecType: YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(T: XT, U: YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(A: FuncX, B: FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(Val: X)) {
    const auto *VarY = cast<VarDecl>(Val: Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(T1: VarX->getType(), T2: VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // Only? happens when completing an incomplete array type. In this case
      // when comparing #1 and #2 we should go through their element type.
      const ArrayType *VarXTy = getAsArrayType(T: VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(T: VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(T1: VarXTy->getElementType(), T2: VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(Val: X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Val: Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(Val: X)) {
    const auto *TemplateY = cast<TemplateDecl>(Val: Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(Val: X)) {
      const auto *ConceptY = cast<ConceptDecl>(Val: Y);
      if (!isSameConstraintExpr(XCE: ConceptX->getConstraintExpr(),
                                YCE: ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(X: TemplateX->getTemplatedDecl(),
                        Y: TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(X: TemplateX->getTemplateParameters(),
                                       Y: TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(Val: X)) {
    const auto *FDY = cast<FieldDecl>(Val: Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(T1: FDX->getType(), T2: FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(Val: X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Val: Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(Val: X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(Val: X)) {
    const auto *USY = cast<UsingShadowDecl>(Val: Y);
    return declaresSameEntity(D1: USX->getTargetDecl(), D2: USY->getTargetDecl());
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(Val: X)) {
    const auto *UY = cast<UsingDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(Val: X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(Val: X)) {
    return isSameQualifier(
        X: UX->getQualifier(),
        Y: cast<UnresolvedUsingTypenameDecl>(Val: Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(Val: X)) {
    return declaresSameEntity(
        D1: UX->getInstantiatedFromUsingDecl(),
        D2: cast<UsingPackDecl>(Val: Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(Val: X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Val: Y);
    return NAX->getNamespace()->Equals(DC: NAY->getNamespace());
  }

  // Any other kind of declaration is never mergeable here.
  return false;
}
7721
// Produce the canonical form of a template argument: canonicalize the types,
// declarations, template names, and nested arguments it contains, preserving
// the argument's kind and defaulted-ness.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    // Nothing to canonicalize.
    return Arg;

  case TemplateArgument::Expression:
    // Re-wrap the expression, marking the argument as canonical.
    return TemplateArgument(Arg.getAsExpr(), /*IsCanonical=*/true,
                            Arg.getIsDefaulted());

  case TemplateArgument::Declaration: {
    // Canonicalize both the declaration and the parameter type it was
    // matched against.
    auto *D = cast<ValueDecl>(Val: Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(T: Arg.getParamTypeForDecl()),
                            Arg.getIsDefaulted());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(T: Arg.getNullPtrType()),
                            /*isNullPtr*/ true, Arg.getIsDefaulted());

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Name: Arg.getAsTemplate()),
                            Arg.getIsDefaulted());

  case TemplateArgument::TemplateExpansion:
    // Canonicalize the pattern; the expansion count is preserved.
    return TemplateArgument(
        getCanonicalTemplateName(Name: Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

  case TemplateArgument::Integral:
    // Keep the value, canonicalize the integral type.
    return TemplateArgument(Arg, getCanonicalType(T: Arg.getIntegralType()));

  case TemplateArgument::StructuralValue:
    return TemplateArgument(*this,
                            getCanonicalType(T: Arg.getStructuralValueType()),
                            Arg.getAsStructuralValue(), Arg.getIsDefaulted());

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(T: Arg.getAsType()),
                            /*isNullPtr*/ false, Arg.getIsDefaulted());

  case TemplateArgument::Pack: {
    // Canonicalize every element; only build a new pack if at least one
    // element actually changed.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        C: *this, Args: Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    auto NewArg = TemplateArgument::CreatePackCopy(
        Context&: const_cast<ASTContext &>(*this), Args: CanonArgs);
    NewArg.setIsDefaulted(Arg.getIsDefaulted());
    return NewArg;
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
7779
// Determine whether two template arguments are equivalent, comparing each
// kind by its appropriate notion of identity (canonical type, canonical
// declaration, canonical template name, value, or structural profile).
bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
                                        const TemplateArgument &Arg2) const {
  if (Arg1.getKind() != Arg2.getKind())
    return false;

  switch (Arg1.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Comparing NULL template argument");

  case TemplateArgument::Type:
    return hasSameType(T1: Arg1.getAsType(), T2: Arg2.getAsType());

  case TemplateArgument::Declaration:
    // Compare the underlying canonical declarations, looking through
    // using-declarations and the like.
    return Arg1.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl() ==
           Arg2.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl();

  case TemplateArgument::NullPtr:
    return hasSameType(T1: Arg1.getNullPtrType(), T2: Arg2.getNullPtrType());

  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion:
    return getCanonicalTemplateName(Name: Arg1.getAsTemplateOrTemplatePattern()) ==
           getCanonicalTemplateName(Name: Arg2.getAsTemplateOrTemplatePattern());

  case TemplateArgument::Integral:
    // Value comparison that tolerates differing bit widths/signedness.
    return llvm::APSInt::isSameValue(I1: Arg1.getAsIntegral(),
                                     I2: Arg2.getAsIntegral());

  case TemplateArgument::StructuralValue:
    return Arg1.structurallyEquals(Other: Arg2);

  case TemplateArgument::Expression: {
    // Compare expressions by their canonical structural profile.
    llvm::FoldingSetNodeID ID1, ID2;
    Arg1.getAsExpr()->Profile(ID&: ID1, Context: *this, /*Canonical=*/true);
    Arg2.getAsExpr()->Profile(ID&: ID2, Context: *this, /*Canonical=*/true);
    return ID1 == ID2;
  }

  case TemplateArgument::Pack:
    // Packs match if they have the same length and pairwise-equal elements.
    return llvm::equal(
        LRange: Arg1.getPackAsArray(), RRange: Arg2.getPackAsArray(),
        P: [&](const TemplateArgument &Arg1, const TemplateArgument &Arg2) {
          return isSameTemplateArgument(Arg1, Arg2);
        });
  }

  llvm_unreachable("Unhandled template argument kind");
}
7828
// Produce the canonical form of a nested-name-specifier: namespaces lose
// their prefixes and aliases, types are canonicalized, and dependent-name
// types are decomposed and rebuilt so equivalent qualifiers become pointer-
// identical.
NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(Context: *this,
                                       Prefix: getCanonicalNestedNameSpecifier(NNS: NNS->getPrefix()),
                                       II: NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(Context: *this, Prefix: nullptr,
                                       NS: NNS->getAsNamespace()->getFirstDecl());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace alias canonicalizes to the namespace it names, again with
    // no prefix.
    return NestedNameSpecifier::Create(
        Context: *this, Prefix: nullptr,
        NS: NNS->getAsNamespaceAlias()->getNamespace()->getFirstDecl());

  case NestedNameSpecifier::TypeSpec: {
    // Canonicalize the named type.
    const Type *T = getCanonicalType(T: NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(Context: *this, Prefix: DNT->getQualifier(),
                                         II: DNT->getIdentifier());
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) {
      // Rebuild the dependent template specialization with a canonical,
      // prefix-free dependent template name, then hang the canonicalized
      // qualifier off the result.
      const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
      QualType NewT = getDependentTemplateSpecializationType(
          Keyword: ElaboratedTypeKeyword::None,
          Name: {/*NNS=*/nullptr, DTN.getName(), /*HasTemplateKeyword=*/true},
          Args: DTST->template_arguments(), /*IsCanonical=*/true);
      assert(NewT.isCanonical());
      NestedNameSpecifier *Prefix = DTN.getQualifier();
      if (!Prefix)
        Prefix = getCanonicalNestedNameSpecifier(NNS: NNS->getPrefix());
      return NestedNameSpecifier::Create(Context: *this, Prefix, T: NewT.getTypePtr());
    }
    // Non-dependent type: the canonical type alone, with no prefix.
    return NestedNameSpecifier::Create(Context: *this, Prefix: nullptr, T);
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
7892
// Return T as an ArrayType if its canonical form is an array, desugaring as
// needed and propagating any top-level qualifiers into the element type
// (C99 6.7.3p8). Returns null if T is not an array type.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(Val&: T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(Val: T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(Val: split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(T: ATy->getElementType(), Qs: qs);

  // Rebuild the same flavor of array type around the newly-qualified
  // element type, preserving size/modifier/index-qualifier information.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getConstantArrayType(EltTy: NewEltTy, ArySizeIn: CAT->getSize(),
                                                SizeExpr: CAT->getSizeExpr(),
                                                ASM: CAT->getSizeModifier(),
                                                IndexTypeQuals: CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getIncompleteArrayType(elementType: NewEltTy,
                                                  ASM: IAT->getSizeModifier(),
                                                  elementTypeQuals: IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getDependentSizedArrayType(
        elementType: NewEltTy, numElements: DSAT->getSizeExpr(), ASM: DSAT->getSizeModifier(),
        elementTypeQuals: DSAT->getIndexTypeCVRQualifiers()));

  // The only remaining array flavor is a variable-length array.
  const auto *VAT = cast<VariableArrayType>(Val: ATy);
  return cast<ArrayType>(
      Val: getVariableArrayType(EltTy: NewEltTy, NumElts: VAT->getSizeExpr(), ASM: VAT->getSizeModifier(),
                            IndexTypeQuals: VAT->getIndexTypeCVRQualifiers()));
}
7945
7946QualType ASTContext::getAdjustedParameterType(QualType T) const {
7947 if (getLangOpts().HLSL && T->isConstantArrayType())
7948 return getArrayParameterType(Ty: T);
7949 if (T->isArrayType() || T->isFunctionType())
7950 return getDecayedType(T);
7951 return T;
7952}
7953
7954QualType ASTContext::getSignatureParameterType(QualType T) const {
7955 T = getVariableArrayDecayedType(type: T);
7956 T = getAdjustedParameterType(T);
7957 return T.getUnqualifiedType();
7958}
7959
7960QualType ASTContext::getExceptionObjectType(QualType T) const {
7961 // C++ [except.throw]p3:
7962 // A throw-expression initializes a temporary object, called the exception
7963 // object, the type of which is determined by removing any top-level
7964 // cv-qualifiers from the static type of the operand of throw and adjusting
7965 // the type from "array of T" or "function returning T" to "pointer to T"
7966 // or "pointer to function returning T", [...]
7967 T = getVariableArrayDecayedType(type: T);
7968 if (T->isArrayType() || T->isFunctionType())
7969 T = getDecayedType(T);
7970 return T.getUnqualifiedType();
7971}
7972
7973/// getArrayDecayedType - Return the properly qualified result of decaying the
7974/// specified array type to a pointer. This operation is non-trivial when
7975/// handling typedefs etc. The canonical type of "T" must be an array type,
7976/// this returns a pointer to a properly qualified element of the array.
7977///
7978/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
7979QualType ASTContext::getArrayDecayedType(QualType Ty) const {
7980 // Get the element type with 'getAsArrayType' so that we don't lose any
7981 // typedefs in the element type of the array. This also handles propagation
7982 // of type qualifiers from the array type into the element type if present
7983 // (C99 6.7.3p8).
7984 const ArrayType *PrettyArrayType = getAsArrayType(T: Ty);
7985 assert(PrettyArrayType && "Not an array type!");
7986
7987 QualType PtrTy = getPointerType(T: PrettyArrayType->getElementType());
7988
7989 // int x[restrict 4] -> int *restrict
7990 QualType Result = getQualifiedType(T: PtrTy,
7991 Qs: PrettyArrayType->getIndexTypeQualifiers());
7992
7993 // int x[_Nullable] -> int * _Nullable
7994 if (auto Nullability = Ty->getNullability()) {
7995 Result = const_cast<ASTContext *>(this)->getAttributedType(nullability: *Nullability,
7996 modifiedType: Result, equivalentType: Result);
7997 }
7998 return Result;
7999}
8000
// Convenience overload: the base element type of an array is the base
// element type of its element type (which may itself be an array).
QualType ASTContext::getBaseElementType(const ArrayType *array) const {
  return getBaseElementType(QT: array->getElementType());
}
8004
8005QualType ASTContext::getBaseElementType(QualType type) const {
8006 Qualifiers qs;
8007 while (true) {
8008 SplitQualType split = type.getSplitDesugaredType();
8009 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
8010 if (!array) break;
8011
8012 type = array->getElementType();
8013 qs.addConsistentQualifiers(qs: split.Quals);
8014 }
8015
8016 return getQualifiedType(T: type, Qs: qs);
8017}
8018
8019/// getConstantArrayElementCount - Returns number of constant array elements.
8020uint64_t
8021ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
8022 uint64_t ElementCount = 1;
8023 do {
8024 ElementCount *= CA->getZExtSize();
8025 CA = dyn_cast_or_null<ConstantArrayType>(
8026 Val: CA->getElementType()->getAsArrayTypeUnsafe());
8027 } while (CA);
8028 return ElementCount;
8029}
8030
8031uint64_t ASTContext::getArrayInitLoopExprElementCount(
8032 const ArrayInitLoopExpr *AILE) const {
8033 if (!AILE)
8034 return 0;
8035
8036 uint64_t ElementCount = 1;
8037
8038 do {
8039 ElementCount *= AILE->getArraySize().getZExtValue();
8040 AILE = dyn_cast<ArrayInitLoopExpr>(Val: AILE->getSubExpr());
8041 } while (AILE);
8042
8043 return ElementCount;
8044}
8045
/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
  // The rank of a complex type is the rank of its element type
  // (i.e. 'double' == '_Complex double' for ranking purposes).
  if (const auto *CT = T->getAs<ComplexType>())
    return getFloatingRank(T: CT->getElementType());

  switch (T->castAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("getFloatingRank(): not a floating type");
  case BuiltinType::Float16: return Float16Rank;
  case BuiltinType::Half: return HalfRank;
  case BuiltinType::Float: return FloatRank;
  case BuiltinType::Double: return DoubleRank;
  case BuiltinType::LongDouble: return LongDoubleRank;
  case BuiltinType::Float128: return Float128Rank;
  case BuiltinType::BFloat16: return BFloat16Rank;
  case BuiltinType::Ibm128: return Ibm128Rank;
  }
}
8064
8065/// getFloatingTypeOrder - Compare the rank of the two specified floating
8066/// point types, ignoring the domain of the type (i.e. 'double' ==
8067/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
8068/// LHS < RHS, return -1.
8069int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
8070 FloatingRank LHSR = getFloatingRank(T: LHS);
8071 FloatingRank RHSR = getFloatingRank(T: RHS);
8072
8073 if (LHSR == RHSR)
8074 return 0;
8075 if (LHSR > RHSR)
8076 return 1;
8077 return -1;
8078}
8079
8080int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
8081 if (&getFloatTypeSemantics(T: LHS) == &getFloatTypeSemantics(T: RHS))
8082 return 0;
8083 return getFloatingTypeOrder(LHS, RHS);
8084}
8085
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // Each rank is encoded as (bit width << 3) + tie-breaker, so a wider type
  // always outranks a narrower one, and the low 3 bits order the standard
  // types among themselves at equal width.

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
    return 0 + (EIT->getNumBits() << 3);

  switch (cast<BuiltinType>(Val: T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(T: BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(T: CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(T: ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(T: IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(T: LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(T: LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(T: Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(T: UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getWCharType()).getTypePtr());
  }
}
8138
/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \param E the expression that may refer to a bit-field.
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  // Dependent expressions cannot be classified yet.
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //   If the bit-field has an enumerated type, it is treated as any other
  //   value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue();
  uint64_t IntSize = getTypeSize(T: IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  // We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  // greater than that of 'int'. We perform that promotion to match GCC.
  //
  // C23 6.3.1.1p2:
  //   The value from a bit-field of a bit-precise integer type is converted to
  //   the corresponding bit-precise integer type. (The rest is the same as in
  //   C11.)
  // _BitInt bit-fields "promote" to their own type regardless of width.
  if (QualType QT = Field->getType(); QT->isBitIntType())
    return QT;

  // Narrower than int: always promotes to (signed) int.
  if (BitWidth < IntSize)
    return IntTy;

  // Exactly int-width: keep the signedness of the declared type.
  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}
8203
8204/// getPromotedIntegerType - Returns the type that Promotable will
8205/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
8206/// integer type.
8207QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
8208 assert(!Promotable.isNull());
8209 assert(isPromotableIntegerType(Promotable));
8210 if (const auto *ET = Promotable->getAs<EnumType>())
8211 return ET->getDecl()->getPromotionType();
8212
8213 if (const auto *BT = Promotable->getAs<BuiltinType>()) {
8214 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
8215 // (3.9.1) can be converted to a prvalue of the first of the following
8216 // types that can represent all the values of its underlying type:
8217 // int, unsigned int, long int, unsigned long int, long long int, or
8218 // unsigned long long int [...]
8219 // FIXME: Is there some better way to compute this?
8220 if (BT->getKind() == BuiltinType::WChar_S ||
8221 BT->getKind() == BuiltinType::WChar_U ||
8222 BT->getKind() == BuiltinType::Char8 ||
8223 BT->getKind() == BuiltinType::Char16 ||
8224 BT->getKind() == BuiltinType::Char32) {
8225 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
8226 uint64_t FromSize = getTypeSize(T: BT);
8227 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
8228 LongLongTy, UnsignedLongLongTy };
8229 for (const auto &PT : PromoteTypes) {
8230 uint64_t ToSize = getTypeSize(T: PT);
8231 if (FromSize < ToSize ||
8232 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
8233 return PT;
8234 }
8235 llvm_unreachable("char type should fit into long long");
8236 }
8237 }
8238
8239 // At this point, we should have a signed or unsigned integer type.
8240 if (Promotable->isSignedIntegerType())
8241 return IntTy;
8242 uint64_t PromotableSize = getIntWidth(T: Promotable);
8243 uint64_t IntSize = getIntWidth(T: IntTy);
8244 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
8245 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
8246}
8247
8248/// Recurses in pointer/array types until it finds an objc retainable
8249/// type and returns its ownership.
8250Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
8251 while (!T.isNull()) {
8252 if (T.getObjCLifetime() != Qualifiers::OCL_None)
8253 return T.getObjCLifetime();
8254 if (T->isArrayType())
8255 T = getBaseElementType(type: T);
8256 else if (const auto *PT = T->getAs<PointerType>())
8257 T = PT->getPointeeType();
8258 else if (const auto *RT = T->getAs<ReferenceType>())
8259 T = RT->getPointeeType();
8260 else
8261 break;
8262 }
8263
8264 return Qualifiers::OCL_None;
8265}
8266
8267static const Type *getIntegerTypeForEnum(const EnumType *ET) {
8268 // Incomplete enum types are not treated as integer types.
8269 // FIXME: In C++, enum types are never integer types.
8270 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
8271 return ET->getDecl()->getIntegerType().getTypePtr();
8272 return nullptr;
8273}
8274
8275/// getIntegerTypeOrder - Returns the highest ranked integer type:
8276/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
8277/// LHS < RHS, return -1.
8278int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
8279 const Type *LHSC = getCanonicalType(T: LHS).getTypePtr();
8280 const Type *RHSC = getCanonicalType(T: RHS).getTypePtr();
8281
8282 // Unwrap enums to their underlying type.
8283 if (const auto *ET = dyn_cast<EnumType>(Val: LHSC))
8284 LHSC = getIntegerTypeForEnum(ET);
8285 if (const auto *ET = dyn_cast<EnumType>(Val: RHSC))
8286 RHSC = getIntegerTypeForEnum(ET);
8287
8288 if (LHSC == RHSC) return 0;
8289
8290 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
8291 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
8292
8293 unsigned LHSRank = getIntegerRank(T: LHSC);
8294 unsigned RHSRank = getIntegerRank(T: RHSC);
8295
8296 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
8297 if (LHSRank == RHSRank) return 0;
8298 return LHSRank > RHSRank ? 1 : -1;
8299 }
8300
8301 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
8302 if (LHSUnsigned) {
8303 // If the unsigned [LHS] type is larger, return it.
8304 if (LHSRank >= RHSRank)
8305 return 1;
8306
8307 // If the signed type can represent all values of the unsigned type, it
8308 // wins. Because we are dealing with 2's complement and types that are
8309 // powers of two larger than each other, this is always safe.
8310 return -1;
8311 }
8312
8313 // If the unsigned [RHS] type is larger, return it.
8314 if (RHSRank >= LHSRank)
8315 return -1;
8316
8317 // If the signed type can represent all values of the unsigned type, it
8318 // wins. Because we are dealing with 2's complement and types that are
8319 // powers of two larger than each other, this is always safe.
8320 return 1;
8321}
8322
8323TypedefDecl *ASTContext::getCFConstantStringDecl() const {
8324 if (CFConstantStringTypeDecl)
8325 return CFConstantStringTypeDecl;
8326
8327 assert(!CFConstantStringTagDecl &&
8328 "tag and typedef should be initialized together");
8329 CFConstantStringTagDecl = buildImplicitRecord(Name: "__NSConstantString_tag");
8330 CFConstantStringTagDecl->startDefinition();
8331
8332 struct {
8333 QualType Type;
8334 const char *Name;
8335 } Fields[5];
8336 unsigned Count = 0;
8337
8338 /// Objective-C ABI
8339 ///
8340 /// typedef struct __NSConstantString_tag {
8341 /// const int *isa;
8342 /// int flags;
8343 /// const char *str;
8344 /// long length;
8345 /// } __NSConstantString;
8346 ///
8347 /// Swift ABI (4.1, 4.2)
8348 ///
8349 /// typedef struct __NSConstantString_tag {
8350 /// uintptr_t _cfisa;
8351 /// uintptr_t _swift_rc;
8352 /// _Atomic(uint64_t) _cfinfoa;
8353 /// const char *_ptr;
8354 /// uint32_t _length;
8355 /// } __NSConstantString;
8356 ///
8357 /// Swift ABI (5.0)
8358 ///
8359 /// typedef struct __NSConstantString_tag {
8360 /// uintptr_t _cfisa;
8361 /// uintptr_t _swift_rc;
8362 /// _Atomic(uint64_t) _cfinfoa;
8363 /// const char *_ptr;
8364 /// uintptr_t _length;
8365 /// } __NSConstantString;
8366
8367 const auto CFRuntime = getLangOpts().CFRuntime;
8368 if (static_cast<unsigned>(CFRuntime) <
8369 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
8370 Fields[Count++] = { .Type: getPointerType(T: IntTy.withConst()), .Name: "isa" };
8371 Fields[Count++] = { .Type: IntTy, .Name: "flags" };
8372 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "str" };
8373 Fields[Count++] = { .Type: LongTy, .Name: "length" };
8374 } else {
8375 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_cfisa" };
8376 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_swift_rc" };
8377 Fields[Count++] = { .Type: getFromTargetType(Type: Target->getUInt64Type()), .Name: "_swift_rc" };
8378 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "_ptr" };
8379 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
8380 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
8381 Fields[Count++] = { .Type: IntTy, .Name: "_ptr" };
8382 else
8383 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_ptr" };
8384 }
8385
8386 // Create fields
8387 for (unsigned i = 0; i < Count; ++i) {
8388 FieldDecl *Field =
8389 FieldDecl::Create(C: *this, DC: CFConstantStringTagDecl, StartLoc: SourceLocation(),
8390 IdLoc: SourceLocation(), Id: &Idents.get(Name: Fields[i].Name),
8391 T: Fields[i].Type, /*TInfo=*/nullptr,
8392 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8393 Field->setAccess(AS_public);
8394 CFConstantStringTagDecl->addDecl(D: Field);
8395 }
8396
8397 CFConstantStringTagDecl->completeDefinition();
8398 // This type is designed to be compatible with NSConstantString, but cannot
8399 // use the same name, since NSConstantString is an interface.
8400 auto tagType = getTagDeclType(Decl: CFConstantStringTagDecl);
8401 CFConstantStringTypeDecl =
8402 buildImplicitTypedef(T: tagType, Name: "__NSConstantString");
8403
8404 return CFConstantStringTypeDecl;
8405}
8406
8407RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
8408 if (!CFConstantStringTagDecl)
8409 getCFConstantStringDecl(); // Build the tag and the typedef.
8410 return CFConstantStringTagDecl;
8411}
8412
8413// getCFConstantStringType - Return the type used for constant CFStrings.
8414QualType ASTContext::getCFConstantStringType() const {
8415 return getTypedefType(Decl: getCFConstantStringDecl());
8416}
8417
8418QualType ASTContext::getObjCSuperType() const {
8419 if (ObjCSuperType.isNull()) {
8420 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord(Name: "objc_super");
8421 getTranslationUnitDecl()->addDecl(D: ObjCSuperTypeDecl);
8422 ObjCSuperType = getTagDeclType(Decl: ObjCSuperTypeDecl);
8423 }
8424 return ObjCSuperType;
8425}
8426
8427void ASTContext::setCFConstantStringType(QualType T) {
8428 const auto *TD = T->castAs<TypedefType>();
8429 CFConstantStringTypeDecl = cast<TypedefDecl>(Val: TD->getDecl());
8430 const auto *TagType = TD->castAs<RecordType>();
8431 CFConstantStringTagDecl = TagType->getDecl();
8432}
8433
8434QualType ASTContext::getBlockDescriptorType() const {
8435 if (BlockDescriptorType)
8436 return getTagDeclType(Decl: BlockDescriptorType);
8437
8438 RecordDecl *RD;
8439 // FIXME: Needs the FlagAppleBlock bit.
8440 RD = buildImplicitRecord(Name: "__block_descriptor");
8441 RD->startDefinition();
8442
8443 QualType FieldTypes[] = {
8444 UnsignedLongTy,
8445 UnsignedLongTy,
8446 };
8447
8448 static const char *const FieldNames[] = {
8449 "reserved",
8450 "Size"
8451 };
8452
8453 for (size_t i = 0; i < 2; ++i) {
8454 FieldDecl *Field = FieldDecl::Create(
8455 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8456 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8457 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8458 Field->setAccess(AS_public);
8459 RD->addDecl(D: Field);
8460 }
8461
8462 RD->completeDefinition();
8463
8464 BlockDescriptorType = RD;
8465
8466 return getTagDeclType(Decl: BlockDescriptorType);
8467}
8468
8469QualType ASTContext::getBlockDescriptorExtendedType() const {
8470 if (BlockDescriptorExtendedType)
8471 return getTagDeclType(Decl: BlockDescriptorExtendedType);
8472
8473 RecordDecl *RD;
8474 // FIXME: Needs the FlagAppleBlock bit.
8475 RD = buildImplicitRecord(Name: "__block_descriptor_withcopydispose");
8476 RD->startDefinition();
8477
8478 QualType FieldTypes[] = {
8479 UnsignedLongTy,
8480 UnsignedLongTy,
8481 getPointerType(T: VoidPtrTy),
8482 getPointerType(T: VoidPtrTy)
8483 };
8484
8485 static const char *const FieldNames[] = {
8486 "reserved",
8487 "Size",
8488 "CopyFuncPtr",
8489 "DestroyFuncPtr"
8490 };
8491
8492 for (size_t i = 0; i < 4; ++i) {
8493 FieldDecl *Field = FieldDecl::Create(
8494 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8495 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8496 /*BitWidth=*/BW: nullptr,
8497 /*Mutable=*/false, InitStyle: ICIS_NoInit);
8498 Field->setAccess(AS_public);
8499 RD->addDecl(D: Field);
8500 }
8501
8502 RD->completeDefinition();
8503
8504 BlockDescriptorExtendedType = RD;
8505 return getTagDeclType(Decl: BlockDescriptorExtendedType);
8506}
8507
/// Classify a type into the OpenCL type-kind buckets used for address-space
/// mapping: pipes, images (via the X-macro list), the builtin OpenCL opaque
/// types, or OCLTK_Default for everything else.
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(Val: T);

  if (!BT) {
    // Pipes are the only non-builtin type with a dedicated kind.
    if (isa<PipeType>(Val: T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
// Every OpenCL image type (read/write/read-write variants) maps to
// OCLTK_Image; the list is stamped out from the .def file.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}
8543
8544LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
8545 return Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
8546}
8547
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless there is neither a copy expression nor a
  // non-trivial destructor.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(VD: D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // Address-discriminated pointer-auth qualified types always need helpers.
  if (Ty.hasAddressDiscriminatedPointerAuth())
    return true;

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  // Non-retainable types below this point never need helpers.
  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible");

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // These cases should have been taken care of when checking the type's
      // non-triviality.
      case Qualifiers::OCL_Weak:
      case Qualifiers::OCL_Strong:
        llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  // No explicit lifetime: retainable pointer-ish types (blocks, NSObject-like
  // typedefs, ObjC object pointers) need retain/release on copy/dispose.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
8593
8594bool ASTContext::getByrefLifetime(QualType Ty,
8595 Qualifiers::ObjCLifetime &LifeTime,
8596 bool &HasByrefExtendedLayout) const {
8597 if (!getLangOpts().ObjC ||
8598 getLangOpts().getGC() != LangOptions::NonGC)
8599 return false;
8600
8601 HasByrefExtendedLayout = false;
8602 if (Ty->isRecordType()) {
8603 HasByrefExtendedLayout = true;
8604 LifeTime = Qualifiers::OCL_None;
8605 } else if ((LifeTime = Ty.getObjCLifetime())) {
8606 // Honor the ARC qualifiers.
8607 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
8608 // The MRR rule.
8609 LifeTime = Qualifiers::OCL_ExplicitNone;
8610 } else {
8611 LifeTime = Qualifiers::OCL_None;
8612 }
8613 return true;
8614}
8615
8616CanQualType ASTContext::getNSUIntegerType() const {
8617 assert(Target && "Expected target to be initialized");
8618 const llvm::Triple &T = Target->getTriple();
8619 // Windows is LLP64 rather than LP64
8620 if (T.isOSWindows() && T.isArch64Bit())
8621 return UnsignedLongLongTy;
8622 return UnsignedLongTy;
8623}
8624
8625CanQualType ASTContext::getNSIntegerType() const {
8626 assert(Target && "Expected target to be initialized");
8627 const llvm::Triple &T = Target->getTriple();
8628 // Windows is LLP64 rather than LP64
8629 if (T.isOSWindows() && T.isArch64Bit())
8630 return LongLongTy;
8631 return LongTy;
8632}
8633
8634TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
8635 if (!ObjCInstanceTypeDecl)
8636 ObjCInstanceTypeDecl =
8637 buildImplicitTypedef(T: getObjCIdType(), Name: "instancetype");
8638 return ObjCInstanceTypeDecl;
8639}
8640
8641// This returns true if a type has been typedefed to BOOL:
8642// typedef <type> BOOL;
8643static bool isTypeTypedefedAsBOOL(QualType T) {
8644 if (const auto *TT = dyn_cast<TypedefType>(Val&: T))
8645 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
8646 return II->isStr(Str: "BOOL");
8647
8648 return false;
8649}
8650
8651/// getObjCEncodingTypeSize returns size of type for objective-c encoding
8652/// purpose.
8653CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
8654 if (!type->isIncompleteArrayType() && type->isIncompleteType())
8655 return CharUnits::Zero();
8656
8657 CharUnits sz = getTypeSizeInChars(T: type);
8658
8659 // Make all integer and enum types at least as large as an int
8660 if (sz.isPositive() && type->isIntegralOrEnumerationType())
8661 sz = std::max(a: sz, b: getTypeSizeInChars(T: IntTy));
8662 // Treat arrays as pointers, since that's how they're passed in.
8663 else if (type->isArrayType())
8664 sz = getTypeSizeInChars(T: VoidPtrTy);
8665 return sz;
8666}
8667
8668bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
8669 return getTargetInfo().getCXXABI().isMicrosoft() &&
8670 VD->isStaticDataMember() &&
8671 VD->getType()->isIntegralOrEnumerationType() &&
8672 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
8673}
8674
8675ASTContext::InlineVariableDefinitionKind
8676ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
8677 if (!VD->isInline())
8678 return InlineVariableDefinitionKind::None;
8679
8680 // In almost all cases, it's a weak definition.
8681 auto *First = VD->getFirstDecl();
8682 if (First->isInlineSpecified() || !First->isStaticDataMember())
8683 return InlineVariableDefinitionKind::Weak;
8684
8685 // If there's a file-context declaration in this translation unit, it's a
8686 // non-discardable definition.
8687 for (auto *D : VD->redecls())
8688 if (D->getLexicalDeclContext()->isFileContext() &&
8689 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
8690 return InlineVariableDefinitionKind::Strong;
8691
8692 // If we've not seen one yet, we don't know.
8693 return InlineVariableDefinitionKind::WeakUnknown;
8694}
8695
8696static std::string charUnitsToString(const CharUnits &CU) {
8697 return llvm::itostr(X: CU.getQuantity());
8698}
8699
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
///
/// The encoding is: the block's return type, then the total parameter frame
/// size in bytes, then "@?0" (the block pointer itself at offset 0), then
/// each parameter's encoded type followed by its byte offset in the frame.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: BlockReturnTy, S,
                                      Extended: true /*Extended*/);
  else
    getObjCEncodingForType(T: BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  // First pass: sum the parameter sizes (zero-size params contribute
  // nothing) to get the total frame size emitted after the return type.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(CU: ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  // Second pass: emit each parameter's type and its offset, advancing the
  // offset by the parameter's encoded size.
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: PType,
                                      S, Extended: true /*Extended*/);
    else
      getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8756
/// getObjCEncodingForFunctionDecl - Return the encoded type for this function
/// declaration: the encoded return type, the total byte size of the
/// parameter frame, then each parameter's encoded type followed by its byte
/// offset within that frame.
std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(T: Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    // Zero-size (incomplete) parameters do not contribute to the frame.
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  // Restart the offset counter for the per-parameter pass below.
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
          dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8795
8796/// getObjCEncodingForMethodParameter - Return the encoded type for a single
8797/// method parameter or return type. If Extended, include class names and
8798/// block object types.
8799void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
8800 QualType T, std::string& S,
8801 bool Extended) const {
8802 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
8803 getObjCEncodingForTypeQualifier(QT, S);
8804 // Encode parameter type.
8805 ObjCEncOptions Options = ObjCEncOptions()
8806 .setExpandPointedToStructures()
8807 .setExpandStructures()
8808 .setIsOutermostType();
8809 if (Extended)
8810 Options.setEncodeBlockParameters().setEncodeClassNames();
8811 getObjCEncodingForTypeImpl(t: T, S, Options, /*Field=*/nullptr);
8812}
8813
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
///
/// Layout: return type, total frame size, "@0:" + offset (the implicit
/// 'self' object at offset 0 and '_cmd' selector at the next pointer), then
/// each declared parameter's encoded type followed by its byte offset.
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(QT: Decl->getObjCDeclQualifier(),
                                    T: Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    // Zero-size (incomplete) parameters do not contribute to the frame.
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  S += "@0:";
  S += charUnitsToString(CU: PtrSize);

  // Argument types.
  // Second pass: emit each declared parameter, starting after self/_cmd.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
          dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(QT: PVDecl->getObjCDeclQualifier(),
                                      T: PType, S, Extended);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8867
8868ObjCPropertyImplDecl *
8869ASTContext::getObjCPropertyImplDeclForPropertyDecl(
8870 const ObjCPropertyDecl *PD,
8871 const Decl *Container) const {
8872 if (!Container)
8873 return nullptr;
8874 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Val: Container)) {
8875 for (auto *PID : CID->property_impls())
8876 if (PID->getPropertyDecl() == PD)
8877 return PID;
8878 } else {
8879 const auto *OID = cast<ObjCImplementationDecl>(Val: Container);
8880 for (auto *PID : OID->property_impls())
8881 if (PID->getPropertyDecl() == PD)
8882 return PID;
8883 }
8884 return nullptr;
8885}
8886
/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier. Property types
/// are also encoded as a parametrized attribute. The characters used to encode
/// these attributes are defined by the following enumeration:
/// @code
/// enum PropertyAttributes {
/// kPropertyReadOnly = 'R', // property is read-only.
/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
/// kPropertyByref = '&', // property is a reference to the value last assigned
/// kPropertyDynamic = 'D', // property is dynamic
/// kPropertyGetter = 'G', // followed by getter selector name
/// kPropertySetter = 'S', // followed by setter selector name
/// kPropertyInstanceVariable = 'V' // followed by instance variable name
/// kPropertyType = 'T' // followed by old-style type encoding.
/// kPropertyWeak = 'W' // 'weak' property
/// kPropertyStrong = 'P' // property GC'able
/// kPropertyNonAtomic = 'N' // property non-atomic
/// kPropertyOptional = '?' // property optional
/// };
/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  if (ObjCPropertyImplDecl *PropertyImpDecl =
      getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  // The encoding always starts with the 'T' (type) attribute.
  std::string S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(T: PD->getType(), S);

  if (PD->isOptional())
    S += ",?";

  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    // Writable property: the setter kind determines the ownership attribute.
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy:   S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak:   S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  // Custom getter/setter selectors are encoded after 'G'/'S'.
  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  // For synthesized properties, record the backing ivar's name after 'V'.
  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}
8983
8984/// getLegacyIntegralTypeEncoding -
8985/// Another legacy compatibility encoding: 32-bit longs are encoded as
8986/// 'l' or 'L' , but not always. For typedefs, we need to use
8987/// 'i' or 'I' instead if encoding a struct field, or a pointer!
8988void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
8989 if (PointeeTy->getAs<TypedefType>()) {
8990 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
8991 if (BT->getKind() == BuiltinType::ULong && getIntWidth(T: PointeeTy) == 32)
8992 PointeeTy = UnsignedIntTy;
8993 else
8994 if (BT->getKind() == BuiltinType::Long && getIntWidth(T: PointeeTy) == 32)
8995 PointeeTy = IntTy;
8996 }
8997 }
8998}
8999
9000void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
9001 const FieldDecl *Field,
9002 QualType *NotEncodedT) const {
9003 // We follow the behavior of gcc, expanding structures which are
9004 // directly pointed to, and expanding embedded structures. Note that
9005 // these rules are sufficient to prevent recursive encoding of the
9006 // same type.
9007 getObjCEncodingForTypeImpl(t: T, S,
9008 Options: ObjCEncOptions()
9009 .setExpandPointedToStructures()
9010 .setExpandStructures()
9011 .setIsOutermostType(),
9012 Field, NotEncodedT);
9013}
9014
9015void ASTContext::getObjCEncodingForPropertyType(QualType T,
9016 std::string& S) const {
9017 // Encode result type.
9018 // GCC has some special rules regarding encoding of properties which
9019 // closely resembles encoding of ivars.
9020 getObjCEncodingForTypeImpl(t: T, S,
9021 Options: ObjCEncOptions()
9022 .setExpandPointedToStructures()
9023 .setExpandStructures()
9024 .setIsOutermostType()
9025 .setEncodingProperty(),
9026 /*Field=*/nullptr);
9027}
9028
/// Map a builtin type to its single-character Objective-C @encode code.
///
/// The codes for 'long'/'unsigned long' depend on the target: 'l'/'L' when
/// long is 32 bits, otherwise 'q'/'Q'. Types with no defined encoding
/// (fixed-point, half-precision floats, etc.) return ' '; target-specific
/// vector/opaque types (SVE, RVV, WASM refs, AMDGPU) also emit a custom
/// error diagnostic. ObjC id/Class/SEL and placeholder types must never
/// reach this function.
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
    BuiltinType::Kind kind = BT->getKind();
    switch (kind) {
    case BuiltinType::Void: return 'v';
    case BuiltinType::Bool: return 'B';
    case BuiltinType::Char8:
    case BuiltinType::Char_U:
    case BuiltinType::UChar: return 'C';
    case BuiltinType::Char16:
    case BuiltinType::UShort: return 'S';
    case BuiltinType::Char32:
    case BuiltinType::UInt: return 'I';
    case BuiltinType::ULong:
      // Target-dependent: 32-bit long encodes as 'L', 64-bit as 'Q'.
      return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
    case BuiltinType::UInt128: return 'T';
    case BuiltinType::ULongLong: return 'Q';
    case BuiltinType::Char_S:
    case BuiltinType::SChar: return 'c';
    case BuiltinType::Short: return 's';
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Int: return 'i';
    case BuiltinType::Long:
      // Target-dependent: 32-bit long encodes as 'l', 64-bit as 'q'.
      return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
    case BuiltinType::LongLong: return 'q';
    case BuiltinType::Int128: return 't';
    case BuiltinType::Float: return 'f';
    case BuiltinType::Double: return 'd';
    case BuiltinType::LongDouble: return 'D';
    case BuiltinType::NullPtr: return '*'; // like char*

    case BuiltinType::BFloat16:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
    case BuiltinType::Half:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      // FIXME: potentially need @encodes for these!
      return ' ';

#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
    {
      // Target-specific vector/opaque builtins: no encoding exists yet, so
      // report an error instead of silently producing a bogus string.
      DiagnosticsEngine &Diags = C->getDiagnostics();
      unsigned DiagID = Diags.getCustomDiagID(L: DiagnosticsEngine::Error,
                                              FormatString: "cannot yet @encode type %0");
      Diags.Report(DiagID) << BT->getName(Policy: C->getPrintingPolicy());
      return ' ';
    }

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("@encoding ObjC primitive type");

    // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::OCLSampler:
    case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
    case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("invalid builtin type for @encode");
    }
    llvm_unreachable("invalid BuiltinType::Kind value");
}
9141
9142static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
9143 EnumDecl *Enum = ET->getDecl();
9144
9145 // The encoding of an non-fixed enum type is always 'i', regardless of size.
9146 if (!Enum->isFixed())
9147 return 'i';
9148
9149 // The encoding of a fixed enum type matches its fixed underlying type.
9150 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
9151 return getObjCEncodingForPrimitiveType(C, BT);
9152}
9153
/// Append the @encode string for a bit-field to \p S.
/// NeXT-family runtimes get "b<width>"; GNU-family runtimes get
/// "b<bit-offset><element-type-code><width>" (see the comment below for why).
static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits. For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime. The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    uint64_t Offset;

    // Ivars and ordinary record fields compute the bit offset differently.
    if (const auto *IVD = dyn_cast<ObjCIvarDecl>(Val: FD)) {
      Offset = Ctx->lookupFieldBitOffset(OID: IVD->getContainingInterface(), Ivar: IVD);
    } else {
      const RecordDecl *RD = FD->getParent();
      const ASTRecordLayout &RL = Ctx->getASTRecordLayout(D: RD);
      Offset = RL.getFieldOffset(FieldNo: FD->getFieldIndex());
    }

    S += llvm::utostr(X: Offset);

    // Emit the code for the bit-field's declared type (enum or builtin).
    if (const auto *ET = T->getAs<EnumType>())
      S += ObjCEncodingForEnumType(C: Ctx, ET);
    else {
      const auto *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveType(C: Ctx, BT);
    }
  }
  S += llvm::utostr(X: FD->getBitWidthValue());
}
9195
9196// Helper function for determining whether the encoded type string would include
9197// a template specialization type.
9198static bool hasTemplateSpecializationInEncodedString(const Type *T,
9199 bool VisitBasesAndFields) {
9200 T = T->getBaseElementTypeUnsafe();
9201
9202 if (auto *PT = T->getAs<PointerType>())
9203 return hasTemplateSpecializationInEncodedString(
9204 T: PT->getPointeeType().getTypePtr(), VisitBasesAndFields: false);
9205
9206 auto *CXXRD = T->getAsCXXRecordDecl();
9207
9208 if (!CXXRD)
9209 return false;
9210
9211 if (isa<ClassTemplateSpecializationDecl>(Val: CXXRD))
9212 return true;
9213
9214 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
9215 return false;
9216
9217 for (const auto &B : CXXRD->bases())
9218 if (hasTemplateSpecializationInEncodedString(T: B.getType().getTypePtr(),
9219 VisitBasesAndFields: true))
9220 return true;
9221
9222 for (auto *FD : CXXRD->fields())
9223 if (hasTemplateSpecializationInEncodedString(T: FD->getType().getTypePtr(),
9224 VisitBasesAndFields: true))
9225 return true;
9226
9227 return false;
9228}
9229
// FIXME: Use SmallString for accumulating string.
/// Core of the Objective-C type encoder: recursively append the @encode
/// string for \p T to \p S.
///
/// \param Options controls structure expansion, outermost-type qualifier
///        emission, and property/block-specific extended encodings.
/// \param FD the field or ivar being encoded, if any; enables bit-field
///        handling and quoted field/class names in expanded aggregates.
/// \param NotEncodedT if non-null, receives a type for which no coherent
///        encoding exists (vectors, matrices, _BitInt, member pointers) so
///        the caller can issue a warning.
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    if (FD && FD->isBitField())
      return EncodeBitField(Ctx: this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(Val&: CT))
      S += getObjCEncodingForPrimitiveType(C: this, BT);
    else
      S += ObjCEncodingForEnumType(C: this, ET: cast<EnumType>(Val&: CT));
    return;

  case Type::Complex:
    S += 'j';
    getObjCEncodingForTypeImpl(T: T->castAs<ComplexType>()->getElementType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  case Type::Atomic:
    S += 'A';
    getObjCEncodingForTypeImpl(T: T->castAs<AtomicType>()->getValueType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(Val: CT)) {
      const auto *PT = T->castAs<PointerType>();
      if (PT->isObjCSelType()) {
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'.  The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (T->getAs<TypedefType>()) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      QualType P = PointeeTy;
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).ends_with(Suffix: "nr"))
        S.replace(i1: S.end()-2, i2: S.end(), s: "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(T: PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (RTy->getDecl()->getIdentifier() == &Idents.get(Name: "objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (RTy->getDecl()->getIdentifier() == &Idents.get(Name: "objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               T: RTy, VisitBasesAndFields: Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    getLegacyIntegralTypeEncoding(PointeeTy);

    // Pointed-to structures may be expanded, but only one level deep: the
    // recursive call drops ExpandPointedToStructures.
    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(T: PointeeTy, S, Options: NewOptions,
                               /*Field=*/FD: nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(Val&: CT);

    if (isa<IncompleteArrayType>(Val: AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD);
    } else {
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
        S += llvm::utostr(X: CAT->getZExtSize());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    S += '?';
    return;

  case Type::Record: {
    RecordDecl *RDecl = cast<RecordType>(Val&: CT)->getDecl();
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Val: RDecl)) {
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, Args: TemplateArgs.asArray(),
                                  Policy: getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        getObjCEncodingForStructureImpl(RD: RDecl, S, Field: FD, includeVBases: true, NotEncodedT);
      } else {
        for (const auto *Field : RDecl->fields()) {
          // When FD is set, field names are quoted into the encoding.
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                       Options: ObjCEncOptions().setExpandStructures(),
                                       FD: Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(PointeeTy&: qt);
            getObjCEncodingForTypeImpl(
                T: qt, S,
                Options: ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(T: FT->getReturnType(), S,
                                 Options: Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(T: I, S, Options: Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(ObjectT: CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    [[fallthrough]];
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, leafClass: true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(),
                                     FD: Field);
        else
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      getObjCEncodingForTypeImpl(
          T: getObjCIdType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions()
                                  .setExpandPointedToStructures()
                                  .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifier list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    S += '@';
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that.  'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::BitInt:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("unexpected type");

  case Type::ArrayParameter:
  case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}
9591
/// Append the @encode string for the body of a struct/class (never a union):
/// non-virtual bases, fields, and optionally virtual bases, walked in layout
/// order. Empty bases and zero-size fields are skipped, and a vtable pointer
/// is emitted as "^^?" for dynamic classes. A non-null \p FD enables quoting
/// of field names into the output.
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(Val: RDecl);
  // Bases and fields keyed (and therefore visited) by bit offset; entries
  // with equal offsets keep insertion order via upper_bound insertion.
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(D: RDecl);

  if (CXXRec) {
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(CharSize: layout.getBaseClassOffset(Base: base));
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                                  x: std::make_pair(x&: offs, y&: base));
      }
    }
  }

  for (FieldDecl *Field : RDecl->fields()) {
    if (!Field->isZeroLengthBitField() && Field->isZeroSize(Ctx: *this))
      continue;
    uint64_t offs = layout.getFieldOffset(FieldNo: Field->getFieldIndex());
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y&: Field));
  }

  if (CXXRec && includeVBases) {
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(CharSize: layout.getVBaseClassOffset(VBase: base));
      if (offs >= uint64_t(toBits(CharSize: layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(x: offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.end(),
                                  x: std::make_pair(x&: offs, y&: base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(CharSize: size);
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y: nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(Val: dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(RDecl: base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(Val: dcl);
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(Ctx: this, S, T: field->getType(), FD: field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue();
#endif
      } else {
        QualType qt = field->getType();
        getLegacyIntegralTypeEncoding(PointeeTy&: qt);
        getObjCEncodingForTypeImpl(
            T: qt, S, Options: ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
9732
9733void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
9734 std::string& S) const {
9735 if (QT & Decl::OBJC_TQ_In)
9736 S += 'n';
9737 if (QT & Decl::OBJC_TQ_Inout)
9738 S += 'N';
9739 if (QT & Decl::OBJC_TQ_Out)
9740 S += 'o';
9741 if (QT & Decl::OBJC_TQ_Bycopy)
9742 S += 'O';
9743 if (QT & Decl::OBJC_TQ_Byref)
9744 S += 'R';
9745 if (QT & Decl::OBJC_TQ_Oneway)
9746 S += 'V';
9747}
9748
9749TypedefDecl *ASTContext::getObjCIdDecl() const {
9750 if (!ObjCIdDecl) {
9751 QualType T = getObjCObjectType(BaseType: ObjCBuiltinIdTy, Protocols: {}, NumProtocols: {});
9752 T = getObjCObjectPointerType(ObjectT: T);
9753 ObjCIdDecl = buildImplicitTypedef(T, Name: "id");
9754 }
9755 return ObjCIdDecl;
9756}
9757
9758TypedefDecl *ASTContext::getObjCSelDecl() const {
9759 if (!ObjCSelDecl) {
9760 QualType T = getPointerType(T: ObjCBuiltinSelTy);
9761 ObjCSelDecl = buildImplicitTypedef(T, Name: "SEL");
9762 }
9763 return ObjCSelDecl;
9764}
9765
9766TypedefDecl *ASTContext::getObjCClassDecl() const {
9767 if (!ObjCClassDecl) {
9768 QualType T = getObjCObjectType(BaseType: ObjCBuiltinClassTy, Protocols: {}, NumProtocols: {});
9769 T = getObjCObjectPointerType(ObjectT: T);
9770 ObjCClassDecl = buildImplicitTypedef(T, Name: "Class");
9771 }
9772 return ObjCClassDecl;
9773}
9774
9775ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
9776 if (!ObjCProtocolClassDecl) {
9777 ObjCProtocolClassDecl
9778 = ObjCInterfaceDecl::Create(C: *this, DC: getTranslationUnitDecl(),
9779 atLoc: SourceLocation(),
9780 Id: &Idents.get(Name: "Protocol"),
9781 /*typeParamList=*/nullptr,
9782 /*PrevDecl=*/nullptr,
9783 ClassLoc: SourceLocation(), isInternal: true);
9784 }
9785
9786 return ObjCProtocolClassDecl;
9787}
9788
9789PointerAuthQualifier ASTContext::getObjCMemberSelTypePtrAuth() {
9790 if (!getLangOpts().PointerAuthObjcInterfaceSel)
9791 return PointerAuthQualifier();
9792 return PointerAuthQualifier::Create(
9793 Key: getLangOpts().PointerAuthObjcInterfaceSelKey,
9794 /*isAddressDiscriminated=*/IsAddressDiscriminated: true, ExtraDiscriminator: SelPointerConstantDiscriminator,
9795 AuthenticationMode: PointerAuthenticationMode::SignAndAuth,
9796 /*isIsaPointer=*/IsIsaPointer: false,
9797 /*authenticatesNullValues=*/AuthenticatesNullValues: false);
9798}
9799
9800//===----------------------------------------------------------------------===//
9801// __builtin_va_list Construction Functions
9802//===----------------------------------------------------------------------===//
9803
9804static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
9805 StringRef Name) {
9806 // typedef char* __builtin[_ms]_va_list;
9807 QualType T = Context->getPointerType(T: Context->CharTy);
9808 return Context->buildImplicitTypedef(T, Name);
9809}
9810
9811static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
9812 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_ms_va_list");
9813}
9814
9815static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
9816 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_va_list");
9817}
9818
9819static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
9820 // typedef void* __builtin_va_list;
9821 QualType T = Context->getPointerType(T: Context->VoidTy);
9822 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
9823}
9824
9825static TypedefDecl *
9826CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
9827 // struct __va_list
9828 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list");
9829 if (Context->getLangOpts().CPlusPlus) {
9830 // namespace std { struct __va_list {
9831 auto *NS = NamespaceDecl::Create(
9832 C&: const_cast<ASTContext &>(*Context), DC: Context->getTranslationUnitDecl(),
9833 /*Inline=*/false, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
9834 Id: &Context->Idents.get(Name: "std"),
9835 /*PrevDecl=*/nullptr, /*Nested=*/false);
9836 NS->setImplicit();
9837 VaListTagDecl->setDeclContext(NS);
9838 }
9839
9840 VaListTagDecl->startDefinition();
9841
9842 const size_t NumFields = 5;
9843 QualType FieldTypes[NumFields];
9844 const char *FieldNames[NumFields];
9845
9846 // void *__stack;
9847 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
9848 FieldNames[0] = "__stack";
9849
9850 // void *__gr_top;
9851 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
9852 FieldNames[1] = "__gr_top";
9853
9854 // void *__vr_top;
9855 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
9856 FieldNames[2] = "__vr_top";
9857
9858 // int __gr_offs;
9859 FieldTypes[3] = Context->IntTy;
9860 FieldNames[3] = "__gr_offs";
9861
9862 // int __vr_offs;
9863 FieldTypes[4] = Context->IntTy;
9864 FieldNames[4] = "__vr_offs";
9865
9866 // Create fields
9867 for (unsigned i = 0; i < NumFields; ++i) {
9868 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9869 DC: VaListTagDecl,
9870 StartLoc: SourceLocation(),
9871 IdLoc: SourceLocation(),
9872 Id: &Context->Idents.get(Name: FieldNames[i]),
9873 T: FieldTypes[i], /*TInfo=*/nullptr,
9874 /*BitWidth=*/BW: nullptr,
9875 /*Mutable=*/false,
9876 InitStyle: ICIS_NoInit);
9877 Field->setAccess(AS_public);
9878 VaListTagDecl->addDecl(D: Field);
9879 }
9880 VaListTagDecl->completeDefinition();
9881 Context->VaListTagDecl = VaListTagDecl;
9882 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9883
9884 // } __builtin_va_list;
9885 return Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
9886}
9887
9888static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
9889 // typedef struct __va_list_tag {
9890 RecordDecl *VaListTagDecl;
9891
9892 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9893 VaListTagDecl->startDefinition();
9894
9895 const size_t NumFields = 5;
9896 QualType FieldTypes[NumFields];
9897 const char *FieldNames[NumFields];
9898
9899 // unsigned char gpr;
9900 FieldTypes[0] = Context->UnsignedCharTy;
9901 FieldNames[0] = "gpr";
9902
9903 // unsigned char fpr;
9904 FieldTypes[1] = Context->UnsignedCharTy;
9905 FieldNames[1] = "fpr";
9906
9907 // unsigned short reserved;
9908 FieldTypes[2] = Context->UnsignedShortTy;
9909 FieldNames[2] = "reserved";
9910
9911 // void* overflow_arg_area;
9912 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
9913 FieldNames[3] = "overflow_arg_area";
9914
9915 // void* reg_save_area;
9916 FieldTypes[4] = Context->getPointerType(T: Context->VoidTy);
9917 FieldNames[4] = "reg_save_area";
9918
9919 // Create fields
9920 for (unsigned i = 0; i < NumFields; ++i) {
9921 FieldDecl *Field = FieldDecl::Create(C: *Context, DC: VaListTagDecl,
9922 StartLoc: SourceLocation(),
9923 IdLoc: SourceLocation(),
9924 Id: &Context->Idents.get(Name: FieldNames[i]),
9925 T: FieldTypes[i], /*TInfo=*/nullptr,
9926 /*BitWidth=*/BW: nullptr,
9927 /*Mutable=*/false,
9928 InitStyle: ICIS_NoInit);
9929 Field->setAccess(AS_public);
9930 VaListTagDecl->addDecl(D: Field);
9931 }
9932 VaListTagDecl->completeDefinition();
9933 Context->VaListTagDecl = VaListTagDecl;
9934 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9935
9936 // } __va_list_tag;
9937 TypedefDecl *VaListTagTypedefDecl =
9938 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
9939
9940 QualType VaListTagTypedefType =
9941 Context->getTypedefType(Decl: VaListTagTypedefDecl);
9942
9943 // typedef __va_list_tag __builtin_va_list[1];
9944 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
9945 QualType VaListTagArrayType = Context->getConstantArrayType(
9946 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
9947 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
9948}
9949
9950static TypedefDecl *
9951CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
9952 // struct __va_list_tag {
9953 RecordDecl *VaListTagDecl;
9954 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9955 VaListTagDecl->startDefinition();
9956
9957 const size_t NumFields = 4;
9958 QualType FieldTypes[NumFields];
9959 const char *FieldNames[NumFields];
9960
9961 // unsigned gp_offset;
9962 FieldTypes[0] = Context->UnsignedIntTy;
9963 FieldNames[0] = "gp_offset";
9964
9965 // unsigned fp_offset;
9966 FieldTypes[1] = Context->UnsignedIntTy;
9967 FieldNames[1] = "fp_offset";
9968
9969 // void* overflow_arg_area;
9970 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
9971 FieldNames[2] = "overflow_arg_area";
9972
9973 // void* reg_save_area;
9974 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
9975 FieldNames[3] = "reg_save_area";
9976
9977 // Create fields
9978 for (unsigned i = 0; i < NumFields; ++i) {
9979 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9980 DC: VaListTagDecl,
9981 StartLoc: SourceLocation(),
9982 IdLoc: SourceLocation(),
9983 Id: &Context->Idents.get(Name: FieldNames[i]),
9984 T: FieldTypes[i], /*TInfo=*/nullptr,
9985 /*BitWidth=*/BW: nullptr,
9986 /*Mutable=*/false,
9987 InitStyle: ICIS_NoInit);
9988 Field->setAccess(AS_public);
9989 VaListTagDecl->addDecl(D: Field);
9990 }
9991 VaListTagDecl->completeDefinition();
9992 Context->VaListTagDecl = VaListTagDecl;
9993 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9994
9995 // };
9996
9997 // typedef struct __va_list_tag __builtin_va_list[1];
9998 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
9999 QualType VaListTagArrayType = Context->getConstantArrayType(
10000 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10001 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10002}
10003
10004static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
10005 // typedef int __builtin_va_list[4];
10006 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 4);
10007 QualType IntArrayType = Context->getConstantArrayType(
10008 EltTy: Context->IntTy, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10009 return Context->buildImplicitTypedef(T: IntArrayType, Name: "__builtin_va_list");
10010}
10011
10012static TypedefDecl *
10013CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
10014 // struct __va_list
10015 RecordDecl *VaListDecl = Context->buildImplicitRecord(Name: "__va_list");
10016 if (Context->getLangOpts().CPlusPlus) {
10017 // namespace std { struct __va_list {
10018 NamespaceDecl *NS;
10019 NS = NamespaceDecl::Create(C&: const_cast<ASTContext &>(*Context),
10020 DC: Context->getTranslationUnitDecl(),
10021 /*Inline=*/false, StartLoc: SourceLocation(),
10022 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: "std"),
10023 /*PrevDecl=*/nullptr, /*Nested=*/false);
10024 NS->setImplicit();
10025 VaListDecl->setDeclContext(NS);
10026 }
10027
10028 VaListDecl->startDefinition();
10029
10030 // void * __ap;
10031 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10032 DC: VaListDecl,
10033 StartLoc: SourceLocation(),
10034 IdLoc: SourceLocation(),
10035 Id: &Context->Idents.get(Name: "__ap"),
10036 T: Context->getPointerType(T: Context->VoidTy),
10037 /*TInfo=*/nullptr,
10038 /*BitWidth=*/BW: nullptr,
10039 /*Mutable=*/false,
10040 InitStyle: ICIS_NoInit);
10041 Field->setAccess(AS_public);
10042 VaListDecl->addDecl(D: Field);
10043
10044 // };
10045 VaListDecl->completeDefinition();
10046 Context->VaListTagDecl = VaListDecl;
10047
10048 // typedef struct __va_list __builtin_va_list;
10049 QualType T = Context->getRecordType(Decl: VaListDecl);
10050 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
10051}
10052
10053static TypedefDecl *
10054CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
10055 // struct __va_list_tag {
10056 RecordDecl *VaListTagDecl;
10057 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10058 VaListTagDecl->startDefinition();
10059
10060 const size_t NumFields = 4;
10061 QualType FieldTypes[NumFields];
10062 const char *FieldNames[NumFields];
10063
10064 // long __gpr;
10065 FieldTypes[0] = Context->LongTy;
10066 FieldNames[0] = "__gpr";
10067
10068 // long __fpr;
10069 FieldTypes[1] = Context->LongTy;
10070 FieldNames[1] = "__fpr";
10071
10072 // void *__overflow_arg_area;
10073 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10074 FieldNames[2] = "__overflow_arg_area";
10075
10076 // void *__reg_save_area;
10077 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
10078 FieldNames[3] = "__reg_save_area";
10079
10080 // Create fields
10081 for (unsigned i = 0; i < NumFields; ++i) {
10082 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10083 DC: VaListTagDecl,
10084 StartLoc: SourceLocation(),
10085 IdLoc: SourceLocation(),
10086 Id: &Context->Idents.get(Name: FieldNames[i]),
10087 T: FieldTypes[i], /*TInfo=*/nullptr,
10088 /*BitWidth=*/BW: nullptr,
10089 /*Mutable=*/false,
10090 InitStyle: ICIS_NoInit);
10091 Field->setAccess(AS_public);
10092 VaListTagDecl->addDecl(D: Field);
10093 }
10094 VaListTagDecl->completeDefinition();
10095 Context->VaListTagDecl = VaListTagDecl;
10096 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
10097
10098 // };
10099
10100 // typedef __va_list_tag __builtin_va_list[1];
10101 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10102 QualType VaListTagArrayType = Context->getConstantArrayType(
10103 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10104
10105 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10106}
10107
10108static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
10109 // typedef struct __va_list_tag {
10110 RecordDecl *VaListTagDecl;
10111 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10112 VaListTagDecl->startDefinition();
10113
10114 const size_t NumFields = 3;
10115 QualType FieldTypes[NumFields];
10116 const char *FieldNames[NumFields];
10117
10118 // void *CurrentSavedRegisterArea;
10119 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
10120 FieldNames[0] = "__current_saved_reg_area_pointer";
10121
10122 // void *SavedRegAreaEnd;
10123 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
10124 FieldNames[1] = "__saved_reg_area_end_pointer";
10125
10126 // void *OverflowArea;
10127 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10128 FieldNames[2] = "__overflow_area_pointer";
10129
10130 // Create fields
10131 for (unsigned i = 0; i < NumFields; ++i) {
10132 FieldDecl *Field = FieldDecl::Create(
10133 C: const_cast<ASTContext &>(*Context), DC: VaListTagDecl, StartLoc: SourceLocation(),
10134 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i],
10135 /*TInfo=*/nullptr,
10136 /*BitWidth=*/BW: nullptr,
10137 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10138 Field->setAccess(AS_public);
10139 VaListTagDecl->addDecl(D: Field);
10140 }
10141 VaListTagDecl->completeDefinition();
10142 Context->VaListTagDecl = VaListTagDecl;
10143 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
10144
10145 // } __va_list_tag;
10146 TypedefDecl *VaListTagTypedefDecl =
10147 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
10148
10149 QualType VaListTagTypedefType = Context->getTypedefType(Decl: VaListTagTypedefDecl);
10150
10151 // typedef __va_list_tag __builtin_va_list[1];
10152 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10153 QualType VaListTagArrayType = Context->getConstantArrayType(
10154 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10155
10156 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10157}
10158
10159static TypedefDecl *
10160CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
10161 // typedef struct __va_list_tag {
10162 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10163
10164 VaListTagDecl->startDefinition();
10165
10166 // int* __va_stk;
10167 // int* __va_reg;
10168 // int __va_ndx;
10169 constexpr size_t NumFields = 3;
10170 QualType FieldTypes[NumFields] = {Context->getPointerType(T: Context->IntTy),
10171 Context->getPointerType(T: Context->IntTy),
10172 Context->IntTy};
10173 const char *FieldNames[NumFields] = {"__va_stk", "__va_reg", "__va_ndx"};
10174
10175 // Create fields
10176 for (unsigned i = 0; i < NumFields; ++i) {
10177 FieldDecl *Field = FieldDecl::Create(
10178 C: *Context, DC: VaListTagDecl, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
10179 Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
10180 /*BitWidth=*/BW: nullptr,
10181 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10182 Field->setAccess(AS_public);
10183 VaListTagDecl->addDecl(D: Field);
10184 }
10185 VaListTagDecl->completeDefinition();
10186 Context->VaListTagDecl = VaListTagDecl;
10187 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
10188
10189 // } __va_list_tag;
10190 TypedefDecl *VaListTagTypedefDecl =
10191 Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
10192
10193 return VaListTagTypedefDecl;
10194}
10195
10196static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
10197 TargetInfo::BuiltinVaListKind Kind) {
10198 switch (Kind) {
10199 case TargetInfo::CharPtrBuiltinVaList:
10200 return CreateCharPtrBuiltinVaListDecl(Context);
10201 case TargetInfo::VoidPtrBuiltinVaList:
10202 return CreateVoidPtrBuiltinVaListDecl(Context);
10203 case TargetInfo::AArch64ABIBuiltinVaList:
10204 return CreateAArch64ABIBuiltinVaListDecl(Context);
10205 case TargetInfo::PowerABIBuiltinVaList:
10206 return CreatePowerABIBuiltinVaListDecl(Context);
10207 case TargetInfo::X86_64ABIBuiltinVaList:
10208 return CreateX86_64ABIBuiltinVaListDecl(Context);
10209 case TargetInfo::PNaClABIBuiltinVaList:
10210 return CreatePNaClABIBuiltinVaListDecl(Context);
10211 case TargetInfo::AAPCSABIBuiltinVaList:
10212 return CreateAAPCSABIBuiltinVaListDecl(Context);
10213 case TargetInfo::SystemZBuiltinVaList:
10214 return CreateSystemZBuiltinVaListDecl(Context);
10215 case TargetInfo::HexagonBuiltinVaList:
10216 return CreateHexagonBuiltinVaListDecl(Context);
10217 case TargetInfo::XtensaABIBuiltinVaList:
10218 return CreateXtensaABIBuiltinVaListDecl(Context);
10219 }
10220
10221 llvm_unreachable("Unhandled __builtin_va_list type kind");
10222}
10223
10224TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
10225 if (!BuiltinVaListDecl) {
10226 BuiltinVaListDecl = CreateVaListDecl(Context: this, Kind: Target->getBuiltinVaListKind());
10227 assert(BuiltinVaListDecl->isImplicit());
10228 }
10229
10230 return BuiltinVaListDecl;
10231}
10232
10233Decl *ASTContext::getVaListTagDecl() const {
10234 // Force the creation of VaListTagDecl by building the __builtin_va_list
10235 // declaration.
10236 if (!VaListTagDecl)
10237 (void)getBuiltinVaListDecl();
10238
10239 return VaListTagDecl;
10240}
10241
10242TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
10243 if (!BuiltinMSVaListDecl)
10244 BuiltinMSVaListDecl = CreateMSVaListDecl(Context: this);
10245
10246 return BuiltinMSVaListDecl;
10247}
10248
10249bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
10250 // Allow redecl custom type checking builtin for HLSL.
10251 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
10252 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10253 return true;
10254 // Allow redecl custom type checking builtin for SPIR-V.
10255 if (getTargetInfo().getTriple().isSPIROrSPIRV() &&
10256 BuiltinInfo.isTSBuiltin(ID: FD->getBuiltinID()) &&
10257 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10258 return true;
10259 return BuiltinInfo.canBeRedeclared(ID: FD->getBuiltinID());
10260}
10261
/// Record the interface declaration backing ObjC constant string literals
/// (e.g. NSConstantString). Asserts that the type is set at most once.
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
  assert(ObjCConstantStringType.isNull() &&
         "'NSConstantString' type already set!");

  ObjCConstantStringType = getObjCInterfaceType(Decl);
}
10268
10269/// Retrieve the template name that corresponds to a non-empty
10270/// lookup.
10271TemplateName
10272ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
10273 UnresolvedSetIterator End) const {
10274 unsigned size = End - Begin;
10275 assert(size > 1 && "set is not overloaded!");
10276
10277 void *memory = Allocate(Size: sizeof(OverloadedTemplateStorage) +
10278 size * sizeof(FunctionTemplateDecl*));
10279 auto *OT = new (memory) OverloadedTemplateStorage(size);
10280
10281 NamedDecl **Storage = OT->getStorage();
10282 for (UnresolvedSetIterator I = Begin; I != End; ++I) {
10283 NamedDecl *D = *I;
10284 assert(isa<FunctionTemplateDecl>(D) ||
10285 isa<UnresolvedUsingValueDecl>(D) ||
10286 (isa<UsingShadowDecl>(D) &&
10287 isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
10288 *Storage++ = D;
10289 }
10290
10291 return TemplateName(OT);
10292}
10293
10294/// Retrieve a template name representing an unqualified-id that has been
10295/// assumed to name a template for ADL purposes.
10296TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
10297 auto *OT = new (*this) AssumedTemplateStorage(Name);
10298 return TemplateName(OT);
10299}
10300
10301/// Retrieve the template name that represents a qualified
10302/// template name such as \c std::vector.
10303TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
10304 bool TemplateKeyword,
10305 TemplateName Template) const {
10306 assert(Template.getKind() == TemplateName::Template ||
10307 Template.getKind() == TemplateName::UsingTemplate);
10308
10309 // FIXME: Canonicalization?
10310 llvm::FoldingSetNodeID ID;
10311 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, TN: Template);
10312
10313 void *InsertPos = nullptr;
10314 QualifiedTemplateName *QTN =
10315 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
10316 if (!QTN) {
10317 QTN = new (*this, alignof(QualifiedTemplateName))
10318 QualifiedTemplateName(NNS, TemplateKeyword, Template);
10319 QualifiedTemplateNames.InsertNode(N: QTN, InsertPos);
10320 }
10321
10322 return TemplateName(QTN);
10323}
10324
10325/// Retrieve the template name that represents a dependent
10326/// template name such as \c MetaFun::template operator+.
10327TemplateName
10328ASTContext::getDependentTemplateName(const DependentTemplateStorage &S) const {
10329 llvm::FoldingSetNodeID ID;
10330 S.Profile(ID);
10331
10332 void *InsertPos = nullptr;
10333 if (DependentTemplateName *QTN =
10334 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos))
10335 return TemplateName(QTN);
10336
10337 DependentTemplateName *QTN =
10338 new (*this, alignof(DependentTemplateName)) DependentTemplateName(S);
10339 DependentTemplateNames.InsertNode(N: QTN, InsertPos);
10340 return TemplateName(QTN);
10341}
10342
10343TemplateName ASTContext::getSubstTemplateTemplateParm(TemplateName Replacement,
10344 Decl *AssociatedDecl,
10345 unsigned Index,
10346 UnsignedOrNone PackIndex,
10347 bool Final) const {
10348 llvm::FoldingSetNodeID ID;
10349 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
10350 Index, PackIndex, Final);
10351
10352 void *insertPos = nullptr;
10353 SubstTemplateTemplateParmStorage *subst
10354 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
10355
10356 if (!subst) {
10357 subst = new (*this) SubstTemplateTemplateParmStorage(
10358 Replacement, AssociatedDecl, Index, PackIndex, Final);
10359 SubstTemplateTemplateParms.InsertNode(N: subst, InsertPos: insertPos);
10360 }
10361
10362 return TemplateName(subst);
10363}
10364
10365TemplateName
10366ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
10367 Decl *AssociatedDecl,
10368 unsigned Index, bool Final) const {
10369 auto &Self = const_cast<ASTContext &>(*this);
10370 llvm::FoldingSetNodeID ID;
10371 SubstTemplateTemplateParmPackStorage::Profile(ID, Context&: Self, ArgPack,
10372 AssociatedDecl, Index, Final);
10373
10374 void *InsertPos = nullptr;
10375 SubstTemplateTemplateParmPackStorage *Subst
10376 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
10377
10378 if (!Subst) {
10379 Subst = new (*this) SubstTemplateTemplateParmPackStorage(
10380 ArgPack.pack_elements(), AssociatedDecl, Index, Final);
10381 SubstTemplateTemplateParmPacks.InsertNode(N: Subst, InsertPos);
10382 }
10383
10384 return TemplateName(Subst);
10385}
10386
10387/// Retrieve the template name that represents a template name
10388/// deduced from a specialization.
10389TemplateName
10390ASTContext::getDeducedTemplateName(TemplateName Underlying,
10391 DefaultArguments DefaultArgs) const {
10392 if (!DefaultArgs)
10393 return Underlying;
10394
10395 llvm::FoldingSetNodeID ID;
10396 DeducedTemplateStorage::Profile(ID, Context: *this, Underlying, DefArgs: DefaultArgs);
10397
10398 void *InsertPos = nullptr;
10399 DeducedTemplateStorage *DTS =
10400 DeducedTemplates.FindNodeOrInsertPos(ID, InsertPos);
10401 if (!DTS) {
10402 void *Mem = Allocate(Size: sizeof(DeducedTemplateStorage) +
10403 sizeof(TemplateArgument) * DefaultArgs.Args.size(),
10404 Align: alignof(DeducedTemplateStorage));
10405 DTS = new (Mem) DeducedTemplateStorage(Underlying, DefaultArgs);
10406 DeducedTemplates.InsertNode(N: DTS, InsertPos);
10407 }
10408 return TemplateName(DTS);
10409}
10410
10411/// getFromTargetType - Given one of the integer types provided by
10412/// TargetInfo, produce the corresponding type. The unsigned @p Type
10413/// is actually a value of type @c TargetInfo::IntType.
10414CanQualType ASTContext::getFromTargetType(unsigned Type) const {
10415 switch (Type) {
10416 case TargetInfo::NoInt: return {};
10417 case TargetInfo::SignedChar: return SignedCharTy;
10418 case TargetInfo::UnsignedChar: return UnsignedCharTy;
10419 case TargetInfo::SignedShort: return ShortTy;
10420 case TargetInfo::UnsignedShort: return UnsignedShortTy;
10421 case TargetInfo::SignedInt: return IntTy;
10422 case TargetInfo::UnsignedInt: return UnsignedIntTy;
10423 case TargetInfo::SignedLong: return LongTy;
10424 case TargetInfo::UnsignedLong: return UnsignedLongTy;
10425 case TargetInfo::SignedLongLong: return LongLongTy;
10426 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
10427 }
10428
10429 llvm_unreachable("Unhandled TargetInfo::IntType value");
10430}
10431
10432//===----------------------------------------------------------------------===//
10433// Type Predicates.
10434//===----------------------------------------------------------------------===//
10435
10436/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
10437/// garbage collection attribute.
10438///
10439Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
10440 if (getLangOpts().getGC() == LangOptions::NonGC)
10441 return Qualifiers::GCNone;
10442
10443 assert(getLangOpts().ObjC);
10444 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
10445
10446 // Default behaviour under objective-C's gc is for ObjC pointers
10447 // (or pointers to them) be treated as though they were declared
10448 // as __strong.
10449 if (GCAttrs == Qualifiers::GCNone) {
10450 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
10451 return Qualifiers::Strong;
10452 else if (Ty->isPointerType())
10453 return getObjCGCAttrKind(Ty: Ty->castAs<PointerType>()->getPointeeType());
10454 } else {
10455 // It's not valid to set GC attributes on anything that isn't a
10456 // pointer.
10457#ifndef NDEBUG
10458 QualType CT = Ty->getCanonicalTypeInternal();
10459 while (const auto *AT = dyn_cast<ArrayType>(CT))
10460 CT = AT->getElementType();
10461 assert(CT->isAnyPointerType() || CT->isBlockPointerType());
10462#endif
10463 }
10464 return GCAttrs;
10465}
10466
10467//===----------------------------------------------------------------------===//
10468// Type Compatibility Testing
10469//===----------------------------------------------------------------------===//
10470
10471/// areCompatVectorTypes - Return true if the two specified vector types are
10472/// compatible.
10473static bool areCompatVectorTypes(const VectorType *LHS,
10474 const VectorType *RHS) {
10475 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10476 return LHS->getElementType() == RHS->getElementType() &&
10477 LHS->getNumElements() == RHS->getNumElements();
10478}
10479
10480/// areCompatMatrixTypes - Return true if the two specified matrix types are
10481/// compatible.
10482static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
10483 const ConstantMatrixType *RHS) {
10484 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10485 return LHS->getElementType() == RHS->getElementType() &&
10486 LHS->getNumRows() == RHS->getNumRows() &&
10487 LHS->getNumColumns() == RHS->getNumColumns();
10488}
10489
10490bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
10491 QualType SecondVec) {
10492 assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
10493 assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
10494
10495 if (hasSameUnqualifiedType(T1: FirstVec, T2: SecondVec))
10496 return true;
10497
10498 // Treat Neon vector types and most AltiVec vector types as if they are the
10499 // equivalent GCC vector types.
10500 const auto *First = FirstVec->castAs<VectorType>();
10501 const auto *Second = SecondVec->castAs<VectorType>();
10502 if (First->getNumElements() == Second->getNumElements() &&
10503 hasSameType(T1: First->getElementType(), T2: Second->getElementType()) &&
10504 First->getVectorKind() != VectorKind::AltiVecPixel &&
10505 First->getVectorKind() != VectorKind::AltiVecBool &&
10506 Second->getVectorKind() != VectorKind::AltiVecPixel &&
10507 Second->getVectorKind() != VectorKind::AltiVecBool &&
10508 First->getVectorKind() != VectorKind::SveFixedLengthData &&
10509 First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10510 Second->getVectorKind() != VectorKind::SveFixedLengthData &&
10511 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10512 First->getVectorKind() != VectorKind::RVVFixedLengthData &&
10513 Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
10514 First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10515 Second->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10516 First->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10517 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10518 First->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10519 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10520 First->getVectorKind() != VectorKind::RVVFixedLengthMask_4 &&
10521 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
10522 return true;
10523
10524 return false;
10525}
10526
10527/// getRVVTypeSize - Return RVV vector register size.
10528static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
10529 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
10530 auto VScale = Context.getTargetInfo().getVScaleRange(
10531 LangOpts: Context.getLangOpts(), Mode: TargetInfo::ArmStreamingKind::NotStreaming);
10532 if (!VScale)
10533 return 0;
10534
10535 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
10536
10537 uint64_t EltSize = Context.getTypeSize(T: Info.ElementType);
10538 if (Info.ElementType == Context.BoolTy)
10539 EltSize = 1;
10540
10541 uint64_t MinElts = Info.EC.getKnownMinValue();
10542 return VScale->first * MinElts * EltSize;
10543}
10544
10545bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
10546 QualType SecondType) {
10547 assert(
10548 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10549 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10550 "Expected RVV builtin type and vector type!");
10551
10552 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
10553 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
10554 if (const auto *VT = SecondType->getAs<VectorType>()) {
10555 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
10556 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10557 return FirstType->isRVVVLSBuiltinType() &&
10558 Info.ElementType == BoolTy &&
10559 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)));
10560 }
10561 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1) {
10562 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10563 return FirstType->isRVVVLSBuiltinType() &&
10564 Info.ElementType == BoolTy &&
10565 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT) * 8));
10566 }
10567 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2) {
10568 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10569 return FirstType->isRVVVLSBuiltinType() &&
10570 Info.ElementType == BoolTy &&
10571 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 4);
10572 }
10573 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
10574 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10575 return FirstType->isRVVVLSBuiltinType() &&
10576 Info.ElementType == BoolTy &&
10577 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 2);
10578 }
10579 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
10580 VT->getVectorKind() == VectorKind::Generic)
10581 return FirstType->isRVVVLSBuiltinType() &&
10582 getTypeSize(T: SecondType) == getRVVTypeSize(Context&: *this, Ty: BT) &&
10583 hasSameType(T1: VT->getElementType(),
10584 T2: getBuiltinVectorTypeInfo(Ty: BT).ElementType);
10585 }
10586 }
10587 return false;
10588 };
10589
10590 return IsValidCast(FirstType, SecondType) ||
10591 IsValidCast(SecondType, FirstType);
10592}
10593
10594bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
10595 QualType SecondType) {
10596 assert(
10597 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10598 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10599 "Expected RVV builtin type and vector type!");
10600
10601 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
10602 const auto *BT = FirstType->getAs<BuiltinType>();
10603 if (!BT)
10604 return false;
10605
10606 if (!BT->isRVVVLSBuiltinType())
10607 return false;
10608
10609 const auto *VecTy = SecondType->getAs<VectorType>();
10610 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
10611 const LangOptions::LaxVectorConversionKind LVCKind =
10612 getLangOpts().getLaxVectorConversions();
10613
10614 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
10615 if (getTypeSize(T: SecondType) != getRVVTypeSize(Context&: *this, Ty: BT))
10616 return false;
10617
10618 // If -flax-vector-conversions=all is specified, the types are
10619 // certainly compatible.
10620 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
10621 return true;
10622
10623 // If -flax-vector-conversions=integer is specified, the types are
10624 // compatible if the elements are integer types.
10625 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
10626 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
10627 FirstType->getRVVEltType(Ctx: *this)->isIntegerType();
10628 }
10629
10630 return false;
10631 };
10632
10633 return IsLaxCompatible(FirstType, SecondType) ||
10634 IsLaxCompatible(SecondType, FirstType);
10635}
10636
10637bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
10638 while (true) {
10639 // __strong id
10640 if (const AttributedType *Attr = dyn_cast<AttributedType>(Val&: Ty)) {
10641 if (Attr->getAttrKind() == attr::ObjCOwnership)
10642 return true;
10643
10644 Ty = Attr->getModifiedType();
10645
10646 // X *__strong (...)
10647 } else if (const ParenType *Paren = dyn_cast<ParenType>(Val&: Ty)) {
10648 Ty = Paren->getInnerType();
10649
10650 // We do not want to look through typedefs, typeof(expr),
10651 // typeof(type), or any other way that the type is somehow
10652 // abstracted.
10653 } else {
10654 return false;
10655 }
10656 }
10657}
10658
10659//===----------------------------------------------------------------------===//
10660// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
10661//===----------------------------------------------------------------------===//
10662
10663/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
10664/// inheritance hierarchy of 'rProto'.
10665bool
10666ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
10667 ObjCProtocolDecl *rProto) const {
10668 if (declaresSameEntity(D1: lProto, D2: rProto))
10669 return true;
10670 for (auto *PI : rProto->protocols())
10671 if (ProtocolCompatibleWithProtocol(lProto, rProto: PI))
10672 return true;
10673 return false;
10674}
10675
10676/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
10677/// Class<pr1, ...>.
10678bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
10679 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
10680 for (auto *lhsProto : lhs->quals()) {
10681 bool match = false;
10682 for (auto *rhsProto : rhs->quals()) {
10683 if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto)) {
10684 match = true;
10685 break;
10686 }
10687 }
10688 if (!match)
10689 return false;
10690 }
10691 return true;
10692}
10693
/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType (i.e. id<P...>). Returns true if the two pointer
/// types are compatible. When 'compare' is true, protocol compatibility is
/// additionally checked in the reverse direction (rhs protocol compatible
/// with lhs protocol), which loosens the check for comparisons as opposed
/// to assignments.
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  // Case 1: the LHS is the qualified id (id<P...> = rhs).
  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // First see whether lhsProto is satisfied by one of the protocols in
      // rhs's qualifier list (in either direction when 'compare' is set).
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true)) {
            match = true;
            break;
          }
        }
      }
      // lhsProto was satisfied neither by rhs's qualifiers nor by its
      // static class, so the types are incompatible.
      if (!match)
        return false;
    }

    return true;
  }

  // Case 2: the RHS must be the qualified id (lhs = id<P...>).
  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(CDecl: lhsID, Protocols&: LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      // Every protocol inherited by the LHS class must appear (directly or
      // indirectly) in the RHS qualifier list.
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}
10808
/// canAssignObjCInterfaces - Return true if the two interface types are
/// compatible for assignment from RHS to LHS. This handles validation of any
/// protocol qualifiers on the LHS or RHS.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT) {
  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();

  // If either type represents the built-in 'id' type, return true.
  if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
    return true;

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // A failed check may still succeed for a __kindof RHS: __kindof types
    // are bidirectionally convertible once __kindof and qualifiers are
    // stripped, so retry the assignment in the opposite direction.
    if (!RHS->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfaces(LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
                                   RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this));
  };

  // Casts from or to id<P> are allowed when the other side has compatible
  // protocols.
  if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
    return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false));
  }

  // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
  if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
    return finish(ObjCQualifiedClassTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT));
  }

  // Casts from Class to Class<Foo>, or vice-versa, are allowed.
  // (Only one side can be qualified here; the both-qualified case was
  // handled above.)
  if (LHS->isObjCClass() && RHS->isObjCClass()) {
    return true;
  }

  // If we have 2 user-defined types, fall into that path.
  if (LHS->getInterface() && RHS->getInterface()) {
    return finish(canAssignObjCInterfaces(LHS, RHS));
  }

  return false;
}
10859
/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
/// not OK. For the return type, the opposite is not OK.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // For parameters the LHS is the "expected" type; for the return type
    // the roles are reversed. Only retry when the expected side is __kindof.
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             BlockReturnType);
  };

  // Builtin RHS (id/Class) or 'id' LHS is always assignable.
  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  // A builtin LHS only accepts a builtin or qualified-id RHS.
  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(lhs: RHSOPT, rhs: LHSOPT, compare: false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          lhs: (BlockReturnType ? LHSOPT : RHSOPT),
          rhs: (BlockReturnType ? RHSOPT : LHSOPT), compare: false));
  }

  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      // Subclass relations are acceptable only in the direction dictated by
      // whether this is a block parameter or the block return type.
      if (LHS->getDecl()->isSuperClassOf(I: RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(I: LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}
10923
10924/// Comparison routine for Objective-C protocols to be used with
10925/// llvm::array_pod_sort.
10926static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
10927 ObjCProtocolDecl * const *rhs) {
10928 return (*lhs)->getName().compare(RHS: (*rhs)->getName());
10929}
10930
10931/// getIntersectionOfProtocols - This routine finds the intersection of set
10932/// of protocols inherited from two distinct objective-c pointer objects with
10933/// the given common base.
10934/// It is used to build composite qualifier list of the composite type of
10935/// the conditional expression involving two objective-c pointer objects.
10936static
10937void getIntersectionOfProtocols(ASTContext &Context,
10938 const ObjCInterfaceDecl *CommonBase,
10939 const ObjCObjectPointerType *LHSOPT,
10940 const ObjCObjectPointerType *RHSOPT,
10941 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
10942
10943 const ObjCObjectType* LHS = LHSOPT->getObjectType();
10944 const ObjCObjectType* RHS = RHSOPT->getObjectType();
10945 assert(LHS->getInterface() && "LHS must have an interface base");
10946 assert(RHS->getInterface() && "RHS must have an interface base");
10947
10948 // Add all of the protocols for the LHS.
10949 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
10950
10951 // Start with the protocol qualifiers.
10952 for (auto *proto : LHS->quals()) {
10953 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: LHSProtocolSet);
10954 }
10955
10956 // Also add the protocols associated with the LHS interface.
10957 Context.CollectInheritedProtocols(CDecl: LHS->getInterface(), Protocols&: LHSProtocolSet);
10958
10959 // Add all of the protocols for the RHS.
10960 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
10961
10962 // Start with the protocol qualifiers.
10963 for (auto *proto : RHS->quals()) {
10964 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: RHSProtocolSet);
10965 }
10966
10967 // Also add the protocols associated with the RHS interface.
10968 Context.CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: RHSProtocolSet);
10969
10970 // Compute the intersection of the collected protocol sets.
10971 for (auto *proto : LHSProtocolSet) {
10972 if (RHSProtocolSet.count(Ptr: proto))
10973 IntersectionSet.push_back(Elt: proto);
10974 }
10975
10976 // Compute the set of protocols that is implied by either the common type or
10977 // the protocols within the intersection.
10978 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
10979 Context.CollectInheritedProtocols(CDecl: CommonBase, Protocols&: ImpliedProtocols);
10980
10981 // Remove any implied protocols from the list of inherited protocols.
10982 if (!ImpliedProtocols.empty()) {
10983 llvm::erase_if(C&: IntersectionSet, P: [&](ObjCProtocolDecl *proto) -> bool {
10984 return ImpliedProtocols.contains(Ptr: proto);
10985 });
10986 }
10987
10988 // Sort the remaining protocols by name.
10989 llvm::array_pod_sort(Start: IntersectionSet.begin(), End: IntersectionSet.end(),
10990 Compare: compareObjCProtocolsByName);
10991}
10992
10993/// Determine whether the first type is a subtype of the second.
10994static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
10995 QualType rhs) {
10996 // Common case: two object pointers.
10997 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
10998 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
10999 if (lhsOPT && rhsOPT)
11000 return ctx.canAssignObjCInterfaces(LHSOPT: lhsOPT, RHSOPT: rhsOPT);
11001
11002 // Two block pointers.
11003 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
11004 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
11005 if (lhsBlock && rhsBlock)
11006 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
11007
11008 // If either is an unqualified 'id' and the other is a block, it's
11009 // acceptable.
11010 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
11011 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
11012 return true;
11013
11014 return false;
11015}
11016
11017// Check that the given Objective-C type argument lists are equivalent.
11018static bool sameObjCTypeArgs(ASTContext &ctx,
11019 const ObjCInterfaceDecl *iface,
11020 ArrayRef<QualType> lhsArgs,
11021 ArrayRef<QualType> rhsArgs,
11022 bool stripKindOf) {
11023 if (lhsArgs.size() != rhsArgs.size())
11024 return false;
11025
11026 ObjCTypeParamList *typeParams = iface->getTypeParamList();
11027 if (!typeParams)
11028 return false;
11029
11030 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
11031 if (ctx.hasSameType(T1: lhsArgs[i], T2: rhsArgs[i]))
11032 continue;
11033
11034 switch (typeParams->begin()[i]->getVariance()) {
11035 case ObjCTypeParamVariance::Invariant:
11036 if (!stripKindOf ||
11037 !ctx.hasSameType(T1: lhsArgs[i].stripObjCKindOfType(ctx),
11038 T2: rhsArgs[i].stripObjCKindOfType(ctx))) {
11039 return false;
11040 }
11041 break;
11042
11043 case ObjCTypeParamVariance::Covariant:
11044 if (!canAssignObjCObjectTypes(ctx, lhs: lhsArgs[i], rhs: rhsArgs[i]))
11045 return false;
11046 break;
11047
11048 case ObjCTypeParamVariance::Contravariant:
11049 if (!canAssignObjCObjectTypes(ctx, lhs: rhsArgs[i], rhs: lhsArgs[i]))
11050 return false;
11051 break;
11052 }
11053 }
11054
11055 return true;
11056}
11057
/// areCommonBaseCompatible - Find a composite pointer type for two
/// Objective-C object pointers by walking the class hierarchy. Returns the
/// pointer type of the most-derived common base (with merged protocol
/// qualifiers and type arguments), or a null QualType if none exists.
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  // Both sides must be rooted in a user-defined interface.
  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(D1: LHS->getInterface(), D2: RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: LHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: LHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: LHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      // Nothing changed: the LHS type itself is the common base.
      return getObjCObjectPointerType(ObjectT: QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(Val: RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: RHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: RHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: RHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      // Nothing changed: the RHS type itself is the common base.
      return getObjCObjectPointerType(ObjectT: QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  // No common base exists.
  return {};
}
11180
/// canAssignObjCInterfaces - Return true if RHS (an interface type) can be
/// assigned to LHS: RHS must be a subclass of LHS, RHS must satisfy all of
/// LHS's protocol qualifiers, and any type arguments on LHS must be
/// compatible with the corresponding (substituted) arguments on RHS.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(I: RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(CDecl: RHSPI, Protocols&: SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    // Each LHS protocol must be found (by name) somewhere in RHS's
    // collected protocol hierarchy.
    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(PName: LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(D1: RHSSuper->getInterface(), D2: LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                          lhsArgs: LHS->getTypeArgs(), rhsArgs: RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
11242
11243bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
11244 // get the "pointed to" types
11245 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
11246 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
11247
11248 if (!LHSOPT || !RHSOPT)
11249 return false;
11250
11251 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
11252 canAssignObjCInterfaces(LHSOPT: RHSOPT, RHSOPT: LHSOPT);
11253}
11254
11255bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
11256 return canAssignObjCInterfaces(
11257 LHSOPT: getObjCObjectPointerType(ObjectT: To)->castAs<ObjCObjectPointerType>(),
11258 RHSOPT: getObjCObjectPointerType(ObjectT: From)->castAs<ObjCObjectPointerType>());
11259}
11260
11261/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
11262/// both shall have the identically qualified version of a compatible type.
11263/// C99 6.2.7p1: Two types have compatible types if their types are the
11264/// same. See 6.7.[2,3,5] for additional rules.
11265bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
11266 bool CompareUnqualified) {
11267 if (getLangOpts().CPlusPlus)
11268 return hasSameType(T1: LHS, T2: RHS);
11269
11270 return !mergeTypes(LHS, RHS, OfBlockPointer: false, Unqualified: CompareUnqualified).isNull();
11271}
11272
/// propertyTypesAreCompatible - Objective-C property types are compatible
/// under exactly the ordinary type-compatibility rules.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}
11276
11277bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
11278 return !mergeTypes(LHS, RHS, OfBlockPointer: true).isNull();
11279}
11280
11281/// mergeTransparentUnionType - if T is a transparent union type and a member
11282/// of T is compatible with SubType, return the merged type, else return
11283/// QualType()
11284QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
11285 bool OfBlockPointer,
11286 bool Unqualified) {
11287 if (const RecordType *UT = T->getAsUnionType()) {
11288 RecordDecl *UD = UT->getDecl();
11289 if (UD->hasAttr<TransparentUnionAttr>()) {
11290 for (const auto *I : UD->fields()) {
11291 QualType ET = I->getType().getUnqualifiedType();
11292 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
11293 if (!MT.isNull())
11294 return MT;
11295 }
11296 }
11297 }
11298
11299 return {};
11300}
11301
11302/// mergeFunctionParameterTypes - merge two types which appear as function
11303/// parameter types
11304QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
11305 bool OfBlockPointer,
11306 bool Unqualified) {
11307 // GNU extension: two types are compatible if they appear as a function
11308 // argument, one of the types is a transparent union type and the other
11309 // type is compatible with a union member
11310 QualType lmerge = mergeTransparentUnionType(T: lhs, SubType: rhs, OfBlockPointer,
11311 Unqualified);
11312 if (!lmerge.isNull())
11313 return lmerge;
11314
11315 QualType rmerge = mergeTransparentUnionType(T: rhs, SubType: lhs, OfBlockPointer,
11316 Unqualified);
11317 if (!rmerge.isNull())
11318 return rmerge;
11319
11320 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
11321}
11322
/// mergeFunctionTypes - Merge two function types into their C composite
/// type, or return a null QualType if they are incompatible. Handles return
/// types, ExtInfo (calling convention, regparm, noreturn, ...), prototyped
/// vs. unprototyped functions, parameter lists, and function effects.
/// Returns one of the operands unchanged when it already equals the merged
/// result (tracked via allLTypes/allRTypes).
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(Val: lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(Val: rbase);
  // Track whether the merged result is identical to lhs (resp. rhs), so we
  // can return the original type without building a new one.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, OfBlockPointer: true, Unqualified: UnqualifiedResult, BlockReturnType: true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), OfBlockPointer: false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  CanQualType LRetType = getCanonicalType(T: lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(T: rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(T: retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(T: retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information.  So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards.  The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that.  So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions.  However,
  // neither C nor C++ generally uses this kind of subtype reasoning.  Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least.  So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(noReturn: NoReturn);

  std::optional<FunctionEffectSet> MergedFX;

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    // Function effects are handled similarly to noreturn, see above.
    FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
    FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
    if (LHSFX != RHSFX) {
      if (IsConditionalOperator)
        MergedFX = FunctionEffectSet::getIntersection(LHS: LHSFX, RHS: RHSFX);
      else {
        FunctionEffectSet::Conflicts Errs;
        MergedFX = FunctionEffectSet::getUnion(LHS: LHSFX, RHS: RHSFX, Errs);
        // Here we're discarding a possible error due to conflicts in the effect
        // sets. But we're not in a context where we can report it. The
        // operation does however guarantee maintenance of invariants.
      }
      if (*MergedFX != LHSFX)
        allLTypes = false;
      if (*MergedFX != RHSFX)
        allRTypes = false;
    }

    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(FirstFnType: lproto, SecondFnType: rproto, CanUseFirst&: canUseLeft, CanUseSecond&: canUseRight,
                               NewParamInfos&: newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lhs: lParamType, rhs: rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(Elt: paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(T: paramType) != getCanonicalType(T: lParamType))
        allLTypes = false;
      if (getCanonicalType(T: paramType) != getCanonicalType(T: rParamType))
        allRTypes = false;
    }

    // If one operand already equals the merged type, return it unchanged.
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: types, EPI);
  }

  // At most one side has a prototype; the merged type can only equal the
  // prototyped side.
  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *Enum = paramTy->getAs<EnumType>()) {
        paramTy = Enum->getDecl()->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(T: paramTy) ||
          getCanonicalType(T: paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: proto->getParamTypes(), EPI);
  }

  // Neither side has a prototype: merge as a no-prototype function type.
  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(ResultTy: retType, Info: einfo);
}
11544
11545/// Given that we have an enum type and a non-enum type, try to merge them.
11546static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
11547 QualType other, bool isBlockReturnType) {
11548 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
11549 // a signed integer type, or an unsigned integer type.
11550 // Compatibility is based on the underlying type, not the promotion
11551 // type.
11552 QualType underlyingType = ET->getDecl()->getIntegerType();
11553 if (underlyingType.isNull())
11554 return {};
11555 if (Context.hasSameType(T1: underlyingType, T2: other))
11556 return other;
11557
11558 // In block return types, we're more permissive and accept any
11559 // integral type of the same size.
11560 if (isBlockReturnType && other->isIntegerType() &&
11561 Context.getTypeSize(T: underlyingType) == Context.getTypeSize(T: other))
11562 return other;
11563
11564 return {};
11565}
11566
11567QualType ASTContext::mergeTagDefinitions(QualType LHS, QualType RHS) {
11568 // C17 and earlier and C++ disallow two tag definitions within the same TU
11569 // from being compatible.
11570 if (LangOpts.CPlusPlus || !LangOpts.C23)
11571 return {};
11572
11573 // C23, on the other hand, requires the members to be "the same enough", so
11574 // we use a structural equivalence check.
11575 StructuralEquivalenceContext::NonEquivalentDeclSet NonEquivalentDecls;
11576 StructuralEquivalenceContext Ctx(
11577 getLangOpts(), *this, *this, NonEquivalentDecls,
11578 StructuralEquivalenceKind::Default, /*StrictTypeSpelling=*/false,
11579 /*Complain=*/false, /*ErrorOnTagTypeMismatch=*/true);
11580 return Ctx.IsEquivalent(T1: LHS, T2: RHS) ? LHS : QualType{};
11581}
11582
/// Merge two types following C compatible/composite type rules (cf. C99
/// 6.2.7), returning the merged type or a null QualType when the types are
/// not compatible.
///
/// \param OfBlockPointer true when merging pointee types of block pointers;
///        enables Objective-C block-specific allowances.
/// \param Unqualified when true, merge the unqualified forms of both types.
/// \param BlockReturnType true when merging block return types, where
///        enum-vs-integer merging is more permissive.
/// \param IsConditionalOperator forwarded to function-type merging, where it
///        selects how function effect sets combine (intersection vs. union).
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHS: LHSRefTy->getPointeeType(), RHS: RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(T: LHS),
           RHSCan = getCanonicalType(T: RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type
    // mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
        LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
        !LQuals.getPointerAuth().isEquivalent(Other: RQuals.getPointerAuth()) ||
        LQuals.hasUnaligned() != RQuals.hasUnaligned())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS, RHS: getObjCGCQualType(T: RHS, GCAttr: Qualifiers::Strong));
    }
    if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS: getObjCGCQualType(T: LHS, GCAttr: Qualifiers::Strong), RHS);
    }
    return {};
  }

  // Okay, qualifiers are equal.

  Type::TypeClass LHSClass = LHSCan->getTypeClass();
  Type::TypeClass RHSClass = RHSCan->getTypeClass();

  // We want to consider the two function types to be the same for these
  // comparisons, just force one to the other.
  if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
  if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;

  // Same as above for arrays
  if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
    LHSClass = Type::ConstantArray;
  if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
    RHSClass = Type::ConstantArray;

  // ObjCInterfaces are just specialized ObjCObjects.
  if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
  if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;

  // Canonicalize ExtVector -> Vector.
  if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
  if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;

  // If the canonical type classes don't match.
  if (LHSClass != RHSClass) {
    // Note that we only have special rules for turning block enum
    // returns into block int returns, not vice-versa.
    if (const auto *ETy = LHS->getAs<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: RHS, isBlockReturnType: false);
    }
    if (const EnumType* ETy = RHS->getAs<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: LHS, isBlockReturnType: BlockReturnType);
    }
    // allow block pointer type to match an 'id' type.
    if (OfBlockPointer && !BlockReturnType) {
      if (LHS->isObjCIdType() && RHS->isBlockPointerType())
        return LHS;
      if (RHS->isObjCIdType() && LHS->isBlockPointerType())
        return RHS;
    }
    // Allow __auto_type to match anything; it merges to the type with more
    // information.
    if (const auto *AT = LHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return RHS;
    }
    if (const auto *AT = RHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return LHS;
    }
    return {};
  }

  // The canonical type classes match.
  switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
    llvm_unreachable("C++ should never be in mergeTypes");

  case Type::ObjCInterface:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::FunctionProto:
  case Type::ExtVector:
    llvm_unreachable("Types are eliminated above");

  case Type::Pointer:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    // Prefer returning one of the originals so sugar (typedefs) survives.
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getPointerType(T: ResultType);
  }
  case Type::BlockPointer:
  {
    // Merge two block pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    if (getLangOpts().OpenCL) {
      Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
      Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
      // Blocks can't be an expression in a ternary operator (OpenCL v2.0
      // 6.12.5) thus the following check is asymmetric.
      if (!LHSPteeQual.isAddressSpaceSupersetOf(other: RHSPteeQual, Ctx: *this))
        return {};
      LHSPteeQual.removeAddressSpace();
      RHSPteeQual.removeAddressSpace();
      LHSPointee =
          QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
      RHSPointee =
          QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getBlockPointerType(T: ResultType);
  }
  case Type::Atomic:
  {
    // Merge two atomic types by merging their value types, while trying to
    // preserve typedef info.
    QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
    QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
    if (Unqualified) {
      LHSValue = LHSValue.getUnqualifiedType();
      RHSValue = RHSValue.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSValue, RHS: RHSValue, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSValue) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSValue) == getCanonicalType(T: ResultType))
      return RHS;
    return getAtomicType(T: ResultType);
  }
  case Type::ConstantArray:
  {
    const ConstantArrayType* LCAT = getAsConstantArrayType(T: LHS);
    const ConstantArrayType* RCAT = getAsConstantArrayType(T: RHS);
    if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
      return {};

    QualType LHSElem = getAsArrayType(T: LHS)->getElementType();
    QualType RHSElem = getAsArrayType(T: RHS)->getElementType();
    if (Unqualified) {
      LHSElem = LHSElem.getUnqualifiedType();
      RHSElem = RHSElem.getUnqualifiedType();
    }

    QualType ResultType = mergeTypes(LHS: LHSElem, RHS: RHSElem, OfBlockPointer: false, Unqualified);
    if (ResultType.isNull())
      return {};

    const VariableArrayType* LVAT = getAsVariableArrayType(T: LHS);
    const VariableArrayType* RVAT = getAsVariableArrayType(T: RHS);

    // If either side is a variable array, and both are complete, check whether
    // the current dimension is definite.
    if (LVAT || RVAT) {
      // Returns {true, size} when the dimension has a known constant size
      // (either a ConstantArrayType, or a VLA whose size expression folds
      // to an integer constant); {false, _} otherwise.
      auto SizeFetch = [this](const VariableArrayType* VAT,
                              const ConstantArrayType* CAT)
          -> std::pair<bool,llvm::APInt> {
        if (VAT) {
          std::optional<llvm::APSInt> TheInt;
          Expr *E = VAT->getSizeExpr();
          if (E && (TheInt = E->getIntegerConstantExpr(Ctx: *this)))
            return std::make_pair(x: true, y&: *TheInt);
          return std::make_pair(x: false, y: llvm::APSInt());
        }
        if (CAT)
          return std::make_pair(x: true, y: CAT->getSize());
        return std::make_pair(x: false, y: llvm::APInt());
      };

      bool HaveLSize, HaveRSize;
      llvm::APInt LSize, RSize;
      std::tie(args&: HaveLSize, args&: LSize) = SizeFetch(LVAT, LCAT);
      std::tie(args&: HaveRSize, args&: RSize) = SizeFetch(RVAT, RCAT);
      if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(I1: LSize, I2: RSize))
        return {}; // Definite, but unequal, array dimension
    }

    if (LCAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RCAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: LCAT->getSize(),
                                  SizeExpr: LCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (RCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: RCAT->getSize(),
                                  SizeExpr: RCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (LVAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RVAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of LHS, but the type
      // has to be different.
      return LHS;
    }
    if (RVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of RHS, but the type
      // has to be different.
      return RHS;
    }
    if (getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType)) return LHS;
    if (getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType)) return RHS;
    return getIncompleteArrayType(elementType: ResultType, ASM: ArraySizeModifier(), elementTypeQuals: 0);
  }
  case Type::FunctionNoProto:
    return mergeFunctionTypes(lhs: LHS, rhs: RHS, OfBlockPointer, Unqualified,
                              /*AllowCXX=*/false, IsConditionalOperator);
  case Type::Record:
  case Type::Enum:
    return mergeTagDefinitions(LHS, RHS);
  case Type::Builtin:
    // Only exactly equal builtin types are compatible, which is tested above.
    return {};
  case Type::Complex:
    // Distinct complex types are incompatible.
    return {};
  case Type::Vector:
    // FIXME: The merged type should be an ExtVector!
    if (areCompatVectorTypes(LHS: LHSCan->castAs<VectorType>(),
                             RHS: RHSCan->castAs<VectorType>()))
      return LHS;
    return {};
  case Type::ConstantMatrix:
    if (areCompatMatrixTypes(LHS: LHSCan->castAs<ConstantMatrixType>(),
                             RHS: RHSCan->castAs<ConstantMatrixType>()))
      return LHS;
    return {};
  case Type::ObjCObject: {
    // Check if the types are assignment compatible.
    // FIXME: This should be type compatibility, e.g. whether
    // "LHS x; RHS x;" at global scope is legal.
    if (canAssignObjCInterfaces(LHS: LHS->castAs<ObjCObjectType>(),
                                RHS: RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
              RHSOPT: RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
                                RHSOPT: RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::ArrayParameter:
    assert(LHS != RHS &&
           "Equivalent ArrayParameter types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  case Type::HLSLAttributedResource: {
    const HLSLAttributedResourceType *LHSTy =
        LHS->castAs<HLSLAttributedResourceType>();
    const HLSLAttributedResourceType *RHSTy =
        RHS->castAs<HLSLAttributedResourceType>();
    assert(LHSTy->getWrappedType() == RHSTy->getWrappedType() &&
           LHSTy->getWrappedType()->isHLSLResourceType() &&
           "HLSLAttributedResourceType should always wrap __hlsl_resource_t");

    if (LHSTy->getAttrs() == RHSTy->getAttrs() &&
        LHSTy->getContainedType() == RHSTy->getContainedType())
      return LHS;
    return {};
  }
  case Type::HLSLInlineSpirv:
    const HLSLInlineSpirvType *LHSTy = LHS->castAs<HLSLInlineSpirvType>();
    const HLSLInlineSpirvType *RHSTy = RHS->castAs<HLSLInlineSpirvType>();

    // Inline SPIR-V types merge only when opcode, size, alignment, and every
    // operand agree.
    if (LHSTy->getOpcode() == RHSTy->getOpcode() &&
        LHSTy->getSize() == RHSTy->getSize() &&
        LHSTy->getAlignment() == RHSTy->getAlignment()) {
      for (size_t I = 0; I < LHSTy->getOperands().size(); I++)
        if (LHSTy->getOperands()[I] != RHSTy->getOperands()[I])
          return {};

      return LHS;
    }
    return {};
  }

  llvm_unreachable("Invalid Type::Class!");
}
11974
/// Merge the ExtParameterInfo lists of two function prototypes.
///
/// Returns false if the infos conflict in a way that cannot be merged (any
/// difference other than the noescape flag). On success, \p CanUseFirst and
/// \p CanUseSecond report whether each original type's infos already equal
/// the merged result, and \p NewParamInfos receives the merged list (left
/// empty when no explicit info list is needed at all).
bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty");
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if neither type carries ext parameter infos, there is
  // nothing to merge.
  if (!FirstHasInfo && !SecondHasInfo)
    return true;

  bool NeedParamInfo = false;
  size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
                          : SecondFnType->getExtParameterInfos().size();

  for (size_t I = 0; I < E; ++I) {
    // A side without an info list is treated as having default-constructed
    // (all-clear) infos for every parameter.
    FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
    if (FirstHasInfo)
      FirstParam = FirstFnType->getExtParameterInfo(I);
    if (SecondHasInfo)
      SecondParam = SecondFnType->getExtParameterInfo(I);

    // Cannot merge unless everything except the noescape flag matches.
    if (FirstParam.withIsNoEscape(NoEscape: false) != SecondParam.withIsNoEscape(NoEscape: false))
      return false;

    // noescape merges conjunctively: the merged parameter is noescape only
    // if both sides say so.
    bool FirstNoEscape = FirstParam.isNoEscape();
    bool SecondNoEscape = SecondParam.isNoEscape();
    bool IsNoEscape = FirstNoEscape && SecondNoEscape;
    NewParamInfos.push_back(Elt: FirstParam.withIsNoEscape(NoEscape: IsNoEscape));
    if (NewParamInfos.back().getOpaqueValue())
      NeedParamInfo = true;
    if (FirstNoEscape != IsNoEscape)
      CanUseFirst = false;
    if (SecondNoEscape != IsNoEscape)
      CanUseSecond = false;
  }

  // If every merged info ended up trivial, the merged function type does
  // not need an explicit info list.
  if (!NeedParamInfo)
    NewParamInfos.clear();

  return true;
}
12021
12022void ASTContext::ResetObjCLayout(const ObjCInterfaceDecl *D) {
12023 if (auto It = ObjCLayouts.find(Val: D); It != ObjCLayouts.end()) {
12024 It->second = nullptr;
12025 for (auto *SubClass : ObjCSubClasses[D])
12026 ResetObjCLayout(D: SubClass);
12027 }
12028}
12029
12030/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
12031/// 'RHS' attributes and returns the merged version; including for function
12032/// return types.
12033QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
12034 QualType LHSCan = getCanonicalType(T: LHS),
12035 RHSCan = getCanonicalType(T: RHS);
12036 // If two types are identical, they are compatible.
12037 if (LHSCan == RHSCan)
12038 return LHS;
12039 if (RHSCan->isFunctionType()) {
12040 if (!LHSCan->isFunctionType())
12041 return {};
12042 QualType OldReturnType =
12043 cast<FunctionType>(Val: RHSCan.getTypePtr())->getReturnType();
12044 QualType NewReturnType =
12045 cast<FunctionType>(Val: LHSCan.getTypePtr())->getReturnType();
12046 QualType ResReturnType =
12047 mergeObjCGCQualifiers(LHS: NewReturnType, RHS: OldReturnType);
12048 if (ResReturnType.isNull())
12049 return {};
12050 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
12051 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
12052 // In either case, use OldReturnType to build the new function type.
12053 const auto *F = LHS->castAs<FunctionType>();
12054 if (const auto *FPT = cast<FunctionProtoType>(Val: F)) {
12055 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
12056 EPI.ExtInfo = getFunctionExtInfo(t: LHS);
12057 QualType ResultType =
12058 getFunctionType(ResultTy: OldReturnType, Args: FPT->getParamTypes(), EPI);
12059 return ResultType;
12060 }
12061 }
12062 return {};
12063 }
12064
12065 // If the qualifiers are different, the types can still be merged.
12066 Qualifiers LQuals = LHSCan.getLocalQualifiers();
12067 Qualifiers RQuals = RHSCan.getLocalQualifiers();
12068 if (LQuals != RQuals) {
12069 // If any of these qualifiers are different, we have a type mismatch.
12070 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
12071 LQuals.getAddressSpace() != RQuals.getAddressSpace())
12072 return {};
12073
12074 // Exactly one GC qualifier difference is allowed: __strong is
12075 // okay if the other type has no GC qualifier but is an Objective
12076 // C object pointer (i.e. implicitly strong by default). We fix
12077 // this by pretending that the unqualified type was actually
12078 // qualified __strong.
12079 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
12080 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
12081 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
12082
12083 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
12084 return {};
12085
12086 if (GC_L == Qualifiers::Strong)
12087 return LHS;
12088 if (GC_R == Qualifiers::Strong)
12089 return RHS;
12090 return {};
12091 }
12092
12093 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
12094 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12095 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12096 QualType ResQT = mergeObjCGCQualifiers(LHS: LHSBaseQT, RHS: RHSBaseQT);
12097 if (ResQT == LHSBaseQT)
12098 return LHS;
12099 if (ResQT == RHSBaseQT)
12100 return RHS;
12101 }
12102 return {};
12103}
12104
12105//===----------------------------------------------------------------------===//
12106// Integer Predicates
12107//===----------------------------------------------------------------------===//
12108
12109unsigned ASTContext::getIntWidth(QualType T) const {
12110 if (const auto *ET = T->getAs<EnumType>())
12111 T = ET->getDecl()->getIntegerType();
12112 if (T->isBooleanType())
12113 return 1;
12114 if (const auto *EIT = T->getAs<BitIntType>())
12115 return EIT->getNumBits();
12116 // For builtin types, just use the standard type sizing method
12117 return (unsigned)getTypeSize(T);
12118}
12119
/// Return the unsigned counterpart of an integer, fixed-point, enum,
/// _BitInt, or integer-vector type. Types that are already unsigned (and
/// have no distinct unsigned counterpart) are returned unchanged.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingUnsignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: true, NumBits: EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  // At this point T must be a builtin type.
  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  // Fixed-point (Embedded-C) types map to their unsigned/saturating-unsigned
  // counterparts.
  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Anything else must already be unsigned; return it as-is.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}
12193
/// Return the signed counterpart of an integer, fixed-point, enum,
/// _BitInt, or integer-vector type. Types that are already signed (and
/// have no distinct signed counterpart) are returned unchanged.
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingSignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: false, NumBits: EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  // At this point T must be a builtin type.
  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
    // Plain `char` is mapped to `signed char` even if it's already signed
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char8:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  // Fixed-point (Embedded-C) types map to their signed/saturating-signed
  // counterparts.
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    // Anything else must already be signed; return it as-is.
    assert(
        (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
        "Unexpected signed integer or fixed point type");
    return T;
  }
}
12267
12268ASTMutationListener::~ASTMutationListener() = default;
12269
// Default implementation is a no-op; listeners override this to observe a
// function's return type being deduced.
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
12272
12273//===----------------------------------------------------------------------===//
12274// Builtin Type Computation
12275//===----------------------------------------------------------------------===//
12276
12277/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
12278/// pointer over the consumed characters. This returns the resultant type. If
12279/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
12280/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
12281/// a vector of "i*".
12282///
12283/// RequiresICE is filled in on return to indicate whether the value is required
12284/// to be an Integer Constant Expression.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0; // number of 'L'-equivalents: 0=int, 1=long, 2=long long, 3=__int128
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  // Tracks whether one of the mutually-exclusive width modifiers
  // ('N', 'W', 'Z', 'O') has been seen; used only by the asserts below.
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break; // not a modifier: back up, fall out
    case 'I':
      // The corresponding argument must be an integer constant expression.
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to whichever of long / long long is the target's 64-bit type.
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to whichever of int / long / long long is the target's 32-bit type.
      switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      // 'O' means long for OpenCL targets and long long otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'x':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    // 'd' = double, 'Ld' = long double, 'LLd' = __float128.
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    // 'i' combines with the length/sign modifiers collected above.
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F': // CFConstantString (CoreFoundation).
    Type = Context.getCFConstantStringType();
    break;
  case 'G': // Objective-C 'id'.
    Type = Context.getObjCIdType();
    break;
  case 'H': // Objective-C 'SEL'.
    Type = Context.getObjCSelType();
    break;
  case 'M': // Objective-C 'super' type.
    Type = Context.getObjCSuperType();
    break;
  case 'a': // __builtin_va_list.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Type);
    else
      Type = Context.getLValueReferenceType(Type);
    break;
  case 'q': {
    // 'q<N><elt>' = scalable vector of N elements; the element type is
    // decoded recursively with modifiers disabled.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(ElementType, NumElements);
    break;
  }
  case 'Q': {
    // Target-specific builtin types selected by a second letter.
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    case 'b': {
      Type = Context.AMDGPUBufferRsrcTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    // 'V<N><elt>' = generic vector of N elements.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic);
    break;
  }
  case 'E': {
    // 'E<N><elt>' = ext_vector of N elements.
    char *End;

    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    Type = Context.getExtVectorType(ElementType, NumElements);
    break;
  }
  case 'X': {
    // '_Complex' of the following element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(ElementType);
    break;
  }
  case 'Y': // ptrdiff_t.
    Type = Context.getPointerDiffType();
    break;
  case 'P': // FILE*; only available once <stdio.h> has been seen.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J': // jmp_buf ('SJ' = sigjmp_buf); requires <setjmp.h>.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K': // ucontext_t; requires <ucontext.h>.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p': // pid_t.
    Type = Context.getProcessIDType();
    break;
  case 'm':
    Type = Context.MFloat8Ty;
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break; // not a suffix modifier: back up
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(Str, &End, 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            Type,
            Context.getLangASForBuiltinAddressSpace(AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(Type);
      else
        Type = Context.getLValueReferenceType(Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}
12635
12636// On some targets such as PowerPC, some of the builtins are defined with custom
12637// type descriptors for target-dependent types. These descriptors are decoded in
12638// other functions, but it may be useful to be able to fall back to default
12639// descriptor decoding to define builtins mixing target-dependent and target-
12640// independent types. This function allows decoding one type descriptor with
12641// default decoding.
12642QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
12643 GetBuiltinTypeError &Error, bool &RequireICE,
12644 bool AllowTypeModifiers) const {
12645 return DecodeTypeFromStr(Str, Context, Error, RequiresICE&: RequireICE, AllowTypeModifiers);
12646}
12647
12648/// GetBuiltinType - Return the type for the specified builtin.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  // The type string encodes the return type first, then each argument type,
  // optionally terminated by '.' to mark a variadic builtin. An empty string
  // means no type information is available for this builtin.
  const char *TypeStr = BuiltinInfo.getTypeString(Id);
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // Decode the result type; DecodeTypeFromStr advances TypeStr past the
  // consumed descriptor.
  QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
                                       RequiresICE, true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // Decode each argument type until the end of string or the variadic marker.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Ty);
  }

  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(getDefaultCallingConvention(
      Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
  if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);


  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResType, EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // In C++, no-throw builtins get an exception spec: noexcept in C++11 and
  // later, an empty dynamic exception spec (throw()) before that.
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResType, ArgTypes, EPI);
}
12712
/// Compute the base GVA linkage of FD, before any adjustments for attributes
/// or external AST sources are applied.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (!FD->isUserProvided())
    return GVA_DiscardableODR;

  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  // Non-inline functions keep the linkage computed from their template
  // specialization kind.
  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      isa<CXXConstructorDecl>(FD) &&
      cast<CXXConstructorDecl>(FD)->isInheritingConstructor())
    // Our approach to inheriting constructors is fundamentally different from
    // that used by the MS ABI, so keep our inheriting constructor thunks
    // internal rather than trying to pick an unambiguous mangling for them.
    return GVA_Internal;

  return GVA_DiscardableODR;
}
12781
12782static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
12783 const Decl *D, GVALinkage L) {
12784 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
12785 // dllexport/dllimport on inline functions.
12786 if (D->hasAttr<DLLImportAttr>()) {
12787 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
12788 return GVA_AvailableExternally;
12789 } else if (D->hasAttr<DLLExportAttr>()) {
12790 if (L == GVA_DiscardableODR)
12791 return GVA_StrongODR;
12792 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
12793 // Device-side functions with __global__ attribute must always be
12794 // visible externally so they can be launched from host.
12795 if (D->hasAttr<CUDAGlobalAttr>() &&
12796 (L == GVA_DiscardableODR || L == GVA_Internal))
12797 return GVA_StrongODR;
12798 // Single source offloading languages like CUDA/HIP need to be able to
12799 // access static device variables from host code of the same compilation
12800 // unit. This is done by externalizing the static variable with a shared
12801 // name between the host and device compilation which is the same for the
12802 // same compilation unit whereas different among different compilation
12803 // units.
12804 if (Context.shouldExternalize(D))
12805 return GVA_StrongExternal;
12806 }
12807 return L;
12808}
12809
12810/// Adjust the GVALinkage for a declaration based on what an external AST source
12811/// knows about whether there can be other definitions of this declaration.
12812static GVALinkage
12813adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
12814 GVALinkage L) {
12815 ExternalASTSource *Source = Ctx.getExternalSource();
12816 if (!Source)
12817 return L;
12818
12819 switch (Source->hasExternalDefinitions(D)) {
12820 case ExternalASTSource::EK_Never:
12821 // Other translation units rely on us to provide the definition.
12822 if (L == GVA_DiscardableODR)
12823 return GVA_StrongODR;
12824 break;
12825
12826 case ExternalASTSource::EK_Always:
12827 return GVA_AvailableExternally;
12828
12829 case ExternalASTSource::EK_ReplyHazy:
12830 break;
12831 }
12832 return L;
12833}
12834
12835GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
12836 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: FD,
12837 L: adjustGVALinkageForAttributes(Context: *this, D: FD,
12838 L: basicGVALinkageForFunction(Context: *this, FD)));
12839}
12840
/// Compute the base GVA linkage of VD, before any adjustments for attributes
/// or external AST sources are applied.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    // Walk outward to the nearest enclosing FunctionDecl.
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  // Under the Microsoft ABI, explicit specializations that are static data
  // members get StrongODR linkage; everything else keeps StrongLinkage.
  case TSK_ExplicitSpecialization:
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}
12927
12928GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
12929 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: VD,
12930 L: adjustGVALinkageForAttributes(Context: *this, D: VD,
12931 L: basicGVALinkageForVariable(Context: *this, VD)));
12932}
12933
/// Determine whether the declaration D must be emitted in this translation
/// unit (as opposed to being discardable or deferrable until used).
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  // First, dispatch on the kind of declaration. Variables and functions fall
  // through to the common checks below; other kinds are decided immediately.
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    // Variable templates and their partial specializations are patterns,
    // not emittable entities.
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Function definitions with the sycl_kernel_entry_point attribute are
    // required during device compilation so that SYCL kernel caller offload
    // entry points are emitted.
    if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>())
      return true;

    // FIXME: Functions declared with SYCL_EXTERNAL are required during
    // device compilation.

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
  }

  // Only variables remain at this point (see the dispatch above).
  const auto *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  if (VD->shouldEmitInExternalSource())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->hasInitWithSideEffects())
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) {
    for (const auto *BD : DD->flat_bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(BindingVD))
          return true;
  }

  return false;
}
13060
13061void ASTContext::forEachMultiversionedFunctionVersion(
13062 const FunctionDecl *FD,
13063 llvm::function_ref<void(FunctionDecl *)> Pred) const {
13064 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
13065 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
13066 FD = FD->getMostRecentDecl();
13067 // FIXME: The order of traversal here matters and depends on the order of
13068 // lookup results, which happens to be (mostly) oldest-to-newest, but we
13069 // shouldn't rely on that.
13070 for (auto *CurDecl :
13071 FD->getDeclContext()->getRedeclContext()->lookup(Name: FD->getDeclName())) {
13072 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
13073 if (CurFD && hasSameType(T1: CurFD->getType(), T2: FD->getType()) &&
13074 SeenDecls.insert(V: CurFD).second) {
13075 Pred(CurFD);
13076 }
13077 }
13078}
13079
13080CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
13081 bool IsCXXMethod,
13082 bool IsBuiltin) const {
13083 // Pass through to the C++ ABI object
13084 if (IsCXXMethod)
13085 return ABI->getDefaultMethodCallConv(isVariadic: IsVariadic);
13086
13087 // Builtins ignore user-specified default calling convention and remain the
13088 // Target's default calling convention.
13089 if (!IsBuiltin) {
13090 switch (LangOpts.getDefaultCallingConv()) {
13091 case LangOptions::DCC_None:
13092 break;
13093 case LangOptions::DCC_CDecl:
13094 return CC_C;
13095 case LangOptions::DCC_FastCall:
13096 if (getTargetInfo().hasFeature(Feature: "sse2") && !IsVariadic)
13097 return CC_X86FastCall;
13098 break;
13099 case LangOptions::DCC_StdCall:
13100 if (!IsVariadic)
13101 return CC_X86StdCall;
13102 break;
13103 case LangOptions::DCC_VectorCall:
13104 // __vectorcall cannot be applied to variadic functions.
13105 if (!IsVariadic)
13106 return CC_X86VectorCall;
13107 break;
13108 case LangOptions::DCC_RegCall:
13109 // __regcall cannot be applied to variadic functions.
13110 if (!IsVariadic)
13111 return CC_X86RegCall;
13112 break;
13113 case LangOptions::DCC_RtdCall:
13114 if (!IsVariadic)
13115 return CC_M68kRTD;
13116 break;
13117 }
13118 }
13119 return Target->getDefaultCallingConv();
13120}
13121
13122bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
13123 // Pass through to the C++ ABI object
13124 return ABI->isNearlyEmpty(RD);
13125}
13126
13127VTableContextBase *ASTContext::getVTableContext() {
13128 if (!VTContext) {
13129 auto ABI = Target->getCXXABI();
13130 if (ABI.isMicrosoft())
13131 VTContext.reset(p: new MicrosoftVTableContext(*this));
13132 else {
13133 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
13134 ? ItaniumVTableContext::Relative
13135 : ItaniumVTableContext::Pointer;
13136 VTContext.reset(p: new ItaniumVTableContext(*this, ComponentLayout));
13137 }
13138 }
13139 return VTContext.get();
13140}
13141
/// Create a new name-mangling context for the given target, defaulting to
/// this ASTContext's own target when T is null.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  // All Itanium-family C++ ABIs share the Itanium mangling scheme.
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}
13162
/// Create a MangleContext for an auxiliary target (IsAux=true). Microsoft
/// mangling is not supported for this use (see the assert below); for
/// Itanium-family ABIs, lambdas are discriminated by their device-side
/// mangling number.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        Context&: *this, Diags&: getDiagnostics(),
        Discriminator: [](ASTContext &, const NamedDecl *ND) -> UnsignedOrNone {
          // Use the device-side lambda numbering as the discriminator.
          if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  case TargetCXXABI::Microsoft:
    // Unreachable in practice per the assertion above; kept so the switch
    // covers every ABI kind.
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}
13191
// Out-of-line defaulted destructor for the abstract C++ ABI helper.
CXXABI::~CXXABI() = default;
13193
13194size_t ASTContext::getSideTableAllocatedMemory() const {
13195 return ASTRecordLayouts.getMemorySize() +
13196 llvm::capacity_in_bytes(X: ObjCLayouts) +
13197 llvm::capacity_in_bytes(X: KeyFunctions) +
13198 llvm::capacity_in_bytes(X: ObjCImpls) +
13199 llvm::capacity_in_bytes(X: BlockVarCopyInits) +
13200 llvm::capacity_in_bytes(X: DeclAttrs) +
13201 llvm::capacity_in_bytes(X: TemplateOrInstantiation) +
13202 llvm::capacity_in_bytes(X: InstantiatedFromUsingDecl) +
13203 llvm::capacity_in_bytes(X: InstantiatedFromUsingShadowDecl) +
13204 llvm::capacity_in_bytes(X: InstantiatedFromUnnamedFieldDecl) +
13205 llvm::capacity_in_bytes(X: OverriddenMethods) +
13206 llvm::capacity_in_bytes(X: Types) +
13207 llvm::capacity_in_bytes(x: VariableArrayTypes);
13208}
13209
13210/// getIntTypeForBitwidth -
13211/// sets integer QualTy according to specified details:
13212/// bitwidth, signed/unsigned.
13213/// Returns empty type if there is no appropriate target types.
13214QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
13215 unsigned Signed) const {
13216 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(BitWidth: DestWidth, IsSigned: Signed);
13217 CanQualType QualTy = getFromTargetType(Type: Ty);
13218 if (!QualTy && DestWidth == 128)
13219 return Signed ? Int128Ty : UnsignedInt128Ty;
13220 return QualTy;
13221}
13222
/// getRealTypeForBitwidth -
/// Return the floating-point QualType with the specified bitwidth,
/// honoring an explicitly requested kind when given.
/// Returns the empty type if the target has no appropriate type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  // Ask the target which floating-point kind (if any) has this width.
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(BitWidth: DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    // Target has no floating-point type of the requested width.
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
13249
13250void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
13251 if (Number <= 1)
13252 return;
13253
13254 MangleNumbers[ND] = Number;
13255
13256 if (Listener)
13257 Listener->AddedManglingNumber(D: ND, Number);
13258}
13259
/// Retrieve the mangling number recorded for \p ND (1, the default, when
/// none was recorded). In CUDA/HIP host compilation, \p ForAuxTarget
/// selects which packed half of the stored value to return.
unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(Key: ND);
  // Declarations without a recorded entry implicitly have number 1.
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // Unpacking a half may produce 0; normalize back to the default of 1.
  return Res > 1 ? Res : 1;
}
13274
13275void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
13276 if (Number <= 1)
13277 return;
13278
13279 StaticLocalNumbers[VD] = Number;
13280
13281 if (Listener)
13282 Listener->AddedStaticLocalNumbers(D: VD, Number);
13283}
13284
13285unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
13286 auto I = StaticLocalNumbers.find(Key: VD);
13287 return I != StaticLocalNumbers.end() ? I->second : 1;
13288}
13289
13290void ASTContext::setIsDestroyingOperatorDelete(const FunctionDecl *FD,
13291 bool IsDestroying) {
13292 if (!IsDestroying) {
13293 assert(!DestroyingOperatorDeletes.contains(FD->getCanonicalDecl()));
13294 return;
13295 }
13296 DestroyingOperatorDeletes.insert(V: FD->getCanonicalDecl());
13297}
13298
/// Query whether \p FD was marked as a destroying operator delete via
/// setIsDestroyingOperatorDelete; keyed on the canonical declaration.
bool ASTContext::isDestroyingOperatorDelete(const FunctionDecl *FD) const {
  return DestroyingOperatorDeletes.contains(V: FD->getCanonicalDecl());
}
13302
13303void ASTContext::setIsTypeAwareOperatorNewOrDelete(const FunctionDecl *FD,
13304 bool IsTypeAware) {
13305 if (!IsTypeAware) {
13306 assert(!TypeAwareOperatorNewAndDeletes.contains(FD->getCanonicalDecl()));
13307 return;
13308 }
13309 TypeAwareOperatorNewAndDeletes.insert(V: FD->getCanonicalDecl());
13310}
13311
/// Query whether \p FD was marked as a type-aware operator new/delete via
/// setIsTypeAwareOperatorNewOrDelete; keyed on the canonical declaration.
bool ASTContext::isTypeAwareOperatorNewOrDelete(const FunctionDecl *FD) const {
  return TypeAwareOperatorNewAndDeletes.contains(V: FD->getCanonicalDecl());
}
13315
13316MangleNumberingContext &
13317ASTContext::getManglingNumberContext(const DeclContext *DC) {
13318 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13319 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
13320 if (!MCtx)
13321 MCtx = createMangleNumberingContext();
13322 return *MCtx;
13323}
13324
13325MangleNumberingContext &
13326ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
13327 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13328 std::unique_ptr<MangleNumberingContext> &MCtx =
13329 ExtraMangleNumberingContexts[D];
13330 if (!MCtx)
13331 MCtx = createMangleNumberingContext();
13332 return *MCtx;
13333}
13334
/// Create a fresh mangling-number context appropriate for the C++ ABI in
/// use; delegates to the ABI object.
std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
13339
/// Look up the copy constructor recorded for exception objects of type
/// \p RD (see addCopyConstructorForExceptionObject). The ABI-side table is
/// keyed on the first declaration of the record.
const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()));
}
13345
/// Record \p CD as the copy constructor to use for exception objects of
/// type \p RD. Both entries are keyed on the first declaration so lookups
/// are redeclaration-independent.
void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()),
      cast<CXXConstructorDecl>(Val: CD->getFirstDecl()));
}
13352
/// Associate the typedef-name \p DD with the unnamed tag \p TD; delegates
/// to the ABI object's side table.
void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}
13357
/// Retrieve the typedef-name previously associated with the unnamed tag
/// \p TD via addTypedefNameForUnnamedTagDecl; delegates to the ABI object.
TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}
13362
/// Associate the declarator \p DD with the unnamed tag \p TD; delegates to
/// the ABI object's side table.
void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}
13367
/// Retrieve the declarator previously associated with the unnamed tag
/// \p TD via addDeclaratorForUnnamedTagDecl; delegates to the ABI object.
DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}
13371
/// Record the index for \p D in the parameter-index side table, for later
/// retrieval by getParameterIndex.
void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}
13375
13376unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
13377 ParameterIndexTable::const_iterator I = ParamIndices.find(Val: D);
13378 assert(I != ParamIndices.end() &&
13379 "ParmIndices lacks entry set by ParmVarDecl");
13380 return I->second;
13381}
13382
13383QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
13384 unsigned Length) const {
13385 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
13386 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
13387 EltTy = EltTy.withConst();
13388
13389 EltTy = adjustStringLiteralBaseType(Ty: EltTy);
13390
13391 // Get an array type for the string, according to C99 6.4.5. This includes
13392 // the null terminator character.
13393 return getConstantArrayType(EltTy, ArySizeIn: llvm::APInt(32, Length + 1), SizeExpr: nullptr,
13394 ASM: ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
13395}
13396
13397StringLiteral *
13398ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
13399 StringLiteral *&Result = StringLiteralCache[Key];
13400 if (!Result)
13401 Result = StringLiteral::Create(
13402 Ctx: *this, Str: Key, Kind: StringLiteralKind::Ordinary,
13403 /*Pascal*/ false, Ty: getStringLiteralArrayType(EltTy: CharTy, Length: Key.size()),
13404 Locs: SourceLocation());
13405 return Result;
13406}
13407
13408MSGuidDecl *
13409ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
13410 assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
13411
13412 llvm::FoldingSetNodeID ID;
13413 MSGuidDecl::Profile(ID, P: Parts);
13414
13415 void *InsertPos;
13416 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
13417 return Existing;
13418
13419 QualType GUIDType = getMSGuidType().withConst();
13420 MSGuidDecl *New = MSGuidDecl::Create(C: *this, T: GUIDType, P: Parts);
13421 MSGuidDecls.InsertNode(N: New, InsertPos);
13422 return New;
13423}
13424
13425UnnamedGlobalConstantDecl *
13426ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
13427 const APValue &APVal) const {
13428 llvm::FoldingSetNodeID ID;
13429 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);
13430
13431 void *InsertPos;
13432 if (UnnamedGlobalConstantDecl *Existing =
13433 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
13434 return Existing;
13435
13436 UnnamedGlobalConstantDecl *New =
13437 UnnamedGlobalConstantDecl::Create(C: *this, T: Ty, APVal);
13438 UnnamedGlobalConstantDecls.InsertNode(N: New, InsertPos);
13439 return New;
13440}
13441
/// Return the unique TemplateParamObjectDecl for the given type and value,
/// creating it on first use; uniqued on (const-qualified type, value).
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(C: *this, T, V);
  TemplateParamObjectDecls.InsertNode(N: New, InsertPos);
  return New;
}
13462
/// Determine whether the given atomic expression would require a libcall
/// that is unsupported on the deployment target. Only old Darwin targets
/// (iOS before 7, macOS before 10.9) are affected.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  // Newer deployment targets are fine.
  if (!(T.isiOS() && T.isOSVersionLT(Major: 7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(Major: 10, Minor: 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(T: AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(T: AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  // A libcall is needed when the operand's size and alignment differ, or
  // when it is wider than the target can handle inline.
  return (Size != Align || toBits(CharSize: sizeChars) > MaxInlineWidthInBits);
}
13480
/// Determine whether an Objective-C method implementation matches its
/// declaration: same ObjC declaration qualifiers, same (sugar-insensitive)
/// return and parameter types, and the same variadicness.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(T1: MethodDecl->getReturnType(), T2: MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  // Walk the two parameter lists in lock-step, comparing qualifiers and
  // types pairwise.
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(T1: DeclVar->getType(), T2: ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}
13511
13512uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
13513 LangAS AS;
13514 if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
13515 AS = LangAS::Default;
13516 else
13517 AS = QT->getPointeeType().getAddressSpace();
13518
13519 return getTargetInfo().getNullPointerValue(AddrSpace: AS);
13520}
13521
/// Map a language-level address space onto the target's numeric address
/// space; delegates to the target info.
unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  return getTargetInfo().getTargetAddressSpace(AS);
}
13525
13526bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
13527 if (X == Y)
13528 return true;
13529 if (!X || !Y)
13530 return false;
13531 llvm::FoldingSetNodeID IDX, IDY;
13532 X->Profile(ID&: IDX, Context: *this, /*Canonical=*/true);
13533 Y->Profile(ID&: IDY, Context: *this, /*Canonical=*/true);
13534 return IDX == IDY;
13535}
13536
13537// The getCommon* helpers return, for given 'same' X and Y entities given as
13538// inputs, another entity which is also the 'same' as the inputs, but which
13539// is closer to the canonical form of the inputs, each according to a given
13540// criteria.
13541// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
13542// the regular ones.
13543
// Given two declarations of the same entity, return the older one (the one
// closer to the start of the redeclaration chain); returns null when X and
// Y do not declare the same entity.
static Decl *getCommonDecl(Decl *X, Decl *Y) {
  if (!declaresSameEntity(D1: X, D2: Y))
    return nullptr;
  // Walk X's redeclaration chain.
  for (const Decl *DX : X->redecls()) {
    // If we reach Y before reaching the first decl, that means X is older.
    if (DX == Y)
      return X;
    // If we reach the first decl, then Y is older.
    if (DX->isFirstDecl())
      return Y;
  }
  llvm_unreachable("Corrupt redecls chain");
}
13557
// Typed convenience overload of getCommonDecl: accepts possibly-null
// pointers to any Decl subclass and returns the result as the same type.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDecl(T *X, T *Y) {
  return cast_or_null<T>(
      getCommonDecl(X: const_cast<Decl *>(cast_or_null<Decl>(X)),
                    Y: const_cast<Decl *>(cast_or_null<Decl>(Y))));
}
13564
// 'Checked' variant of the typed getCommonDecl: null inputs are not
// allowed, and the result is likewise expected to be non-null.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(X: const_cast<Decl *>(cast<Decl>(X)),
                               Y: const_cast<Decl *>(cast<Decl>(Y))));
}
13570
// Return a template name equivalent to both X and Y, preserving whatever
// sugar is common to them; returns a null TemplateName when X and Y do not
// represent the same template.
static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
                                          TemplateName Y,
                                          bool IgnoreDeduced = false) {
  // Identical representations: all sugar is trivially common.
  if (X.getAsVoidPointer() == Y.getAsVoidPointer())
    return X;
  // FIXME: There are cases here where we could find a common template name
  // with more sugar. For example one could be a SubstTemplateTemplate*
  // replacing the other.
  TemplateName CX = Ctx.getCanonicalTemplateName(Name: X, IgnoreDeduced);
  // NOTE(review): IgnoreDeduced is applied only to X's canonicalization,
  // not to Y's — confirm this asymmetry is intentional.
  if (CX.getAsVoidPointer() !=
      Ctx.getCanonicalTemplateName(Name: Y).getAsVoidPointer())
    return TemplateName();
  return CX;
}
13585
// 'Checked' variant of getCommonTemplateName: the inputs are known to name
// the same template, so the result must be non-null.
static TemplateName getCommonTemplateNameChecked(ASTContext &Ctx,
                                                 TemplateName X, TemplateName Y,
                                                 bool IgnoreDeduced) {
  TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
  assert(R.getAsVoidPointer() != nullptr);
  return R;
}
13593
13594static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs,
13595 ArrayRef<QualType> Ys, bool Unqualified = false) {
13596 assert(Xs.size() == Ys.size());
13597 SmallVector<QualType, 8> Rs(Xs.size());
13598 for (size_t I = 0; I < Rs.size(); ++I)
13599 Rs[I] = Ctx.getCommonSugaredType(X: Xs[I], Y: Ys[I], Unqualified);
13600 return Rs;
13601}
13602
13603template <class T>
13604static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
13605 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
13606 : SourceLocation();
13607}
13608
13609static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
13610 const TemplateArgument &X,
13611 const TemplateArgument &Y) {
13612 if (X.getKind() != Y.getKind())
13613 return TemplateArgument();
13614
13615 switch (X.getKind()) {
13616 case TemplateArgument::ArgKind::Type:
13617 if (!Ctx.hasSameType(T1: X.getAsType(), T2: Y.getAsType()))
13618 return TemplateArgument();
13619 return TemplateArgument(
13620 Ctx.getCommonSugaredType(X: X.getAsType(), Y: Y.getAsType()));
13621 case TemplateArgument::ArgKind::NullPtr:
13622 if (!Ctx.hasSameType(T1: X.getNullPtrType(), T2: Y.getNullPtrType()))
13623 return TemplateArgument();
13624 return TemplateArgument(
13625 Ctx.getCommonSugaredType(X: X.getNullPtrType(), Y: Y.getNullPtrType()),
13626 /*Unqualified=*/true);
13627 case TemplateArgument::ArgKind::Expression:
13628 if (!Ctx.hasSameType(T1: X.getAsExpr()->getType(), T2: Y.getAsExpr()->getType()))
13629 return TemplateArgument();
13630 // FIXME: Try to keep the common sugar.
13631 return X;
13632 case TemplateArgument::ArgKind::Template: {
13633 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
13634 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13635 if (!CTN.getAsVoidPointer())
13636 return TemplateArgument();
13637 return TemplateArgument(CTN);
13638 }
13639 case TemplateArgument::ArgKind::TemplateExpansion: {
13640 TemplateName TX = X.getAsTemplateOrTemplatePattern(),
13641 TY = Y.getAsTemplateOrTemplatePattern();
13642 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13643 if (!CTN.getAsVoidPointer())
13644 return TemplateName();
13645 auto NExpX = X.getNumTemplateExpansions();
13646 assert(NExpX == Y.getNumTemplateExpansions());
13647 return TemplateArgument(CTN, NExpX);
13648 }
13649 default:
13650 // FIXME: Handle the other argument kinds.
13651 return X;
13652 }
13653}
13654
13655static bool getCommonTemplateArguments(ASTContext &Ctx,
13656 SmallVectorImpl<TemplateArgument> &R,
13657 ArrayRef<TemplateArgument> Xs,
13658 ArrayRef<TemplateArgument> Ys) {
13659 if (Xs.size() != Ys.size())
13660 return true;
13661 R.resize(N: Xs.size());
13662 for (size_t I = 0; I < R.size(); ++I) {
13663 R[I] = getCommonTemplateArgument(Ctx, X: Xs[I], Y: Ys[I]);
13664 if (R[I].isNull())
13665 return true;
13666 }
13667 return false;
13668}
13669
13670static auto getCommonTemplateArguments(ASTContext &Ctx,
13671 ArrayRef<TemplateArgument> Xs,
13672 ArrayRef<TemplateArgument> Ys) {
13673 SmallVector<TemplateArgument, 8> R;
13674 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
13675 assert(!Different);
13676 (void)Different;
13677 return R;
13678}
13679
13680template <class T>
13681static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) {
13682 return X->getKeyword() == Y->getKeyword() ? X->getKeyword()
13683 : ElaboratedTypeKeyword::None;
13684}
13685
/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2. \p IsSame asserts that the two
/// specifiers are known to be equivalent. Returns null when they are
/// not equivalent (only possible when IsSame is false).
static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx,
                                         NestedNameSpecifier *NNS1,
                                         NestedNameSpecifier *NNS2,
                                         bool IsSame) {
  // If they are identical, all sugar is common.
  if (NNS1 == NNS2)
    return NNS1;

  // IsSame implies both NNSes are equivalent.
  NestedNameSpecifier *Canon = Ctx.getCanonicalNestedNameSpecifier(NNS: NNS1);
  if (Canon != Ctx.getCanonicalNestedNameSpecifier(NNS: NNS2)) {
    assert(!IsSame && "Should be the same NestedNameSpecifier");
    // If they are not the same, there is nothing to unify.
    // FIXME: It would be useful here if we could represent a canonically
    // empty NNS, which is not identical to an empty-as-written NNS.
    return nullptr;
  }

  NestedNameSpecifier *R = nullptr;
  NestedNameSpecifier::SpecifierKind K1 = NNS1->getKind(), K2 = NNS2->getKind();
  switch (K1) {
  case NestedNameSpecifier::SpecifierKind::Identifier: {
    assert(K2 == NestedNameSpecifier::SpecifierKind::Identifier);
    IdentifierInfo *II = NNS1->getAsIdentifier();
    assert(II == NNS2->getAsIdentifier());
    // For an identifier, the prefixes are significant, so they must be the
    // same.
    NestedNameSpecifier *P = ::getCommonNNS(Ctx, NNS1: NNS1->getPrefix(),
                                            NNS2: NNS2->getPrefix(), /*IsSame=*/true);
    R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, II);
    break;
  }
  case NestedNameSpecifier::SpecifierKind::Namespace:
  case NestedNameSpecifier::SpecifierKind::NamespaceAlias: {
    assert(K2 == NestedNameSpecifier::SpecifierKind::Namespace ||
           K2 == NestedNameSpecifier::SpecifierKind::NamespaceAlias);
    // The prefixes for namespaces are not significant, its declaration
    // identifies it uniquely.
    NestedNameSpecifier *P =
        ::getCommonNNS(Ctx, NNS1: NNS1->getPrefix(), NNS2: NNS2->getPrefix(),
                       /*IsSame=*/false);
    NamespaceAliasDecl *A1 = NNS1->getAsNamespaceAlias(),
                       *A2 = NNS2->getAsNamespaceAlias();
    // Are they the same namespace alias?
    if (declaresSameEntity(D1: A1, D2: A2)) {
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, Alias: ::getCommonDeclChecked(X: A1, Y: A2));
      break;
    }
    // Otherwise, look at the namespaces only.
    NamespaceDecl *N1 = A1 ? A1->getNamespace() : NNS1->getAsNamespace(),
                  *N2 = A2 ? A2->getNamespace() : NNS2->getAsNamespace();
    R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, NS: ::getCommonDeclChecked(X: N1, Y: N2));
    break;
  }
  case NestedNameSpecifier::SpecifierKind::TypeSpec: {
    // FIXME: See comment below, on Super case.
    if (K2 == NestedNameSpecifier::SpecifierKind::Super)
      return Ctx.getCanonicalNestedNameSpecifier(NNS: NNS1);

    assert(K2 == NestedNameSpecifier::SpecifierKind::TypeSpec);

    const Type *T1 = NNS1->getAsType(), *T2 = NNS2->getAsType();
    if (T1 == T2) {
      // If the types are identical, then only the prefixes differ.
      // A well-formed NNS never has these types, as they have
      // special normalized forms.
      assert((!isa<DependentNameType, ElaboratedType>(T1)));
      // Only for a DependentTemplateSpecializationType the prefix
      // is actually significant. A DependentName, which would be another
      // plausible case, cannot occur here, as explained above.
      bool IsSame = isa<DependentTemplateSpecializationType>(Val: T1);
      NestedNameSpecifier *P =
          ::getCommonNNS(Ctx, NNS1: NNS1->getPrefix(), NNS2: NNS2->getPrefix(), IsSame);
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, T: T1);
      break;
    }
    // TODO: Try to salvage the original prefix.
    // If getCommonSugaredType removed any top level sugar, the original prefix
    // is not applicable anymore.
    const Type *T = Ctx.getCommonSugaredType(X: QualType(T1, 0), Y: QualType(T2, 0),
                                             /*Unqualified=*/true)
                        .getTypePtr();

    // A NestedNameSpecifier has special normalization rules for certain types.
    switch (T->getTypeClass()) {
    case Type::Elaborated: {
      // An ElaboratedType is stripped off, its Qualifier becomes the prefix.
      auto *ET = cast<ElaboratedType>(Val: T);
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: ET->getQualifier(),
                                      T: ET->getNamedType().getTypePtr());
      break;
    }
    case Type::DependentName: {
      // A DependentName is turned into an Identifier NNS.
      auto *DN = cast<DependentNameType>(Val: T);
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: DN->getQualifier(),
                                      II: DN->getIdentifier());
      break;
    }
    case Type::DependentTemplateSpecialization: {
      // A DependentTemplateSpecializationType loses its Qualifier, which
      // is turned into the prefix.
      auto *DTST = cast<DependentTemplateSpecializationType>(Val: T);
      const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
      DependentTemplateStorage NewDTN(/*Qualifier=*/nullptr, DTN.getName(),
                                      DTN.hasTemplateKeyword());
      T = Ctx.getDependentTemplateSpecializationType(Keyword: DTST->getKeyword(), Name: NewDTN,
                                                     Args: DTST->template_arguments())
              .getTypePtr();
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: DTN.getQualifier(), T);
      break;
    }
    default:
      R = NestedNameSpecifier::Create(Context: Ctx, /*Prefix=*/nullptr, T);
      break;
    }
    break;
  }
  case NestedNameSpecifier::SpecifierKind::Super:
    // FIXME: Can __super even be used with data members?
    // If it's only usable in functions, we will never see it here,
    // unless we save the qualifiers used in function types.
    // In that case, it might be possible NNS2 is a type,
    // in which case we should degrade the result to
    // a CXXRecordType.
    return Ctx.getCanonicalNestedNameSpecifier(NNS: NNS1);
  case NestedNameSpecifier::SpecifierKind::Global:
    // The global NNS is a singleton.
    assert(K2 == NestedNameSpecifier::SpecifierKind::Global &&
           "Global NNS cannot be equivalent to any other kind");
    llvm_unreachable("Global NestedNameSpecifiers did not compare equal");
  }
  // The merged result must still canonicalize to the shared canonical form.
  assert(Ctx.getCanonicalNestedNameSpecifier(R) == Canon);
  return R;
}
13823
// Convenience wrapper: merge the qualifiers (nested name specifiers) of
// two nodes via getCommonNNS.
template <class T>
static NestedNameSpecifier *getCommonQualifier(ASTContext &Ctx, const T *X,
                                               const T *Y, bool IsSame) {
  return ::getCommonNNS(Ctx, NNS1: X->getQualifier(), NNS2: Y->getQualifier(), IsSame);
}
13829
// Merge the element types of two nodes into their common sugared form.
template <class T>
static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) {
  return Ctx.getCommonSugaredType(X: X->getElementType(), Y: Y->getElementType());
}
13834
// Merge two array element types, ignoring qualifiers during the merge.
// Qualifiers present on only one side's element type are pushed into the
// corresponding output parameter (QX/QY) for the caller to handle at the
// top level.
template <class T>
static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
                                          Qualifiers &QX, const T *Y,
                                          Qualifiers &QY) {
  QualType EX = X->getElementType(), EY = Y->getElementType();
  QualType R = Ctx.getCommonSugaredType(X: EX, Y: EY,
                                        /*Unqualified=*/true);
  // Qualifiers common to both element types.
  Qualifiers RQ = R.getQualifiers();
  // For each side, move to the top level any qualifiers which are not common to
  // both element types. The caller must assume top level qualifiers might
  // be different, even if they are the same type, and can be treated as sugar.
  QX += EX.getQualifiers() - RQ;
  QY += EY.getQualifiers() - RQ;
  return R;
}
13851
// Merge the pointee types of two nodes into their common sugared form.
template <class T>
static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) {
  return Ctx.getCommonSugaredType(X: X->getPointeeType(), Y: Y->getPointeeType());
}
13856
// The two size expressions are expected to be equivalent; keep X's.
template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) {
  assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
  return X->getSizeExpr();
}
13861
// The two array size modifiers are expected to agree; keep X's.
static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
  assert(X->getSizeModifier() == Y->getSizeModifier());
  return X->getSizeModifier();
}
13866
// The two index-type CVR qualifier sets are expected to agree; keep X's.
static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
                                            const ArrayType *Y) {
  assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
  return X->getIndexTypeCVRQualifiers();
}
13872
// Merges two type lists such that the resulting vector will contain
// each type (in a canonical sense) only once, in the order they appear
// from X to Y. If they occur in both X and Y, the result will contain
// the common sugared type between them.
static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out,
                           ArrayRef<QualType> X, ArrayRef<QualType> Y) {
  // Maps each canonical type to its index in Out.
  llvm::DenseMap<QualType, unsigned> Found;
  for (auto Ts : {X, Y}) {
    for (QualType T : Ts) {
      auto Res = Found.try_emplace(Key: Ctx.getCanonicalType(T), Args: Out.size());
      if (!Res.second) {
        // Canonically seen before: fold this occurrence into the stored
        // entry, keeping the sugar common to both.
        QualType &U = Out[Res.first->second];
        U = Ctx.getCommonSugaredType(X: U, Y: T);
      } else {
        Out.emplace_back(Args&: T);
      }
    }
  }
}
13892
/// Merge two exception specifications into one permitting everything either
/// permits: if either side can throw anything, that side is the result; if
/// either side is non-throwing, the other side is the result; two dynamic
/// specs merge into the union of their type lists (stored in
/// \p ExceptionTypeStorage). Value-dependent noexcept specs are dropped,
/// which is only valid pre-C++17 (\p AcceptDependent).
FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
                                FunctionProtoType::ExceptionSpecInfo ESI2,
                                SmallVectorImpl<QualType> &ExceptionTypeStorage,
                                bool AcceptDependent) {
  ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;

  // If either of them can throw anything, that is the result.
  for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
    if (EST1 == I)
      return ESI1;
    if (EST2 == I)
      return ESI2;
  }

  // If either of them is non-throwing, the result is the other.
  for (auto I :
       {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
    if (EST1 == I)
      return ESI2;
    if (EST2 == I)
      return ESI1;
  }

  // If we're left with value-dependent computed noexcept expressions, we're
  // stuck. Before C++17, we can just drop the exception specification entirely,
  // since it's not actually part of the canonical type. And this should never
  // happen in C++17, because it would mean we were computing the composite
  // pointer type of dependent types, which should never happen.
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(Ctx&: *this, Out&: ExceptionTypeStorage, X: ESI1.Exceptions,
                   Y: ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}
13960
// Given two type nodes X and Y of the same type class, which are the
// fully-desugared "canonical nodes" of two types known to have the same
// canonical type, build a single node both can share, unifying any remaining
// sugared properties (element types, parameter types, template arguments...)
// recursively through ASTContext::getCommonSugaredType. QX and QY accumulate
// the qualifiers stripped so far; the array cases may shift differing element
// qualifiers up into them. Called from getCommonSugaredType when the
// canonical nodes of the two sides differ.
static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
// Reject type classes which should never reach this function: non-canonical
// (sugar) nodes are handled by getCommonSugarTypeNode, sugar-free nodes
// compare identical by pointer and never get here, and non-unique nodes have
// no single common node to build.
#define UNEXPECTED_TYPE(Class, Kind) \
  case Type::Class: \
    llvm_unreachable("Unexpected " Kind ": " #Class);

#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
  SUGAR_FREE_TYPE(Builtin)
  SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
  SUGAR_FREE_TYPE(DependentBitInt)
  SUGAR_FREE_TYPE(Enum)
  SUGAR_FREE_TYPE(BitInt)
  SUGAR_FREE_TYPE(ObjCInterface)
  SUGAR_FREE_TYPE(Record)
  SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
  SUGAR_FREE_TYPE(UnresolvedUsing)
  SUGAR_FREE_TYPE(HLSLAttributedResource)
  SUGAR_FREE_TYPE(HLSLInlineSpirv)
#undef SUGAR_FREE_TYPE
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
  NON_UNIQUE_TYPE(TypeOfExpr)
  NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

  UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
    // Only undeduced autos can appear as canonical nodes here.
    assert(AX->getDeducedType().isNull());
    assert(AY->getDeducedType().isNull());
    assert(AX->getKeyword() == AY->getKeyword());
    assert(AX->isInstantiationDependentType() ==
           AY->isInstantiationDependentType());
    auto As = getCommonTemplateArguments(Ctx, Xs: AX->getTypeConstraintArguments(),
                                         Ys: AY->getTypeConstraintArguments());
    return Ctx.getAutoType(DeducedType: QualType(), Keyword: AX->getKeyword(),
                           IsDependent: AX->isInstantiationDependentType(),
                           IsPack: AX->containsUnexpandedParameterPack(),
                           TypeConstraintConcept: getCommonDeclChecked(X: AX->getTypeConstraintConcept(),
                                                Y: AY->getTypeConstraintConcept()),
                           TypeConstraintArgs: As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(Val: X),
               *AY = cast<IncompleteArrayType>(Val: Y);
    return Ctx.getIncompleteArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        ASM: getCommonSizeModifier(X: AX, Y: AY), elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(Val: X),
               *AY = cast<DependentSizedArrayType>(Val: Y);
    return Ctx.getDependentSizedArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        numElements: getCommonSizeExpr(Ctx, X: AX, Y: AY), ASM: getCommonSizeModifier(X: AX, Y: AY),
        elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(Val: X),
               *AY = cast<ConstantArrayType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    // Keep the written size expression only when both sides wrote the same
    // one; otherwise drop it (the numeric size above is still preserved).
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ArrayParameter: {
    const auto *AX = cast<ArrayParameterType>(Val: X),
               *AY = cast<ArrayParameterType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    auto ArrayTy = Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
    return Ctx.getArrayParameterType(Ty: ArrayTy);
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(Val: X), *AY = cast<AtomicType>(Val: Y);
    return Ctx.getAtomicType(
        T: Ctx.getCommonSugaredType(X: AX->getValueType(), Y: AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(Val: X), *CY = cast<ComplexType>(Val: Y);
    return Ctx.getComplexType(T: getCommonArrayElementType(Ctx, X: CX, QX, Y: CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(Val: X), *PY = cast<PointerType>(Val: Y);
    return Ctx.getPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(Val: X), *PY = cast<BlockPointerType>(Val: Y);
    return Ctx.getBlockPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(Val: X),
               *PY = cast<ObjCObjectPointerType>(Val: Y);
    return Ctx.getObjCObjectPointerType(ObjectT: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    assert(declaresSameEntity(PX->getMostRecentCXXRecordDecl(),
                              PY->getMostRecentCXXRecordDecl()));
    return Ctx.getMemberPointerType(
        T: getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/true),
        Cls: PX->getMostRecentCXXRecordDecl());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(Val: X),
               *PY = cast<LValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                      SpelledAsLValue: PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(Val: X),
               *PY = cast<RValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(Val: X),
               *PY = cast<DependentAddressSpaceType>(Val: Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(PointeeType: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                            AddrSpaceExpr: PX->getAddrSpaceExpr(),
                                            AttrLoc: getCommonAttrLoc(X: PX, Y: PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(Val: X),
               *FY = cast<FunctionNoProtoType>(Val: Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        ResultTy: Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType()),
        Info: FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(Val: X),
               *FY = cast<FunctionProtoType>(Val: Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType());
    // Parameter types must unify up to qualifiers: top-level cv-qualifiers on
    // parameters are not part of the function type's identity.
    auto P = getCommonTypes(Ctx, Xs: FX->param_types(), Ys: FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        ESI1: EPIX.ExceptionSpec, ESI2: EPIY.ExceptionSpec, ExceptionTypeStorage&: Exceptions, AcceptDependent: true);
    return Ctx.getFunctionType(ResultTy: R, Args: P, EPI: EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(Val: X), *OY = cast<ObjCObjectType>(Val: Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, Xs: OX->getTypeArgsAsWritten(),
                              Ys: OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        baseType: Ctx.getCommonSugaredType(X: OX->getBaseType(), Y: OY->getBaseType()), typeArgs: TAs,
        protocols: OX->getProtocols(),
        isKindOf: OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(Val: X),
               *MY = cast<ConstantMatrixType>(Val: Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(ElementTy: getCommonElementType(Ctx, X: MX, Y: MY),
                                     NumRows: MX->getNumRows(), NumColumns: MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(Val: X),
               *MY = cast<DependentSizedMatrixType>(Val: Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        ElementTy: getCommonElementType(Ctx, X: MX, Y: MY), RowExpr: MX->getRowExpr(),
        ColumnExpr: MX->getColumnExpr(), AttrLoc: getCommonAttrLoc(X: MX, Y: MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(Val: X), *VY = cast<VectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                             NumElts: VX->getNumElements(), VecKind: VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(Val: X), *VY = cast<ExtVectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                NumElts: VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(Val: X),
               *VY = cast<DependentSizedExtVectorType>(Val: Y);
    return Ctx.getDependentSizedExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                              SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
                                              AttrLoc: getCommonAttrLoc(X: VX, Y: VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(Val: X),
               *VY = cast<DependentVectorType>(Val: Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        VecType: getCommonElementType(Ctx, X: VX, Y: VY), SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
        AttrLoc: getCommonAttrLoc(X: VX, Y: VY), VecKind: VX->getVectorKind());
  }
  case Type::InjectedClassName: {
    const auto *IX = cast<InjectedClassNameType>(Val: X),
               *IY = cast<InjectedClassNameType>(Val: Y);
    return Ctx.getInjectedClassNameType(
        Decl: getCommonDeclChecked(X: IX->getDecl(), Y: IY->getDecl()),
        TST: Ctx.getCommonSugaredType(X: IX->getInjectedSpecializationType(),
                                 Y: IY->getInjectedSpecializationType()));
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    auto As = getCommonTemplateArguments(Ctx, Xs: TX->template_arguments(),
                                         Ys: TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        Template: ::getCommonTemplateNameChecked(Ctx, X: TX->getTemplateName(),
                                       Y: TY->getTemplateName(),
                                       /*IgnoreDeduced=*/true),
        SpecifiedArgs: As, /*CanonicalArgs=*/{}, Underlying: X->getCanonicalTypeInternal());
  }
  case Type::Decltype: {
    const auto *DX = cast<DecltypeType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<DecltypeType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
    // As Decltype is not uniqued, building a common type would be wasteful.
    return QualType(DX, 0);
  }
  case Type::PackIndexing: {
    const auto *DX = cast<PackIndexingType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
    // Like Decltype above, PackIndexing is not uniqued; reuse X's node.
    return QualType(DX, 0);
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(Val: X),
               *NY = cast<DependentNameType>(Val: Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        Keyword: getCommonTypeKeyword(X: NX, Y: NY),
        NNS: getCommonQualifier(Ctx, X: NX, Y: NY, /*IsSame=*/true), Name: NX->getIdentifier());
  }
  case Type::DependentTemplateSpecialization: {
    const auto *TX = cast<DependentTemplateSpecializationType>(Val: X),
               *TY = cast<DependentTemplateSpecializationType>(Val: Y);
    auto As = getCommonTemplateArguments(Ctx, Xs: TX->template_arguments(),
                                         Ys: TY->template_arguments());
    const DependentTemplateStorage &SX = TX->getDependentTemplateName(),
                                   &SY = TY->getDependentTemplateName();
    assert(SX.getName() == SY.getName());
    DependentTemplateStorage Name(
        getCommonNNS(Ctx, NNS1: SX.getQualifier(), NNS2: SY.getQualifier(),
                     /*IsSame=*/true),
        SX.getName(), SX.hasTemplateKeyword() || SY.hasTemplateKeyword());
    return Ctx.getDependentTemplateSpecializationType(
        Keyword: getCommonTypeKeyword(X: TX, Y: TY), Name, Args: As);
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(Val: X),
               *TY = cast<UnaryTransformType>(Val: Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        BaseType: Ctx.getCommonSugaredType(X: TX->getBaseType(), Y: TY->getBaseType()),
        UnderlyingType: Ctx.getCommonSugaredType(X: TX->getUnderlyingType(),
                                 Y: TY->getUnderlyingType()),
        Kind: TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(Val: X),
               *PY = cast<PackExpansionType>(Val: Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Pattern: Ctx.getCommonSugaredType(X: PX->getPattern(), Y: PY->getPattern()),
        NumExpansions: PX->getNumExpansions(), ExpectPackInType: false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(Val: X), *PY = cast<PipeType>(Val: Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    // Dispatch through a member-function pointer so the read/write pipe
    // distinction is decided once.
    auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
                               : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, X: PX, Y: PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(Val: X),
               *TY = cast<TemplateTypeParmType>(Val: Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        Depth: TX->getDepth(), Index: TX->getIndex(), ParameterPack: TX->isParameterPack(),
        TTPDecl: getCommonDecl(X: TX->getDecl(), Y: TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}
14297
// Given two sugar nodes X and Y of the same type class, whose desugared
// types have already been unified into Underlying, try to build a single
// sugar node over Underlying carrying the properties common to both.
// Returns a null QualType when the two nodes cannot be merged; the caller
// (getCommonSugaredType) then stops re-sugaring at Underlying.
static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                       const Type *Y,
                                       SplitQualType Underlying) {
  Type::TypeClass TC = X->getTypeClass();
  if (TC != Y->getTypeClass())
    return QualType();
  switch (TC) {
// Reject type classes which should never reach this function: dependent and
// canonical nodes are handled by getCommonNonSugarTypeNode instead.
#define UNEXPECTED_TYPE(Class, Kind) \
  case Type::Class: \
    llvm_unreachable("Unexpected " Kind ": " #Class);
#define TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
#include "clang/AST/TypeNodes.inc"

#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
  CANONICAL_TYPE(Atomic)
  CANONICAL_TYPE(BitInt)
  CANONICAL_TYPE(BlockPointer)
  CANONICAL_TYPE(Builtin)
  CANONICAL_TYPE(Complex)
  CANONICAL_TYPE(ConstantArray)
  CANONICAL_TYPE(ArrayParameter)
  CANONICAL_TYPE(ConstantMatrix)
  CANONICAL_TYPE(Enum)
  CANONICAL_TYPE(ExtVector)
  CANONICAL_TYPE(FunctionNoProto)
  CANONICAL_TYPE(FunctionProto)
  CANONICAL_TYPE(IncompleteArray)
  CANONICAL_TYPE(HLSLAttributedResource)
  CANONICAL_TYPE(HLSLInlineSpirv)
  CANONICAL_TYPE(LValueReference)
  CANONICAL_TYPE(ObjCInterface)
  CANONICAL_TYPE(ObjCObject)
  CANONICAL_TYPE(ObjCObjectPointer)
  CANONICAL_TYPE(Pipe)
  CANONICAL_TYPE(Pointer)
  CANONICAL_TYPE(Record)
  CANONICAL_TYPE(RValueReference)
  CANONICAL_TYPE(VariableArray)
  CANONICAL_TYPE(Vector)
#undef CANONICAL_TYPE

#undef UNEXPECTED_TYPE

  case Type::Adjusted: {
    const auto *AX = cast<AdjustedType>(Val: X), *AY = cast<AdjustedType>(Val: Y);
    QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
    if (!Ctx.hasSameType(T1: OX, T2: OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getAdjustedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
                               New: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Decayed: {
    const auto *DX = cast<DecayedType>(Val: X), *DY = cast<DecayedType>(Val: Y);
    QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
    if (!Ctx.hasSameType(T1: OX, T2: OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getDecayedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
                              Decayed: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Attributed: {
    const auto *AX = cast<AttributedType>(Val: X), *AY = cast<AttributedType>(Val: Y);
    AttributedType::Kind Kind = AX->getAttrKind();
    if (Kind != AY->getAttrKind())
      return QualType();
    QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
    if (!Ctx.hasSameType(T1: MX, T2: MY))
      return QualType();
    // FIXME: It's inefficient to have to unify the modified types.
    return Ctx.getAttributedType(attrKind: Kind, modifiedType: Ctx.getCommonSugaredType(X: MX, Y: MY),
                                 equivalentType: Ctx.getQualifiedType(split: Underlying),
                                 attr: AX->getAttr());
  }
  case Type::BTFTagAttributed: {
    const auto *BX = cast<BTFTagAttributedType>(Val: X);
    const BTFTypeTagAttr *AX = BX->getAttr();
    // The attribute is not uniqued, so just compare the tag.
    if (AX->getBTFTypeTag() !=
        cast<BTFTagAttributedType>(Val: Y)->getAttr()->getBTFTypeTag())
      return QualType();
    return Ctx.getBTFTagAttributedType(BTFAttr: AX, Wrapped: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);

    AutoTypeKeyword KW = AX->getKeyword();
    if (KW != AY->getKeyword())
      return QualType();

    ConceptDecl *CD = ::getCommonDecl(X: AX->getTypeConstraintConcept(),
                                      Y: AY->getTypeConstraintConcept());
    SmallVector<TemplateArgument, 8> As;
    // Note: this getCommonTemplateArguments overload returns true when the
    // argument lists could not be unified.
    if (CD &&
        getCommonTemplateArguments(Ctx, R&: As, Xs: AX->getTypeConstraintArguments(),
                                   Ys: AY->getTypeConstraintArguments())) {
      CD = nullptr; // The arguments differ, so make it unconstrained.
      As.clear();
    }

    // Both auto types can't be dependent, otherwise they wouldn't have been
    // sugar. This implies they can't contain unexpanded packs either.
    return Ctx.getAutoType(DeducedType: Ctx.getQualifiedType(split: Underlying), Keyword: AX->getKeyword(),
                           /*IsDependent=*/false, /*IsPack=*/false, TypeConstraintConcept: CD, TypeConstraintArgs: As);
  }
  case Type::PackIndexing:
  case Type::Decltype:
    // Non-uniqued nodes: no common sugar node is built for them.
    return QualType();
  case Type::DeducedTemplateSpecialization:
    // FIXME: Try to merge these.
    return QualType();

  case Type::Elaborated: {
    const auto *EX = cast<ElaboratedType>(Val: X), *EY = cast<ElaboratedType>(Val: Y);
    return Ctx.getElaboratedType(
        Keyword: ::getCommonTypeKeyword(X: EX, Y: EY),
        NNS: ::getCommonQualifier(Ctx, X: EX, Y: EY, /*IsSame=*/false),
        NamedType: Ctx.getQualifiedType(split: Underlying),
        OwnedTagDecl: ::getCommonDecl(X: EX->getOwnedTagDecl(), Y: EY->getOwnedTagDecl()));
  }
  case Type::MacroQualified: {
    const auto *MX = cast<MacroQualifiedType>(Val: X),
               *MY = cast<MacroQualifiedType>(Val: Y);
    const IdentifierInfo *IX = MX->getMacroIdentifier();
    if (IX != MY->getMacroIdentifier())
      return QualType();
    return Ctx.getMacroQualifiedType(UnderlyingTy: Ctx.getQualifiedType(split: Underlying), MacroII: IX);
  }
  case Type::SubstTemplateTypeParm: {
    const auto *SX = cast<SubstTemplateTypeParmType>(Val: X),
               *SY = cast<SubstTemplateTypeParmType>(Val: Y);
    // All of the substitution's identity (declaration, index, pack index)
    // must match for the sugar to be shared.
    Decl *CD =
        ::getCommonDecl(X: SX->getAssociatedDecl(), Y: SY->getAssociatedDecl());
    if (!CD)
      return QualType();
    unsigned Index = SX->getIndex();
    if (Index != SY->getIndex())
      return QualType();
    auto PackIndex = SX->getPackIndex();
    if (PackIndex != SY->getPackIndex())
      return QualType();
    return Ctx.getSubstTemplateTypeParmType(Replacement: Ctx.getQualifiedType(split: Underlying),
                                            AssociatedDecl: CD, Index, PackIndex,
                                            Final: SX->getFinal() && SY->getFinal());
  }
  case Type::ObjCTypeParam:
    // FIXME: Try to merge these.
    return QualType();
  case Type::Paren:
    return Ctx.getParenType(InnerType: Ctx.getQualifiedType(split: Underlying));

  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    TemplateName CTN =
        ::getCommonTemplateName(Ctx, X: TX->getTemplateName(),
                                Y: TY->getTemplateName(), /*IgnoreDeduced=*/true);
    if (!CTN.getAsVoidPointer())
      return QualType();
    SmallVector<TemplateArgument, 8> As;
    if (getCommonTemplateArguments(Ctx, R&: As, Xs: TX->template_arguments(),
                                   Ys: TY->template_arguments()))
      return QualType();
    return Ctx.getTemplateSpecializationType(Template: CTN, SpecifiedArgs: As,
                                             /*CanonicalArgs=*/{},
                                             Underlying: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Typedef: {
    const auto *TX = cast<TypedefType>(Val: X), *TY = cast<TypedefType>(Val: Y);
    const TypedefNameDecl *CD = ::getCommonDecl(X: TX->getDecl(), Y: TY->getDecl());
    if (!CD)
      return QualType();
    return Ctx.getTypedefType(Decl: CD, Underlying: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::TypeOf: {
    // The common sugar between two typeof expressions, where one is
    // potentially a typeof_unqual and the other is not, we unify to the
    // qualified type as that retains the most information along with the type.
    // We only return a typeof_unqual type when both types are unqual types.
    TypeOfKind Kind = TypeOfKind::Qualified;
    if (cast<TypeOfType>(Val: X)->getKind() == cast<TypeOfType>(Val: Y)->getKind() &&
        cast<TypeOfType>(Val: X)->getKind() == TypeOfKind::Unqualified)
      Kind = TypeOfKind::Unqualified;
    return Ctx.getTypeOfType(tofType: Ctx.getQualifiedType(split: Underlying), Kind);
  }
  case Type::TypeOfExpr:
    return QualType();

  case Type::UnaryTransform: {
    const auto *UX = cast<UnaryTransformType>(Val: X),
               *UY = cast<UnaryTransformType>(Val: Y);
    UnaryTransformType::UTTKind KX = UX->getUTTKind();
    if (KX != UY->getUTTKind())
      return QualType();
    QualType BX = UX->getBaseType(), BY = UY->getBaseType();
    if (!Ctx.hasSameType(T1: BX, T2: BY))
      return QualType();
    // FIXME: It's inefficient to have to unify the base types.
    return Ctx.getUnaryTransformType(BaseType: Ctx.getCommonSugaredType(X: BX, Y: BY),
                                     UnderlyingType: Ctx.getQualifiedType(split: Underlying), Kind: KX);
  }
  case Type::Using: {
    const auto *UX = cast<UsingType>(Val: X), *UY = cast<UsingType>(Val: Y);
    const UsingShadowDecl *CD =
        ::getCommonDecl(X: UX->getFoundDecl(), Y: UY->getFoundDecl());
    if (!CD)
      return QualType();
    return Ctx.getUsingType(Found: CD, Underlying: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    CXXRecordDecl *Cls = PX->getMostRecentCXXRecordDecl();
    assert(Cls == PY->getMostRecentCXXRecordDecl());
    return Ctx.getMemberPointerType(
        T: ::getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: ::getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/false), Cls);
  }
  case Type::CountAttributed: {
    const auto *DX = cast<CountAttributedType>(Val: X),
               *DY = cast<CountAttributedType>(Val: Y);
    if (DX->isCountInBytes() != DY->isCountInBytes())
      return QualType();
    if (DX->isOrNull() != DY->isOrNull())
      return QualType();
    Expr *CEX = DX->getCountExpr();
    Expr *CEY = DY->getCountExpr();
    ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
    if (Ctx.hasSameExpr(X: CEX, Y: CEY))
      return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
                                        CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
                                        DependentDecls: CDX);
    if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
      return QualType();
    // Two declarations with the same integer constant may still differ in their
    // expression pointers, so we need to evaluate them.
    llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
    llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
    if (VX != VY)
      return QualType();
    return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
                                      CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
                                      DependentDecls: CDX);
  }
  }
  llvm_unreachable("Unhandled Type Class");
}
14546
14547static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
14548 SmallVector<SplitQualType, 8> R;
14549 while (true) {
14550 QTotal.addConsistentQualifiers(qs: T.Quals);
14551 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
14552 if (NT == QualType(T.Ty, 0))
14553 break;
14554 R.push_back(Elt: T);
14555 T = NT.split();
14556 }
14557 return R;
14558}
14559
14560QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
14561 bool Unqualified) {
14562 assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
14563 if (X == Y)
14564 return X;
14565 if (!Unqualified) {
14566 if (X.isCanonical())
14567 return X;
14568 if (Y.isCanonical())
14569 return Y;
14570 }
14571
14572 SplitQualType SX = X.split(), SY = Y.split();
14573 Qualifiers QX, QY;
14574 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
14575 // until we reach their underlying "canonical nodes". Note these are not
14576 // necessarily canonical types, as they may still have sugared properties.
14577 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
14578 auto Xs = ::unwrapSugar(T&: SX, QTotal&: QX), Ys = ::unwrapSugar(T&: SY, QTotal&: QY);
14579
14580 // If this is an ArrayType, the element qualifiers are interchangeable with
14581 // the top level qualifiers.
14582 // * In case the canonical nodes are the same, the elements types are already
14583 // the same.
14584 // * Otherwise, the element types will be made the same, and any different
14585 // element qualifiers will be moved up to the top level qualifiers, per
14586 // 'getCommonArrayElementType'.
14587 // In both cases, this means there may be top level qualifiers which differ
14588 // between X and Y. If so, these differing qualifiers are redundant with the
14589 // element qualifiers, and can be removed without changing the canonical type.
14590 // The desired behaviour is the same as for the 'Unqualified' case here:
14591 // treat the redundant qualifiers as sugar, remove the ones which are not
14592 // common to both sides.
14593 bool KeepCommonQualifiers = Unqualified || isa<ArrayType>(Val: SX.Ty);
14594
14595 if (SX.Ty != SY.Ty) {
14596 // The canonical nodes differ. Build a common canonical node out of the two,
14597 // unifying their sugar. This may recurse back here.
14598 SX.Ty =
14599 ::getCommonNonSugarTypeNode(Ctx&: *this, X: SX.Ty, QX, Y: SY.Ty, QY).getTypePtr();
14600 } else {
14601 // The canonical nodes were identical: We may have desugared too much.
14602 // Add any common sugar back in.
14603 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
14604 QX -= SX.Quals;
14605 QY -= SY.Quals;
14606 SX = Xs.pop_back_val();
14607 SY = Ys.pop_back_val();
14608 }
14609 }
14610 if (KeepCommonQualifiers)
14611 QX = Qualifiers::removeCommonQualifiers(L&: QX, R&: QY);
14612 else
14613 assert(QX == QY);
14614
14615 // Even though the remaining sugar nodes in Xs and Ys differ, some may be
14616 // related. Walk up these nodes, unifying them and adding the result.
14617 while (!Xs.empty() && !Ys.empty()) {
14618 auto Underlying = SplitQualType(
14619 SX.Ty, Qualifiers::removeCommonQualifiers(L&: SX.Quals, R&: SY.Quals));
14620 SX = Xs.pop_back_val();
14621 SY = Ys.pop_back_val();
14622 SX.Ty = ::getCommonSugarTypeNode(Ctx&: *this, X: SX.Ty, Y: SY.Ty, Underlying)
14623 .getTypePtrOrNull();
14624 // Stop at the first pair which is unrelated.
14625 if (!SX.Ty) {
14626 SX.Ty = Underlying.Ty;
14627 break;
14628 }
14629 QX -= Underlying.Quals;
14630 };
14631
14632 // Add back the missing accumulated qualifiers, which were stripped off
14633 // with the sugar nodes we could not unify.
14634 QualType R = getQualifiedType(T: SX.Ty, Qs: QX);
14635 assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
14636 return R;
14637}
14638
14639QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
14640 assert(Ty->isFixedPointType());
14641
14642 if (Ty->isUnsaturatedFixedPointType())
14643 return Ty;
14644
14645 switch (Ty->castAs<BuiltinType>()->getKind()) {
14646 default:
14647 llvm_unreachable("Not a saturated fixed point type!");
14648 case BuiltinType::SatShortAccum:
14649 return ShortAccumTy;
14650 case BuiltinType::SatAccum:
14651 return AccumTy;
14652 case BuiltinType::SatLongAccum:
14653 return LongAccumTy;
14654 case BuiltinType::SatUShortAccum:
14655 return UnsignedShortAccumTy;
14656 case BuiltinType::SatUAccum:
14657 return UnsignedAccumTy;
14658 case BuiltinType::SatULongAccum:
14659 return UnsignedLongAccumTy;
14660 case BuiltinType::SatShortFract:
14661 return ShortFractTy;
14662 case BuiltinType::SatFract:
14663 return FractTy;
14664 case BuiltinType::SatLongFract:
14665 return LongFractTy;
14666 case BuiltinType::SatUShortFract:
14667 return UnsignedShortFractTy;
14668 case BuiltinType::SatUFract:
14669 return UnsignedFractTy;
14670 case BuiltinType::SatULongFract:
14671 return UnsignedLongFractTy;
14672 }
14673}
14674
14675QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
14676 assert(Ty->isFixedPointType());
14677
14678 if (Ty->isSaturatedFixedPointType()) return Ty;
14679
14680 switch (Ty->castAs<BuiltinType>()->getKind()) {
14681 default:
14682 llvm_unreachable("Not a fixed point type!");
14683 case BuiltinType::ShortAccum:
14684 return SatShortAccumTy;
14685 case BuiltinType::Accum:
14686 return SatAccumTy;
14687 case BuiltinType::LongAccum:
14688 return SatLongAccumTy;
14689 case BuiltinType::UShortAccum:
14690 return SatUnsignedShortAccumTy;
14691 case BuiltinType::UAccum:
14692 return SatUnsignedAccumTy;
14693 case BuiltinType::ULongAccum:
14694 return SatUnsignedLongAccumTy;
14695 case BuiltinType::ShortFract:
14696 return SatShortFractTy;
14697 case BuiltinType::Fract:
14698 return SatFractTy;
14699 case BuiltinType::LongFract:
14700 return SatLongFractTy;
14701 case BuiltinType::UShortFract:
14702 return SatUnsignedShortFractTy;
14703 case BuiltinType::UFract:
14704 return SatUnsignedFractTy;
14705 case BuiltinType::ULongFract:
14706 return SatUnsignedLongFractTy;
14707 }
14708}
14709
14710LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
14711 if (LangOpts.OpenCL)
14712 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
14713
14714 if (LangOpts.CUDA)
14715 return getTargetInfo().getCUDABuiltinAddressSpace(AS);
14716
14717 return getLangASFromTargetAS(TargetAS: AS);
14718}
14719
// Explicitly instantiate LazyGenerationalUpdatePtr::makeValue here so that
// translation units which use a Redeclarable<T> without including
// ASTContext.h still find a definition at link time.
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
        const clang::ASTContext &Ctx, Decl *Value);
14728
14729unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
14730 assert(Ty->isFixedPointType());
14731
14732 const TargetInfo &Target = getTargetInfo();
14733 switch (Ty->castAs<BuiltinType>()->getKind()) {
14734 default:
14735 llvm_unreachable("Not a fixed point type!");
14736 case BuiltinType::ShortAccum:
14737 case BuiltinType::SatShortAccum:
14738 return Target.getShortAccumScale();
14739 case BuiltinType::Accum:
14740 case BuiltinType::SatAccum:
14741 return Target.getAccumScale();
14742 case BuiltinType::LongAccum:
14743 case BuiltinType::SatLongAccum:
14744 return Target.getLongAccumScale();
14745 case BuiltinType::UShortAccum:
14746 case BuiltinType::SatUShortAccum:
14747 return Target.getUnsignedShortAccumScale();
14748 case BuiltinType::UAccum:
14749 case BuiltinType::SatUAccum:
14750 return Target.getUnsignedAccumScale();
14751 case BuiltinType::ULongAccum:
14752 case BuiltinType::SatULongAccum:
14753 return Target.getUnsignedLongAccumScale();
14754 case BuiltinType::ShortFract:
14755 case BuiltinType::SatShortFract:
14756 return Target.getShortFractScale();
14757 case BuiltinType::Fract:
14758 case BuiltinType::SatFract:
14759 return Target.getFractScale();
14760 case BuiltinType::LongFract:
14761 case BuiltinType::SatLongFract:
14762 return Target.getLongFractScale();
14763 case BuiltinType::UShortFract:
14764 case BuiltinType::SatUShortFract:
14765 return Target.getUnsignedShortFractScale();
14766 case BuiltinType::UFract:
14767 case BuiltinType::SatUFract:
14768 return Target.getUnsignedFractScale();
14769 case BuiltinType::ULongFract:
14770 case BuiltinType::SatULongFract:
14771 return Target.getUnsignedLongFractScale();
14772 }
14773}
14774
14775unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
14776 assert(Ty->isFixedPointType());
14777
14778 const TargetInfo &Target = getTargetInfo();
14779 switch (Ty->castAs<BuiltinType>()->getKind()) {
14780 default:
14781 llvm_unreachable("Not a fixed point type!");
14782 case BuiltinType::ShortAccum:
14783 case BuiltinType::SatShortAccum:
14784 return Target.getShortAccumIBits();
14785 case BuiltinType::Accum:
14786 case BuiltinType::SatAccum:
14787 return Target.getAccumIBits();
14788 case BuiltinType::LongAccum:
14789 case BuiltinType::SatLongAccum:
14790 return Target.getLongAccumIBits();
14791 case BuiltinType::UShortAccum:
14792 case BuiltinType::SatUShortAccum:
14793 return Target.getUnsignedShortAccumIBits();
14794 case BuiltinType::UAccum:
14795 case BuiltinType::SatUAccum:
14796 return Target.getUnsignedAccumIBits();
14797 case BuiltinType::ULongAccum:
14798 case BuiltinType::SatULongAccum:
14799 return Target.getUnsignedLongAccumIBits();
14800 case BuiltinType::ShortFract:
14801 case BuiltinType::SatShortFract:
14802 case BuiltinType::Fract:
14803 case BuiltinType::SatFract:
14804 case BuiltinType::LongFract:
14805 case BuiltinType::SatLongFract:
14806 case BuiltinType::UShortFract:
14807 case BuiltinType::SatUShortFract:
14808 case BuiltinType::UFract:
14809 case BuiltinType::SatUFract:
14810 case BuiltinType::ULongFract:
14811 case BuiltinType::SatULongFract:
14812 return 0;
14813 }
14814}
14815
14816llvm::FixedPointSemantics
14817ASTContext::getFixedPointSemantics(QualType Ty) const {
14818 assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
14819 "Can only get the fixed point semantics for a "
14820 "fixed point or integer type.");
14821 if (Ty->isIntegerType())
14822 return llvm::FixedPointSemantics::GetIntegerSemantics(
14823 Width: getIntWidth(T: Ty), IsSigned: Ty->isSignedIntegerType());
14824
14825 bool isSigned = Ty->isSignedFixedPointType();
14826 return llvm::FixedPointSemantics(
14827 static_cast<unsigned>(getTypeSize(T: Ty)), getFixedPointScale(Ty), isSigned,
14828 Ty->isSaturatedFixedPointType(),
14829 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
14830}
14831
14832llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
14833 assert(Ty->isFixedPointType());
14834 return llvm::APFixedPoint::getMax(Sema: getFixedPointSemantics(Ty));
14835}
14836
14837llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
14838 assert(Ty->isFixedPointType());
14839 return llvm::APFixedPoint::getMin(Sema: getFixedPointSemantics(Ty));
14840}
14841
14842QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
14843 assert(Ty->isUnsignedFixedPointType() &&
14844 "Expected unsigned fixed point type");
14845
14846 switch (Ty->castAs<BuiltinType>()->getKind()) {
14847 case BuiltinType::UShortAccum:
14848 return ShortAccumTy;
14849 case BuiltinType::UAccum:
14850 return AccumTy;
14851 case BuiltinType::ULongAccum:
14852 return LongAccumTy;
14853 case BuiltinType::SatUShortAccum:
14854 return SatShortAccumTy;
14855 case BuiltinType::SatUAccum:
14856 return SatAccumTy;
14857 case BuiltinType::SatULongAccum:
14858 return SatLongAccumTy;
14859 case BuiltinType::UShortFract:
14860 return ShortFractTy;
14861 case BuiltinType::UFract:
14862 return FractTy;
14863 case BuiltinType::ULongFract:
14864 return LongFractTy;
14865 case BuiltinType::SatUShortFract:
14866 return SatShortFractTy;
14867 case BuiltinType::SatUFract:
14868 return SatFractTy;
14869 case BuiltinType::SatULongFract:
14870 return SatLongFractTy;
14871 default:
14872 llvm_unreachable("Unexpected unsigned fixed point type");
14873 }
14874}
14875
14876// Given a list of FMV features, return a concatenated list of the
14877// corresponding backend features (which may contain duplicates).
14878static std::vector<std::string> getFMVBackendFeaturesFor(
14879 const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
14880 std::vector<std::string> BackendFeats;
14881 llvm::AArch64::ExtensionSet FeatureBits;
14882 for (StringRef F : FMVFeatStrings)
14883 if (auto FMVExt = llvm::AArch64::parseFMVExtension(Extension: F))
14884 if (FMVExt->ID)
14885 FeatureBits.enable(E: *FMVExt->ID);
14886 FeatureBits.toLLVMFeatureList(Features&: BackendFeats);
14887 return BackendFeats;
14888}
14889
14890ParsedTargetAttr
14891ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
14892 assert(TD != nullptr);
14893 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TD->getFeaturesStr());
14894
14895 llvm::erase_if(C&: ParsedAttr.Features, P: [&](const std::string &Feat) {
14896 return !Target->isValidFeatureName(Feature: StringRef{Feat}.substr(Start: 1));
14897 });
14898 return ParsedAttr;
14899}
14900
14901void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
14902 const FunctionDecl *FD) const {
14903 if (FD)
14904 getFunctionFeatureMap(FeatureMap, GD: GlobalDecl().getWithDecl(D: FD));
14905 else
14906 Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(),
14907 CPU: Target->getTargetOpts().CPU,
14908 FeatureVec: Target->getTargetOpts().Features);
14909}
14910
// Fills in the supplied string map with the set of target features for the
// passed in function. The relevant multi-versioning attribute (target,
// cpu_specific, target_clones, target_version), if any, determines which
// features are combined with the command-line feature set.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    // __attribute__((target(...))): parse the attribute string, keeping only
    // features the target recognizes.
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    // AArch64 handles command line option features in parseTargetAttr().
    if (!Target->getTriple().isAArch64())
      ParsedAttr.Features.insert(
          position: ParsedAttr.Features.begin(),
          first: Target->getTargetOpts().FeaturesAsWritten.begin(),
          last: Target->getTargetOpts().FeaturesAsWritten.end());

    // The attribute may also select a different CPU; honor it if valid.
    if (ParsedAttr.CPU != "" && Target->isValidCPUName(Name: ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU,
                           FeatureVec: ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // __attribute__((cpu_specific(...))): look up the dispatch features for
    // the CPU selected by this multi-version index.
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        Name: SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(), Features&: FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    // Command-line features go first so the dispatch features override them.
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // __attribute__((target_clones(...))): handling differs per target.
    if (Target->getTriple().isAArch64()) {
      // AArch64 clones name FMV features; translate them to backend features.
      llvm::SmallVector<StringRef, 8> Feats;
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else if (Target->getTriple().isRISCV()) {
      // RISC-V clones carry a full version string; "default" adds nothing.
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features;
      if (VersionStr != "default") {
        ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: VersionStr);
        Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                        last: ParsedAttr.Features.end());
      }
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else {
      // Other targets (e.g. X86): the clone string is either "arch=<cpu>",
      // "default", or a single feature name.
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      if (VersionStr.starts_with(Prefix: "arch="))
        TargetCPU = VersionStr.drop_front(N: sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back(x: (StringRef{"+"} + VersionStr).str());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    }
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    // __attribute__((target_version(...))): RISC-V parses the string as a
    // target attribute; AArch64 maps FMV feature names to backend features.
    std::vector<std::string> Features;
    if (Target->getTriple().isRISCV()) {
      ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TV->getName());
      Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                      last: ParsedAttr.Features.end());
    } else {
      assert(Target->getTriple().isAArch64());
      llvm::SmallVector<StringRef, 8> Feats;
      TV->getFeatures(Out&: Feats);
      Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
    }
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else {
    // No multi-versioning attribute: use the precomputed default feature map.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}
14997
14998static SYCLKernelInfo BuildSYCLKernelInfo(ASTContext &Context,
14999 CanQualType KernelNameType,
15000 const FunctionDecl *FD) {
15001 // Host and device compilation may use different ABIs and different ABIs
15002 // may allocate name mangling discriminators differently. A discriminator
15003 // override is used to ensure consistent discriminator allocation across
15004 // host and device compilation.
15005 auto DeviceDiscriminatorOverrider =
15006 [](ASTContext &Ctx, const NamedDecl *ND) -> UnsignedOrNone {
15007 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
15008 if (RD->isLambda())
15009 return RD->getDeviceLambdaManglingNumber();
15010 return std::nullopt;
15011 };
15012 std::unique_ptr<MangleContext> MC{ItaniumMangleContext::create(
15013 Context, Diags&: Context.getDiagnostics(), Discriminator: DeviceDiscriminatorOverrider)};
15014
15015 // Construct a mangled name for the SYCL kernel caller offload entry point.
15016 // FIXME: The Itanium typeinfo mangling (_ZTS<type>) is currently used to
15017 // name the SYCL kernel caller offload entry point function. This mangling
15018 // does not suffice to clearly identify symbols that correspond to SYCL
15019 // kernel caller functions, nor is this mangling natural for targets that
15020 // use a non-Itanium ABI.
15021 std::string Buffer;
15022 Buffer.reserve(res_arg: 128);
15023 llvm::raw_string_ostream Out(Buffer);
15024 MC->mangleCanonicalTypeName(T: KernelNameType, Out);
15025 std::string KernelName = Out.str();
15026
15027 return {KernelNameType, FD, KernelName};
15028}
15029
15030void ASTContext::registerSYCLEntryPointFunction(FunctionDecl *FD) {
15031 // If the function declaration to register is invalid or dependent, the
15032 // registration attempt is ignored.
15033 if (FD->isInvalidDecl() || FD->isTemplated())
15034 return;
15035
15036 const auto *SKEPAttr = FD->getAttr<SYCLKernelEntryPointAttr>();
15037 assert(SKEPAttr && "Missing sycl_kernel_entry_point attribute");
15038
15039 // Be tolerant of multiple registration attempts so long as each attempt
15040 // is for the same entity. Callers are obligated to detect and diagnose
15041 // conflicting kernel names prior to calling this function.
15042 CanQualType KernelNameType = getCanonicalType(T: SKEPAttr->getKernelName());
15043 auto IT = SYCLKernels.find(Val: KernelNameType);
15044 assert((IT == SYCLKernels.end() ||
15045 declaresSameEntity(FD, IT->second.getKernelEntryPointDecl())) &&
15046 "SYCL kernel name conflict");
15047 (void)IT;
15048 SYCLKernels.insert(KV: std::make_pair(
15049 x&: KernelNameType, y: BuildSYCLKernelInfo(Context&: *this, KernelNameType, FD)));
15050}
15051
15052const SYCLKernelInfo &ASTContext::getSYCLKernelInfo(QualType T) const {
15053 CanQualType KernelNameType = getCanonicalType(T);
15054 return SYCLKernels.at(Val: KernelNameType);
15055}
15056
15057const SYCLKernelInfo *ASTContext::findSYCLKernelInfo(QualType T) const {
15058 CanQualType KernelNameType = getCanonicalType(T);
15059 auto IT = SYCLKernels.find(Val: KernelNameType);
15060 if (IT != SYCLKernels.end())
15061 return &IT->second;
15062 return nullptr;
15063}
15064
15065OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
15066 OMPTraitInfoVector.emplace_back(Args: new OMPTraitInfo());
15067 return *OMPTraitInfoVector.back();
15068}
15069
15070const StreamingDiagnostic &clang::
15071operator<<(const StreamingDiagnostic &DB,
15072 const ASTContext::SectionInfo &Section) {
15073 if (Section.Decl)
15074 return DB << Section.Decl;
15075 return DB << "a prior #pragma section";
15076}
15077
15078bool ASTContext::mayExternalize(const Decl *D) const {
15079 bool IsInternalVar =
15080 isa<VarDecl>(Val: D) &&
15081 basicGVALinkageForVariable(Context: *this, VD: cast<VarDecl>(Val: D)) == GVA_Internal;
15082 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
15083 !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
15084 (D->hasAttr<CUDAConstantAttr>() &&
15085 !D->getAttr<CUDAConstantAttr>()->isImplicit());
15086 // CUDA/HIP: managed variables need to be externalized since it is
15087 // a declaration in IR, therefore cannot have internal linkage. Kernels in
15088 // anonymous name space needs to be externalized to avoid duplicate symbols.
15089 return (IsInternalVar &&
15090 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
15091 (D->hasAttr<CUDAGlobalAttr>() &&
15092 basicGVALinkageForFunction(Context: *this, FD: cast<FunctionDecl>(Val: D)) ==
15093 GVA_Internal);
15094}
15095
15096bool ASTContext::shouldExternalize(const Decl *D) const {
15097 return mayExternalize(D) &&
15098 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
15099 CUDADeviceVarODRUsedByHost.count(V: cast<VarDecl>(Val: D)));
15100}
15101
15102StringRef ASTContext::getCUIDHash() const {
15103 if (!CUIDHash.empty())
15104 return CUIDHash;
15105 if (LangOpts.CUID.empty())
15106 return StringRef();
15107 CUIDHash = llvm::utohexstr(X: llvm::MD5Hash(Str: LangOpts.CUID), /*LowerCase=*/true);
15108 return CUIDHash;
15109}
15110
15111const CXXRecordDecl *
15112ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) {
15113 assert(ThisClass);
15114 assert(ThisClass->isPolymorphic());
15115 const CXXRecordDecl *PrimaryBase = ThisClass;
15116 while (1) {
15117 assert(PrimaryBase);
15118 assert(PrimaryBase->isPolymorphic());
15119 auto &Layout = getASTRecordLayout(D: PrimaryBase);
15120 auto Base = Layout.getPrimaryBase();
15121 if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
15122 break;
15123 PrimaryBase = Base;
15124 }
15125 return PrimaryBase;
15126}
15127
/// Decide whether \p MangledName (the full, override-info-carrying thunk
/// name for \p VirtualMethodDecl) may be emitted under its abbreviated
/// (override-info-elided) name. The answer is computed once per method and
/// cached in ThunksToBeAbbreviated.
bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
                                         StringRef MangledName) {
  auto *Method = cast<CXXMethodDecl>(Val: VirtualMethodDecl.getDecl());
  assert(Method->isVirtual());
  bool DefaultIncludesPointerAuth =
      LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;

  // Without pointer auth, the abbreviated name is always usable.
  if (!DefaultIncludesPointerAuth)
    return true;

  // Fast path: reuse the per-method set computed on a previous call.
  auto Existing = ThunksToBeAbbreviated.find(Val: VirtualMethodDecl);
  if (Existing != ThunksToBeAbbreviated.end())
    return Existing->second.contains(key: MangledName.str());

  // Mangle every thunk of the method twice: once with override info elided
  // and once with it included. Thunks whose elided names collide are grouped
  // under the same key.
  std::unique_ptr<MangleContext> Mangler(createMangleContext());
  llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
  auto VtableContext = getVTableContext();
  if (const auto *ThunkInfos = VtableContext->getThunkInfo(GD: VirtualMethodDecl)) {
    auto *Destructor = dyn_cast<CXXDestructorDecl>(Val: Method);
    for (const auto &Thunk : *ThunkInfos) {
      SmallString<256> ElidedName;
      llvm::raw_svector_ostream ElidedNameStream(ElidedName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                                    ElidedNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                             ElidedNameStream);
      SmallString<256> MangledName;
      llvm::raw_svector_ostream mangledNameStream(MangledName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                                    mangledNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                             mangledNameStream);

      Thunks[ElidedName].push_back(Elt: std::string(MangledName));
    }
  }
  // For each group of thunks sharing an elided name, only one (the
  // lexicographically smallest full name, for determinism) may use the
  // abbreviated form.
  llvm::StringSet<> SimplifiedThunkNames;
  for (auto &ThunkList : Thunks) {
    llvm::sort(C&: ThunkList.second);
    SimplifiedThunkNames.insert(key: ThunkList.second[0]);
  }
  bool Result = SimplifiedThunkNames.contains(key: MangledName);
  // Cache the set so later queries for this method are a single lookup.
  ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
  return Result;
}
15179
// source code of clang/lib/AST/ASTContext.cpp