1//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// Implements semantic analysis for C++ expressions.
11///
12//===----------------------------------------------------------------------===//
13
14#include "TreeTransform.h"
15#include "TypeLocBuilder.h"
16#include "clang/AST/ASTContext.h"
17#include "clang/AST/ASTLambda.h"
18#include "clang/AST/CXXInheritance.h"
19#include "clang/AST/CharUnits.h"
20#include "clang/AST/DeclCXX.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/AST/DynamicRecursiveASTVisitor.h"
23#include "clang/AST/ExprCXX.h"
24#include "clang/AST/ExprConcepts.h"
25#include "clang/AST/ExprObjC.h"
26#include "clang/AST/Type.h"
27#include "clang/AST/TypeLoc.h"
28#include "clang/Basic/AlignedAllocation.h"
29#include "clang/Basic/DiagnosticSema.h"
30#include "clang/Basic/PartialDiagnostic.h"
31#include "clang/Basic/TargetInfo.h"
32#include "clang/Basic/TokenKinds.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/DeclSpec.h"
35#include "clang/Sema/EnterExpressionEvaluationContext.h"
36#include "clang/Sema/Initialization.h"
37#include "clang/Sema/Lookup.h"
38#include "clang/Sema/ParsedTemplate.h"
39#include "clang/Sema/Scope.h"
40#include "clang/Sema/ScopeInfo.h"
41#include "clang/Sema/SemaCUDA.h"
42#include "clang/Sema/SemaHLSL.h"
43#include "clang/Sema/SemaLambda.h"
44#include "clang/Sema/SemaObjC.h"
45#include "clang/Sema/SemaPPC.h"
46#include "clang/Sema/Template.h"
47#include "clang/Sema/TemplateDeduction.h"
48#include "llvm/ADT/APInt.h"
49#include "llvm/ADT/STLExtras.h"
50#include "llvm/ADT/StringExtras.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/TypeSize.h"
53#include <optional>
54using namespace clang;
55using namespace sema;
56
57ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
58 SourceLocation NameLoc,
59 const IdentifierInfo &Name) {
60 NestedNameSpecifier *NNS = SS.getScopeRep();
61 if ([[maybe_unused]] const IdentifierInfo *II = NNS->getAsIdentifier())
62 assert(II == &Name && "not a constructor name");
63
64 QualType Type(NNS->translateToType(Context), 0);
65 // This reference to the type is located entirely at the location of the
66 // final identifier in the qualified-id.
67 return CreateParsedType(T: Type,
68 TInfo: Context.getTrivialTypeSourceInfo(T: Type, Loc: NameLoc));
69}
70
/// Resolve a constructor name such as the 'C' in 'C::C'.
///
/// \p II must be the name of the current class (asserted below). Returns the
/// injected-class-name type on success, a DependentNameType when the
/// constructor is named as a member of a dependent context, or a null
/// ParsedType after emitting a diagnostic on failure.
ParsedType Sema::getConstructorName(const IdentifierInfo &II,
                                    SourceLocation NameLoc, Scope *S,
                                    CXXScopeSpec &SS, bool EnteringContext) {
  CXXRecordDecl *CurClass = getCurrentClass(S, SS: &SS);
  assert(CurClass && &II == CurClass->getIdentifier() &&
         "not a constructor name");

  // When naming a constructor as a member of a dependent context (eg, in a
  // friend declaration or an inherited constructor declaration), form an
  // unresolved "typename" type.
  if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
    QualType T = Context.getDependentNameType(Keyword: ElaboratedTypeKeyword::None,
                                              NNS: SS.getScopeRep(), Name: &II);
    return ParsedType::make(P: T);
  }

  // A qualified name must refer to a complete class before we can look
  // inside it.
  if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, DC: CurClass))
    return ParsedType();

  // Find the injected-class-name declaration. Note that we make no attempt to
  // diagnose cases where the injected-class-name is shadowed: the only
  // declaration that can validly shadow the injected-class-name is a
  // non-static data member, and if the class contains both a non-static data
  // member and a constructor then it is ill-formed (we check that in
  // CheckCompletedCXXClass).
  CXXRecordDecl *InjectedClassName = nullptr;
  for (NamedDecl *ND : CurClass->lookup(Name: &II)) {
    auto *RD = dyn_cast<CXXRecordDecl>(Val: ND);
    if (RD && RD->isInjectedClassName()) {
      InjectedClassName = RD;
      break;
    }
  }
  if (!InjectedClassName) {
    // Lookup found no injected-class-name; only diagnose when the class
    // itself is not already known to be broken, to avoid noise.
    if (!CurClass->isInvalidDecl()) {
      // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
      // properly. Work around it here for now.
      Diag(Loc: SS.getLastQualifierNameLoc(),
           DiagID: diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
    }
    return ParsedType();
  }

  QualType T = Context.getTypeDeclType(Decl: InjectedClassName);
  // Check availability/deprecation and record the (non-ODR) use of the name.
  DiagnoseUseOfDecl(D: InjectedClassName, Locs: NameLoc);
  MarkAnyDeclReferenced(Loc: NameLoc, D: InjectedClassName, /*OdrUse=*/MightBeOdrUse: false);

  return ParsedType::make(P: T);
}
120
/// Resolve the type-name after '~' in a destructor name.
///
/// Looks in (up to) three places — the nested-name-specifier prefix, the
/// object type (for member accesses / pseudo-destructors), and the enclosing
/// scope — accepting the name if any of them yields the expected type, and
/// issuing extension diagnostics for the forms that are not strictly
/// conforming. Returns null after diagnosing on failure.
ParsedType Sema::getDestructorName(const IdentifierInfo &II,
                                   SourceLocation NameLoc, Scope *S,
                                   CXXScopeSpec &SS, ParsedType ObjectTypePtr,
                                   bool EnteringContext) {
  // Determine where to perform name lookup.

  // FIXME: This area of the standard is very messy, and the current
  // wording is rather unclear about which scopes we search for the
  // destructor name; see core issues 399 and 555. Issue 399 in
  // particular shows where the current description of destructor name
  // lookup is completely out of line with existing practice, e.g.,
  // this appears to be ill-formed:
  //
  //   namespace N {
  //     template <typename T> struct S {
  //       ~S();
  //     };
  //   }
  //
  //   void f(N::S<int>* s) {
  //     s->N::S<int>::~S();
  //   }
  //
  // See also PR6358 and PR6359.
  //
  // For now, we accept all the cases in which the name given could plausibly
  // be interpreted as a correct destructor name, issuing off-by-default
  // extension diagnostics on the cases that don't strictly conform to the
  // C++20 rules. This basically means we always consider looking in the
  // nested-name-specifier prefix, the complete nested-name-specifier, and
  // the scope, and accept if we find the expected type in any of the three
  // places.

  if (SS.isInvalid())
    return nullptr;

  // Whether we've failed with a diagnostic already.
  bool Failed = false;

  // All candidate declarations found by any lookup, deduplicated via
  // FoundDeclSet; used for the "did you mean" notes on failure.
  llvm::SmallVector<NamedDecl*, 8> FoundDecls;
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;

  // If we have an object type, it's because we are in a
  // pseudo-destructor-expression or a member access expression, and
  // we know what type we're looking for.
  QualType SearchType =
      ObjectTypePtr ? GetTypeFromParser(Ty: ObjectTypePtr) : QualType();

  // Shared post-processing for each lookup: accept a unique type result that
  // matches SearchType (if any), collect candidates for diagnostics, and try
  // to resolve ambiguities in favor of the matching type.
  auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
    auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
      auto *Type = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl());
      if (!Type)
        return false;

      // With no (or a dependent) search type, any type is acceptable.
      if (SearchType.isNull() || SearchType->isDependentType())
        return true;

      QualType T = Context.getTypeDeclType(Decl: Type);
      return Context.hasSameUnqualifiedType(T1: T, T2: SearchType);
    };

    unsigned NumAcceptableResults = 0;
    for (NamedDecl *D : Found) {
      if (IsAcceptableResult(D))
        ++NumAcceptableResults;

      // Don't list a class twice in the lookup failure diagnostic if it's
      // found by both its injected-class-name and by the name in the enclosing
      // scope.
      if (auto *RD = dyn_cast<CXXRecordDecl>(Val: D))
        if (RD->isInjectedClassName())
          D = cast<NamedDecl>(Val: RD->getParent());

      if (FoundDeclSet.insert(Ptr: D).second)
        FoundDecls.push_back(Elt: D);
    }

    // As an extension, attempt to "fix" an ambiguity by erasing all non-type
    // results, and all non-matching results if we have a search type. It's not
    // clear what the right behavior is if destructor lookup hits an ambiguity,
    // but other compilers do generally accept at least some kinds of
    // ambiguity.
    if (Found.isAmbiguous() && NumAcceptableResults == 1) {
      Diag(Loc: NameLoc, DiagID: diag::ext_dtor_name_ambiguous);
      LookupResult::Filter F = Found.makeFilter();
      while (F.hasNext()) {
        NamedDecl *D = F.next();
        if (auto *TD = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl()))
          Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_type_here)
              << Context.getTypeDeclType(Decl: TD);
        else
          Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_nontype_here);

        if (!IsAcceptableResult(D))
          F.erase();
      }
      F.done();
    }

    // Still ambiguous: remember that we already diagnosed so later lookups
    // are skipped.
    if (Found.isAmbiguous())
      Failed = true;

    if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
      if (IsAcceptableResult(Type)) {
        QualType T = Context.getTypeDeclType(Decl: Type);
        MarkAnyDeclReferenced(Loc: Type->getLocation(), D: Type, /*OdrUse=*/MightBeOdrUse: false);
        return CreateParsedType(
            T: Context.getElaboratedType(Keyword: ElaboratedTypeKeyword::None, NNS: nullptr, NamedType: T),
            TInfo: Context.getTrivialTypeSourceInfo(T, Loc: NameLoc));
      }
    }

    return nullptr;
  };

  // Set when any of the scopes we would search is dependent; in that case a
  // failed lookup still produces a dependent "typename" type below.
  bool IsDependent = false;

  auto LookupInObjectType = [&]() -> ParsedType {
    if (Failed || SearchType.isNull())
      return nullptr;

    IsDependent |= SearchType->isDependentType();

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    DeclContext *LookupCtx = computeDeclContext(T: SearchType);
    if (!LookupCtx)
      return nullptr;
    LookupQualifiedName(R&: Found, LookupCtx);
    return CheckLookupResult(Found);
  };

  auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
    if (Failed)
      return nullptr;

    IsDependent |= isDependentScopeSpecifier(SS: LookupSS);
    DeclContext *LookupCtx = computeDeclContext(SS: LookupSS, EnteringContext);
    if (!LookupCtx)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    if (RequireCompleteDeclContext(SS&: LookupSS, DC: LookupCtx)) {
      Failed = true;
      return nullptr;
    }
    LookupQualifiedName(R&: Found, LookupCtx);
    return CheckLookupResult(Found);
  };

  auto LookupInScope = [&]() -> ParsedType {
    if (Failed || !S)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    LookupName(R&: Found, S);
    return CheckLookupResult(Found);
  };

  // C++2a [basic.lookup.qual]p6:
  //   In a qualified-id of the form
  //
  //     nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   the second type-name is looked up in the same scope as the first.
  //
  // We interpret this as meaning that if you do a dual-scope lookup for the
  // first name, you also do a dual-scope lookup for the second name, per
  // C++ [basic.lookup.classref]p4:
  //
  //   If the id-expression in a class member access is a qualified-id of the
  //   form
  //
  //     class-name-or-namespace-name :: ...
  //
  //   the class-name-or-namespace-name following the . or -> is first looked
  //   up in the class of the object expression and the name, if found, is used.
  //   Otherwise, it is looked up in the context of the entire
  //   postfix-expression.
  //
  // This looks in the same scopes as for an unqualified destructor name:
  //
  // C++ [basic.lookup.classref]p3:
  //   If the unqualified-id is ~ type-name, the type-name is looked up
  //   in the context of the entire postfix-expression. If the type T
  //   of the object expression is of a class type C, the type-name is
  //   also looked up in the scope of class C. At least one of the
  //   lookups shall find a name that refers to cv T.
  //
  // FIXME: The intent is unclear here. Should type-name::~type-name look in
  // the scope anyway if it finds a non-matching name declared in the class?
  // If both lookups succeed and find a dependent result, which result should
  // we retain? (Same question for p->~type-name().)

  if (NestedNameSpecifier *Prefix =
          SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
    // This is
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // Look for the second type-name in the nested-name-specifier.
    CXXScopeSpec PrefixSS;
    PrefixSS.Adopt(Other: NestedNameSpecifierLoc(Prefix, SS.location_data()));
    if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
      return T;
  } else {
    // This is one of
    //
    //   type-name :: ~ type-name
    //   ~ type-name
    //
    // Look in the scope and (if any) the object type.
    if (ParsedType T = LookupInScope())
      return T;
    if (ParsedType T = LookupInObjectType())
      return T;
  }

  if (Failed)
    return nullptr;

  if (IsDependent) {
    // We didn't find our type, but that's OK: it's dependent anyway.

    // FIXME: What if we have no nested-name-specifier?
    TypeSourceInfo *TSI = nullptr;
    QualType T =
        CheckTypenameType(Keyword: ElaboratedTypeKeyword::None, KeywordLoc: SourceLocation(),
                          QualifierLoc: SS.getWithLocInContext(Context), II, IILoc: NameLoc, TSI: &TSI,
                          /*DeducedTSTContext=*/true);
    if (T.isNull())
      return ParsedType();
    return CreateParsedType(T, TInfo: TSI);
  }

  // The remaining cases are all non-standard extensions imitating the behavior
  // of various other compilers.
  // Remember how many candidates the conforming lookups produced, so the
  // failure notes below don't mention declarations found only by extensions.
  unsigned NumNonExtensionDecls = FoundDecls.size();

  if (SS.isSet()) {
    // For compatibility with older broken C++ rules and existing code,
    //
    //   nested-name-specifier :: ~ type-name
    //
    // also looks for type-name within the nested-name-specifier.
    if (ParsedType T = LookupInNestedNameSpec(SS)) {
      Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_dtor_named_in_wrong_scope)
          << SS.getRange()
          << FixItHint::CreateInsertion(InsertionLoc: SS.getEndLoc(),
                                        Code: ("::" + II.getName()).str());
      return T;
    }

    // For compatibility with other compilers and older versions of Clang,
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // also looks for type-name in the scope. Unfortunately, we can't
    // reasonably apply this fallback for dependent nested-name-specifiers.
    if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
      if (ParsedType T = LookupInScope()) {
        Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_qualified_dtor_named_in_lexical_scope)
            << FixItHint::CreateRemoval(RemoveRange: SS.getRange());
        Diag(Loc: FoundDecls.back()->getLocation(), DiagID: diag::note_destructor_type_here)
            << GetTypeFromParser(Ty: T);
        return T;
      }
    }
  }

  // We didn't find anything matching; tell the user what we did find (if
  // anything).

  // Don't tell the user about declarations we shouldn't have found.
  FoundDecls.resize(N: NumNonExtensionDecls);

  // List types before non-types.
  llvm::stable_sort(Range&: FoundDecls, C: [](NamedDecl *A, NamedDecl *B) {
    return isa<TypeDecl>(Val: A->getUnderlyingDecl()) >
           isa<TypeDecl>(Val: B->getUnderlyingDecl());
  });

  // Suggest a fixit to properly name the destroyed type.
  auto MakeFixItHint = [&]{
    const CXXRecordDecl *Destroyed = nullptr;
    // FIXME: If we have a scope specifier, suggest its last component?
    if (!SearchType.isNull())
      Destroyed = SearchType->getAsCXXRecordDecl();
    else if (S)
      Destroyed = dyn_cast_or_null<CXXRecordDecl>(Val: S->getEntity());
    if (Destroyed)
      return FixItHint::CreateReplacement(RemoveRange: SourceRange(NameLoc),
                                          Code: Destroyed->getNameAsString());
    return FixItHint();
  };

  if (FoundDecls.empty()) {
    // FIXME: Attempt typo-correction?
    Diag(Loc: NameLoc, DiagID: diag::err_undeclared_destructor_name)
      << &II << MakeFixItHint();
  } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
    if (auto *TD = dyn_cast<TypeDecl>(Val: FoundDecls[0]->getUnderlyingDecl())) {
      assert(!SearchType.isNull() &&
             "should only reject a type result if we have a search type");
      QualType T = Context.getTypeDeclType(Decl: TD);
      Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_type_mismatch)
          << T << SearchType << MakeFixItHint();
    } else {
      Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_nontype)
          << &II << MakeFixItHint();
    }
  } else {
    Diag(Loc: NameLoc, DiagID: SearchType.isNull() ? diag::err_destructor_name_nontype
                                   : diag::err_destructor_expr_mismatch)
        << &II << SearchType << MakeFixItHint();
  }

  // Note each surviving candidate so the user can see what we considered.
  for (NamedDecl *FoundD : FoundDecls) {
    if (auto *TD = dyn_cast<TypeDecl>(Val: FoundD->getUnderlyingDecl()))
      Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_type_here)
          << Context.getTypeDeclType(Decl: TD);
    else
      Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_nontype_here)
          << FoundD;
  }

  return nullptr;
}
448
449ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
450 ParsedType ObjectType) {
451 if (DS.getTypeSpecType() == DeclSpec::TST_error)
452 return nullptr;
453
454 if (DS.getTypeSpecType() == DeclSpec::TST_decltype_auto) {
455 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
456 return nullptr;
457 }
458
459 assert(DS.getTypeSpecType() == DeclSpec::TST_decltype &&
460 "unexpected type in getDestructorType");
461 QualType T = BuildDecltypeType(E: DS.getRepAsExpr());
462
463 // If we know the type of the object, check that the correct destructor
464 // type was named now; we can give better diagnostics this way.
465 QualType SearchType = GetTypeFromParser(Ty: ObjectType);
466 if (!SearchType.isNull() && !SearchType->isDependentType() &&
467 !Context.hasSameUnqualifiedType(T1: T, T2: SearchType)) {
468 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_destructor_expr_type_mismatch)
469 << T << SearchType;
470 return nullptr;
471 }
472
473 return ParsedType::make(P: T);
474}
475
/// Check the scope and spelling of a literal-operator-id such as
/// 'operator""_x'. Returns true on error (after diagnosing); warnings about
/// reserved suffixes do not make the id invalid.
bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
                                  const UnqualifiedId &Name, bool IsUDSuffix) {
  assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
  // !IsUDSuffix means the suffix was written separated from the quotes
  // ('operator"" _x' rather than 'operator""_x'), which is only valid for
  // reserved identifiers.
  if (!IsUDSuffix) {
    // [over.literal] p8
    //
    // double operator""_Bq(long double);  // OK: not a reserved identifier
    // double operator"" _Bq(long double); // ill-formed, no diagnostic required
    const IdentifierInfo *II = Name.Identifier;
    ReservedIdentifierStatus Status = II->isReserved(LangOpts: PP.getLangOpts());
    SourceLocation Loc = Name.getEndLoc();

    // Suggest joining the suffix onto the quotes.
    auto Hint = FixItHint::CreateReplacement(
        RemoveRange: Name.getSourceRange(),
        Code: (StringRef("operator\"\"") + II->getName()).str());

    // Only emit this diagnostic if we start with an underscore, else the
    // diagnostic for C++11 requiring a space between the quotes and the
    // identifier conflicts with this and gets confusing. The diagnostic stating
    // this is a reserved name should force the underscore, which gets this
    // back.
    if (II->isReservedLiteralSuffixId() !=
        ReservedLiteralSuffixIdStatus::NotStartsWithUnderscore)
      Diag(Loc, DiagID: diag::warn_deprecated_literal_operator_id) << II << Hint;

    if (isReservedInAllContexts(Status))
      Diag(Loc, DiagID: diag::warn_reserved_extern_symbol)
          << II << static_cast<int>(Status) << Hint;
  }

  if (!SS.isValid())
    return false;

  switch (SS.getScopeRep()->getKind()) {
  case NestedNameSpecifier::Identifier:
  case NestedNameSpecifier::TypeSpec:
    // Per C++11 [over.literal]p2, literal operators can only be declared at
    // namespace scope. Therefore, this unqualified-id cannot name anything.
    // Reject it early, because we have no AST representation for this in the
    // case where the scope is dependent.
    Diag(Loc: Name.getBeginLoc(), DiagID: diag::err_literal_operator_id_outside_namespace)
        << SS.getScopeRep();
    return true;

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    return false;
  }

  llvm_unreachable("unknown nested name specifier kind");
}
529
/// Build a typeid expression whose operand is a type-id.
///
/// \p TypeInfoType is the (already located) std::type_info type; the result
/// has that type with 'const' added.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                SourceLocation TypeidLoc,
                                TypeSourceInfo *Operand,
                                SourceLocation RParenLoc) {
  // C++ [expr.typeid]p4:
  //   The top-level cv-qualifiers of the lvalue expression or the type-id
  //   that is the operand of typeid are always ignored.
  //   If the type of the type-id is a class type or a reference to a class
  //   type, the class shall be completely-defined.
  Qualifiers Quals;
  QualType T
    = Context.getUnqualifiedArrayType(T: Operand->getType().getNonReferenceType(),
                                      Quals);
  if (T->getAs<RecordType>() &&
      RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
    return ExprError();

  // VLAs and other variably modified types have no RTTI representation.
  if (T->isVariablyModifiedType())
    return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid) << T);

  if (CheckQualifiedFunctionForTypeId(T, Loc: TypeidLoc))
    return ExprError();

  return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
                                     SourceRange(TypeidLoc, RParenLoc));
}
556
/// Build a typeid expression whose operand is an expression.
///
/// Handles the glvalue-of-polymorphic-class case (which makes the operand
/// potentially evaluated and requires a vtable) and strips top-level
/// cv-qualifiers from the operand type per [expr.typeid]p4.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                SourceLocation TypeidLoc,
                                Expr *E,
                                SourceLocation RParenLoc) {
  // Tracks whether the operand ends up being evaluated (polymorphic glvalue);
  // used below to pick the right side-effects warning.
  bool WasEvaluated = false;
  if (E && !E->isTypeDependent()) {
    if (E->hasPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(E);
      if (result.isInvalid()) return ExprError();
      E = result.get();
    }

    QualType T = E->getType();
    if (const RecordType *RecordT = T->getAs<RecordType>()) {
      CXXRecordDecl *RecordD = cast<CXXRecordDecl>(Val: RecordT->getDecl());
      // C++ [expr.typeid]p3:
      //   [...] If the type of the expression is a class type, the class
      //   shall be completely-defined.
      if (RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
        return ExprError();

      // C++ [expr.typeid]p3:
      //   When typeid is applied to an expression other than an glvalue of a
      //   polymorphic class type [...] [the] expression is an unevaluated
      //   operand. [...]
      if (RecordD->isPolymorphic() && E->isGLValue()) {
        if (isUnevaluatedContext()) {
          // The operand was processed in unevaluated context, switch the
          // context and recheck the subexpression.
          ExprResult Result = TransformToPotentiallyEvaluated(E);
          if (Result.isInvalid())
            return ExprError();
          E = Result.get();
        }

        // We require a vtable to query the type at run time.
        MarkVTableUsed(Loc: TypeidLoc, Class: RecordD);
        WasEvaluated = true;
      }
    }

    ExprResult Result = CheckUnevaluatedOperand(E);
    if (Result.isInvalid())
      return ExprError();
    E = Result.get();

    // C++ [expr.typeid]p4:
    //   [...] If the type of the type-id is a reference to a possibly
    //   cv-qualified type, the result of the typeid expression refers to a
    //   std::type_info object representing the cv-unqualified referenced
    //   type.
    Qualifiers Quals;
    QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
    if (!Context.hasSameType(T1: T, T2: UnqualT)) {
      T = UnqualT;
      // No-op cast so the AST records the cv-stripped operand type.
      E = ImpCastExprToType(E, Type: UnqualT, CK: CK_NoOp, VK: E->getValueKind()).get();
    }
  }

  if (E->getType()->isVariablyModifiedType())
    return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid)
                     << E->getType());
  else if (!inTemplateInstantiation() &&
           E->HasSideEffects(Ctx: Context, IncludePossibleEffects: WasEvaluated)) {
    // The expression operand for typeid is in an unevaluated expression
    // context, so side effects could result in unintended consequences.
    Diag(Loc: E->getExprLoc(), DiagID: WasEvaluated
                            ? diag::warn_side_effects_typeid
                            : diag::warn_side_effects_unevaluated_context);
  }

  return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
                                     SourceRange(TypeidLoc, RParenLoc));
}
631
/// ActOnCXXTypeid - Parse typeid( type-id ) or typeid (expression);
/// \p TyOrExpr is a ParsedType opaque pointer when \p isType is true,
/// otherwise an Expr*. Locates std::type_info (cached in CXXTypeInfoDecl)
/// and dispatches to the appropriate BuildCXXTypeId overload.
ExprResult
Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                     bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
  // typeid is not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_openclcxx_not_supported)
                     << "typeid");
  }

  // Find the std::type_info type.
  if (!getStdNamespace())
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid));

  // Lazily look up and cache std::type_info the first time typeid is used.
  if (!CXXTypeInfoDecl) {
    IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get(Name: "type_info");
    LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
    LookupQualifiedName(R, LookupCtx: getStdNamespace());
    CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    // Microsoft's typeinfo doesn't have type_info in std but in the global
    // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
    if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
      LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
      CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    }
    if (!CXXTypeInfoDecl)
      return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid));
  }

  if (!getLangOpts().RTTI) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_no_typeid_with_fno_rtti));
  }

  QualType TypeInfoType = Context.getTypeDeclType(Decl: CXXTypeInfoDecl);

  if (isType) {
    // The operand is a type; handle it as such.
    TypeSourceInfo *TInfo = nullptr;
    QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
                                   TInfo: &TInfo);
    if (T.isNull())
      return ExprError();

    if (!TInfo)
      TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);

    return BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
  }

  // The operand is an expression.
  ExprResult Result =
      BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, E: (Expr *)TyOrExpr, RParenLoc);

  // With -fno-rtti-data, an evaluated typeid that may need the dynamic type
  // would read a vtable slot that isn't emitted; warn about that.
  if (!getLangOpts().RTTIData && !Result.isInvalid())
    if (auto *CTE = dyn_cast<CXXTypeidExpr>(Val: Result.get()))
      if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
        Diag(Loc: OpLoc, DiagID: diag::warn_no_typeid_with_rtti_disabled)
            << (getDiagnostics().getDiagnosticOptions().getFormat() ==
                DiagnosticOptions::MSVC);
  return Result;
}
693
694/// Grabs __declspec(uuid()) off a type, or returns 0 if we cannot resolve to
695/// a single GUID.
696static void
697getUuidAttrOfType(Sema &SemaRef, QualType QT,
698 llvm::SmallSetVector<const UuidAttr *, 1> &UuidAttrs) {
699 // Optionally remove one level of pointer, reference or array indirection.
700 const Type *Ty = QT.getTypePtr();
701 if (QT->isPointerOrReferenceType())
702 Ty = QT->getPointeeType().getTypePtr();
703 else if (QT->isArrayType())
704 Ty = Ty->getBaseElementTypeUnsafe();
705
706 const auto *TD = Ty->getAsTagDecl();
707 if (!TD)
708 return;
709
710 if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {
711 UuidAttrs.insert(X: Uuid);
712 return;
713 }
714
715 // __uuidof can grab UUIDs from template arguments.
716 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: TD)) {
717 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
718 for (const TemplateArgument &TA : TAL.asArray()) {
719 const UuidAttr *UuidForTA = nullptr;
720 if (TA.getKind() == TemplateArgument::Type)
721 getUuidAttrOfType(SemaRef, QT: TA.getAsType(), UuidAttrs);
722 else if (TA.getKind() == TemplateArgument::Declaration)
723 getUuidAttrOfType(SemaRef, QT: TA.getAsDecl()->getType(), UuidAttrs);
724
725 if (UuidForTA)
726 UuidAttrs.insert(X: UuidForTA);
727 }
728 }
729}
730
731ExprResult Sema::BuildCXXUuidof(QualType Type,
732 SourceLocation TypeidLoc,
733 TypeSourceInfo *Operand,
734 SourceLocation RParenLoc) {
735 MSGuidDecl *Guid = nullptr;
736 if (!Operand->getType()->isDependentType()) {
737 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
738 getUuidAttrOfType(SemaRef&: *this, QT: Operand->getType(), UuidAttrs);
739 if (UuidAttrs.empty())
740 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
741 if (UuidAttrs.size() > 1)
742 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
743 Guid = UuidAttrs.back()->getGuidDecl();
744 }
745
746 return new (Context)
747 CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
748}
749
750ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
751 Expr *E, SourceLocation RParenLoc) {
752 MSGuidDecl *Guid = nullptr;
753 if (!E->getType()->isDependentType()) {
754 if (E->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
755 // A null pointer results in {00000000-0000-0000-0000-000000000000}.
756 Guid = Context.getMSGuidDecl(Parts: MSGuidDecl::Parts{});
757 } else {
758 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
759 getUuidAttrOfType(SemaRef&: *this, QT: E->getType(), UuidAttrs);
760 if (UuidAttrs.empty())
761 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
762 if (UuidAttrs.size() > 1)
763 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
764 Guid = UuidAttrs.back()->getGuidDecl();
765 }
766 }
767
768 return new (Context)
769 CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
770}
771
772/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
773ExprResult
774Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
775 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
776 QualType GuidType = Context.getMSGuidType();
777 GuidType.addConst();
778
779 if (isType) {
780 // The operand is a type; handle it as such.
781 TypeSourceInfo *TInfo = nullptr;
782 QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
783 TInfo: &TInfo);
784 if (T.isNull())
785 return ExprError();
786
787 if (!TInfo)
788 TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);
789
790 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
791 }
792
793 // The operand is an expression.
794 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, E: (Expr*)TyOrExpr, RParenLoc);
795}
796
797ExprResult
798Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
799 assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
800 "Unknown C++ Boolean value!");
801 return new (Context)
802 CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
803}
804
805ExprResult
806Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
807 return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
808}
809
/// Parse-time entry point for a throw-expression. Determines whether the
/// operand names an automatic variable whose scope is confined to the
/// innermost try-block (enabling copy elision into the exception object,
/// i.e. NRVO for throw), then defers to BuildCXXThrow.
ExprResult
Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
  bool IsThrownVarInScope = false;
  if (Ex) {
    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or catch-
    //       clause parameter) whose scope does not extend beyond the end of the
    //       innermost enclosing try-block (if there is one), the copy/move
    //       operation from the operand to the exception object (15.1) can be
    //       omitted by constructing the automatic object directly into the
    //       exception object
    if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Ex->IgnoreParens()))
      if (const auto *Var = dyn_cast<VarDecl>(Val: DRE->getDecl());
          Var && Var->hasLocalStorage() &&
          !Var->getType().isVolatileQualified()) {
        // Walk outward from the current scope; if we reach the variable's
        // own scope before leaving the function/try/etc., it qualifies.
        for (; S; S = S->getParent()) {
          if (S->isDeclScope(D: Var)) {
            IsThrownVarInScope = true;
            break;
          }

          // FIXME: Many of the scope checks here seem incorrect.
          if (S->getFlags() &
              (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
               Scope::ObjCMethodScope | Scope::TryScope))
            break;
        }
      }
  }

  return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
}
846
/// Build a C++ throw-expression of type 'void'.
///
/// \param OpLoc location of the 'throw' keyword.
/// \param Ex the throw operand, or null for a rethrow ('throw;').
/// \param IsThrownVarInScope true when the operand names an automatic
///        variable eligible for copy elision into the exception object
///        (see C++0x [class.copymove]p31, quoted below).
ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                               bool IsThrownVarInScope) {
  const llvm::Triple &T = Context.getTargetInfo().getTriple();
  const bool IsOpenMPGPUTarget =
      getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN());

  DiagnoseExceptionUse(Loc: OpLoc, /* IsTry= */ false);

  // In OpenMP target regions, we replace 'throw' with a trap on GPU targets.
  if (IsOpenMPGPUTarget)
    targetDiag(Loc: OpLoc, DiagID: diag::warn_throw_not_valid_on_target) << T.str();

  // Exceptions aren't allowed in CUDA device code.
  if (getLangOpts().CUDA)
    CUDA().DiagIfDeviceCode(Loc: OpLoc, DiagID: diag::err_cuda_device_exceptions)
        << "throw" << CUDA().CurrentTarget();

  // 'throw' is likewise not permitted inside an OpenMP simd region.
  if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
    Diag(Loc: OpLoc, DiagID: diag::err_omp_simd_region_cannot_use_stmt) << "throw";

  // Exceptions that escape a compute construct are ill-formed.
  if (getLangOpts().OpenACC && getCurScope() &&
      getCurScope()->isInOpenACCComputeConstructScope(Flags: Scope::TryScope))
    Diag(Loc: OpLoc, DiagID: diag::err_acc_branch_in_out_compute_construct)
        << /*throw*/ 2 << /*out of*/ 0;

  if (Ex && !Ex->isTypeDependent()) {
    // Initialize the exception result. This implicitly weeds out
    // abstract types or types with inaccessible copy constructors.

    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or
    //       catch-clause parameter) whose scope does not extend beyond the
    //       end of the innermost enclosing try-block (if there is one), the
    //       copy/move operation from the operand to the exception object
    //       (15.1) can be omitted by constructing the automatic object
    //       directly into the exception object
    NamedReturnInfo NRInfo =
        IsThrownVarInScope ? getNamedReturnInfo(E&: Ex) : NamedReturnInfo();

    QualType ExceptionObjectTy = Context.getExceptionObjectType(T: Ex->getType());
    if (CheckCXXThrowOperand(ThrowLoc: OpLoc, ThrowTy: ExceptionObjectTy, E: Ex))
      return ExprError();

    InitializedEntity Entity =
        InitializedEntity::InitializeException(ThrowLoc: OpLoc, Type: ExceptionObjectTy);
    ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Value: Ex);
    if (Res.isInvalid())
      return ExprError();
    Ex = Res.get();
  }

  // PPC MMA non-pointer types are not allowed as throw expr types.
  if (Ex && Context.getTargetInfo().getTriple().isPPC64())
    PPC().CheckPPCMMAType(Type: Ex->getType(), TypeLoc: Ex->getBeginLoc());

  return new (Context)
      CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
}
911
912static void
913collectPublicBases(CXXRecordDecl *RD,
914 llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
915 llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
916 llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
917 bool ParentIsPublic) {
918 for (const CXXBaseSpecifier &BS : RD->bases()) {
919 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
920 bool NewSubobject;
921 // Virtual bases constitute the same subobject. Non-virtual bases are
922 // always distinct subobjects.
923 if (BS.isVirtual())
924 NewSubobject = VBases.insert(Ptr: BaseDecl).second;
925 else
926 NewSubobject = true;
927
928 if (NewSubobject)
929 ++SubobjectsSeen[BaseDecl];
930
931 // Only add subobjects which have public access throughout the entire chain.
932 bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
933 if (PublicPath)
934 PublicSubobjectsSeen.insert(X: BaseDecl);
935
936 // Recurse on to each base subobject.
937 collectPublicBases(RD: BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
938 ParentIsPublic: PublicPath);
939 }
940}
941
942static void getUnambiguousPublicSubobjects(
943 CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
944 llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
945 llvm::SmallSet<CXXRecordDecl *, 2> VBases;
946 llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
947 SubobjectsSeen[RD] = 1;
948 PublicSubobjectsSeen.insert(X: RD);
949 collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
950 /*ParentIsPublic=*/true);
951
952 for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
953 // Skip ambiguous objects.
954 if (SubobjectsSeen[PublicSubobject] > 1)
955 continue;
956
957 Objects.push_back(Elt: PublicSubobject);
958 }
959}
960
/// Check a throw operand's type against the constraints of [except.throw]:
/// completeness, non-abstractness, destructor accessibility, and various
/// target/ABI-specific requirements (MSVC catchable-type copy constructors,
/// Itanium exception-object alignment, WebAssembly restrictions, ...).
///
/// \param ThrowLoc location of the 'throw' keyword, for diagnostics.
/// \param ExceptionObjectTy the adjusted type of the exception object.
/// \param E the throw operand expression.
/// \returns true on error.
bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
                                QualType ExceptionObjectTy, Expr *E) {
  // If the type of the exception would be an incomplete type or a pointer
  // to an incomplete type other than (cv) void the program is ill-formed.
  QualType Ty = ExceptionObjectTy;
  bool isPointer = false;
  if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
    Ty = Ptr->getPointeeType();
    isPointer = true;
  }

  // Cannot throw WebAssembly reference type.
  if (Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
    return true;
  }

  // Cannot throw WebAssembly table.
  // NOTE(review): this branch appears unreachable — the preceding check
  // already returns for any WebAssembly reference type regardless of
  // isPointer. Confirm whether the table case was meant to be tested on the
  // pointer type instead of the pointee.
  if (isPointer && Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_table_art) << 2 << E->getSourceRange();
    return true;
  }

  if (!isPointer || !Ty->isVoidType()) {
    // Throwing (a pointer to) an incomplete type is ill-formed, except for
    // (cv) void*.
    if (RequireCompleteType(Loc: ThrowLoc, T: Ty,
                            DiagID: isPointer ? diag::err_throw_incomplete_ptr
                                      : diag::err_throw_incomplete,
                            Args: E->getSourceRange()))
      return true;

    // Sizeless types (e.g. scalable vectors) cannot be thrown by value.
    if (!isPointer && Ty->isSizelessType()) {
      Diag(Loc: ThrowLoc, DiagID: diag::err_throw_sizeless) << Ty << E->getSourceRange();
      return true;
    }

    if (RequireNonAbstractType(Loc: ThrowLoc, T: ExceptionObjectTy,
                               DiagID: diag::err_throw_abstract_type, Args: E))
      return true;
  }

  // If the exception has class type, we need additional handling.
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If we are throwing a polymorphic class type or pointer thereof,
  // exception handling will make use of the vtable.
  MarkVTableUsed(Loc: ThrowLoc, Class: RD);

  // If a pointer is thrown, the referenced object will not be destroyed.
  if (isPointer)
    return false;

  // If the class has a destructor, we must be able to call it.
  if (!RD->hasIrrelevantDestructor()) {
    if (CXXDestructorDecl *Destructor = LookupDestructor(Class: RD)) {
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
      CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                            PDiag: PDiag(DiagID: diag::err_access_dtor_exception) << Ty);
      if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
        return true;
    }
  }

  // The MSVC ABI creates a list of all types which can catch the exception
  // object. This list also references the appropriate copy constructor to call
  // if the object is caught by value and has a non-trivial copy constructor.
  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
    // We are only interested in the public, unambiguous bases contained within
    // the exception object. Bases which are ambiguous or otherwise
    // inaccessible are not catchable types.
    llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
    getUnambiguousPublicSubobjects(RD, Objects&: UnambiguousPublicSubobjects);

    for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
      // Attempt to lookup the copy constructor. Various pieces of machinery
      // will spring into action, like template instantiation, which means this
      // cannot be a simple walk of the class's decls. Instead, we must perform
      // lookup and overload resolution.
      CXXConstructorDecl *CD = LookupCopyingConstructor(Class: Subobject, Quals: 0);
      if (!CD || CD->isDeleted())
        continue;

      // Mark the constructor referenced as it is used by this throw expression.
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: CD);

      // Skip this copy constructor if it is trivial, we don't need to record it
      // in the catchable type data.
      if (CD->isTrivial())
        continue;

      // The copy constructor is non-trivial, create a mapping from this class
      // type to this constructor.
      // N.B. The selection of copy constructor is not sensitive to this
      // particular throw-site. Lookup will be performed at the catch-site to
      // ensure that the copy constructor is, in fact, accessible (via
      // friendship or any other means).
      Context.addCopyConstructorForExceptionObject(RD: Subobject, CD);

      // We don't keep the instantiated default argument expressions around so
      // we must rebuild them here.
      for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
        if (CheckCXXDefaultArgExpr(CallLoc: ThrowLoc, FD: CD, Param: CD->getParamDecl(i: I)))
          return true;
      }
    }
  }

  // Under the Itanium C++ ABI, memory for the exception object is allocated by
  // the runtime with no ability for the compiler to request additional
  // alignment. Warn if the exception type requires alignment beyond the minimum
  // guaranteed by the target C++ runtime.
  if (Context.getTargetInfo().getCXXABI().isItaniumFamily()) {
    CharUnits TypeAlign = Context.getTypeAlignInChars(T: Ty);
    CharUnits ExnObjAlign = Context.getExnObjectAlignment();
    if (ExnObjAlign < TypeAlign) {
      Diag(Loc: ThrowLoc, DiagID: diag::warn_throw_underaligned_obj);
      Diag(Loc: ThrowLoc, DiagID: diag::note_throw_underaligned_obj)
          << Ty << (unsigned)TypeAlign.getQuantity()
          << (unsigned)ExnObjAlign.getQuantity();
    }
  }

  // With -fassume-nothrow-exception-dtor, reject throwing an object whose
  // destructor has a (resolved) potentially-throwing exception specification.
  if (!isPointer && getLangOpts().AssumeNothrowExceptionDtor) {
    if (CXXDestructorDecl *Dtor = RD->getDestructor()) {
      // NOTE: this inner 'Ty' (the destructor's function type) shadows the
      // outer 'Ty' (the thrown type) for the rest of this scope.
      auto Ty = Dtor->getType();
      if (auto *FT = Ty.getTypePtr()->getAs<FunctionProtoType>()) {
        if (!isUnresolvedExceptionSpec(ESpecType: FT->getExceptionSpecType()) &&
            !FT->isNothrow())
          Diag(Loc: ThrowLoc, DiagID: diag::err_throw_object_throwing_dtor) << RD;
      }
    }
  }

  return false;
}
1096
/// Compute the adjusted type of 'this' inside a lambda: if any enclosing
/// lambda captures '*this' by copy, the pointee's cv-qualifiers come from
/// that lambda's call operator (const call operator => const object) rather
/// than from the original member function.
static QualType adjustCVQualifiersForCXXThisWithinLambda(
    ArrayRef<FunctionScopeInfo *> FunctionScopes, QualType ThisTy,
    DeclContext *CurSemaContext, ASTContext &ASTCtx) {

  QualType ClassType = ThisTy->getPointeeType();
  LambdaScopeInfo *CurLSI = nullptr;
  DeclContext *CurDC = CurSemaContext;

  // Iterate through the stack of lambdas starting from the innermost lambda to
  // the outermost lambda, checking if '*this' is ever captured by copy - since
  // that could change the cv-qualifiers of the '*this' object.
  // The object referred to by '*this' starts out with the cv-qualifiers of its
  // member function. We then start with the innermost lambda and iterate
  // outward checking to see if any lambda performs a by-copy capture of '*this'
  // - and if so, any nested lambda must respect the 'constness' of that
  // capturing lambda's call operator.
  //

  // Since the FunctionScopeInfo stack is representative of the lexical
  // nesting of the lambda expressions during initial parsing (and is the best
  // place for querying information about captures about lambdas that are
  // partially processed) and perhaps during instantiation of function templates
  // that contain lambda expressions that need to be transformed BUT not
  // necessarily during instantiation of a nested generic lambda's function call
  // operator (which might even be instantiated at the end of the TU) - at which
  // time the DeclContext tree is mature enough to query capture information
  // reliably - we use a two pronged approach to walk through all the lexically
  // enclosing lambda expressions:
  //
  // 1) Climb down the FunctionScopeInfo stack as long as each item represents
  // a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
  // enclosed by the call-operator of the LSI below it on the stack (while
  // tracking the enclosing DC for step 2 if needed). Note the topmost LSI on
  // the stack represents the innermost lambda.
  //
  // 2) If we run out of enclosing LSI's, check if the enclosing DeclContext
  // represents a lambda's call operator. If it does, we must be instantiating
  // a generic lambda's call operator (represented by the Current LSI, and
  // should be the only scenario where an inconsistency between the LSI and the
  // DeclContext should occur), so climb out the DeclContexts if they
  // represent lambdas, while querying the corresponding closure types
  // regarding capture information.

  // 1) Climb down the function scope info stack.
  for (int I = FunctionScopes.size();
       I-- && isa<LambdaScopeInfo>(Val: FunctionScopes[I]) &&
       (!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
                       cast<LambdaScopeInfo>(Val: FunctionScopes[I])->CallOperator);
       CurDC = getLambdaAwareParentOfDeclContext(DC: CurDC)) {
    CurLSI = cast<LambdaScopeInfo>(Val: FunctionScopes[I]);

    if (!CurLSI->isCXXThisCaptured())
      continue;

    auto C = CurLSI->getCXXThisCapture();

    // The first by-copy capture of '*this' found walking outward determines
    // the cv-qualifiers; stop there.
    if (C.isCopyCapture()) {
      if (CurLSI->lambdaCaptureShouldBeConst())
        ClassType.addConst();
      return ASTCtx.getPointerType(T: ClassType);
    }
  }

  // 2) We've run out of ScopeInfos but check 1. if CurDC is a lambda (which
  // can happen during instantiation of its nested generic lambda call
  // operator); 2. if we're in a lambda scope (lambda body).
  if (CurLSI && isLambdaCallOperator(DC: CurDC)) {
    assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator) &&
           "While computing 'this' capture-type for a generic lambda, when we "
           "run out of enclosing LSI's, yet the enclosing DC is a "
           "lambda-call-operator we must be (i.e. Current LSI) in a generic "
           "lambda call oeprator");
    assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));

    // Query a closure type directly for a capture of 'this'/'*this',
    // reporting whether that capture is by copy and whether the capturing
    // call operator is const.
    auto IsThisCaptured =
        [](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
      IsConst = false;
      IsByCopy = false;
      for (auto &&C : Closure->captures()) {
        if (C.capturesThis()) {
          if (C.getCaptureKind() == LCK_StarThis)
            IsByCopy = true;
          if (Closure->getLambdaCallOperator()->isConst())
            IsConst = true;
          return true;
        }
      }
      return false;
    };

    bool IsByCopyCapture = false;
    bool IsConstCapture = false;
    // Walk outward through lexically enclosing closure types via the
    // DeclContext tree (reliable here, unlike the ScopeInfo stack).
    CXXRecordDecl *Closure = cast<CXXRecordDecl>(Val: CurDC->getParent());
    while (Closure &&
           IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
      if (IsByCopyCapture) {
        if (IsConstCapture)
          ClassType.addConst();
        return ASTCtx.getPointerType(T: ClassType);
      }
      Closure = isLambdaCallOperator(DC: Closure->getParent())
                    ? cast<CXXRecordDecl>(Val: Closure->getParent()->getParent())
                    : nullptr;
    }
  }
  // No enclosing lambda captures '*this' by copy: keep the original type.
  return ThisTy;
}
1204
1205QualType Sema::getCurrentThisType() {
1206 DeclContext *DC = getFunctionLevelDeclContext();
1207 QualType ThisTy = CXXThisTypeOverride;
1208
1209 if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(Val: DC)) {
1210 if (method && method->isImplicitObjectMemberFunction())
1211 ThisTy = method->getThisType().getNonReferenceType();
1212 }
1213
1214 if (ThisTy.isNull() && isLambdaCallWithImplicitObjectParameter(DC: CurContext) &&
1215 inTemplateInstantiation() && isa<CXXRecordDecl>(Val: DC)) {
1216
1217 // This is a lambda call operator that is being instantiated as a default
1218 // initializer. DC must point to the enclosing class type, so we can recover
1219 // the 'this' type from it.
1220 QualType ClassTy = Context.getTypeDeclType(Decl: cast<CXXRecordDecl>(Val: DC));
1221 // There are no cv-qualifiers for 'this' within default initializers,
1222 // per [expr.prim.general]p4.
1223 ThisTy = Context.getPointerType(T: ClassTy);
1224 }
1225
1226 // If we are within a lambda's call operator, the cv-qualifiers of 'this'
1227 // might need to be adjusted if the lambda or any of its enclosing lambda's
1228 // captures '*this' by copy.
1229 if (!ThisTy.isNull() && isLambdaCallOperator(DC: CurContext))
1230 return adjustCVQualifiersForCXXThisWithinLambda(FunctionScopes, ThisTy,
1231 CurSemaContext: CurContext, ASTCtx&: Context);
1232 return ThisTy;
1233}
1234
1235Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
1236 Decl *ContextDecl,
1237 Qualifiers CXXThisTypeQuals,
1238 bool Enabled)
1239 : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
1240{
1241 if (!Enabled || !ContextDecl)
1242 return;
1243
1244 CXXRecordDecl *Record = nullptr;
1245 if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(Val: ContextDecl))
1246 Record = Template->getTemplatedDecl();
1247 else
1248 Record = cast<CXXRecordDecl>(Val: ContextDecl);
1249
1250 QualType T = S.Context.getRecordType(Decl: Record);
1251 T = S.getASTContext().getQualifiedType(T, Qs: CXXThisTypeQuals);
1252
1253 S.CXXThisTypeOverride =
1254 S.Context.getLangOpts().HLSL ? T : S.Context.getPointerType(T);
1255
1256 this->Enabled = true;
1257}
1258
1259
1260Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
1261 if (Enabled) {
1262 S.CXXThisTypeOverride = OldCXXThisTypeOverride;
1263 }
1264}
1265
1266static void buildLambdaThisCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI) {
1267 SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
1268 assert(!LSI->isCXXThisCaptured());
1269 // [=, this] {}; // until C++20: Error: this when = is the default
1270 if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval &&
1271 !Sema.getLangOpts().CPlusPlus20)
1272 return;
1273 Sema.Diag(Loc: DiagLoc, DiagID: diag::note_lambda_this_capture_fixit)
1274 << FixItHint::CreateInsertion(
1275 InsertionLoc: DiagLoc, Code: LSI->NumExplicitCaptures > 0 ? ", this" : "this");
1276}
1277
/// Require that the enclosing object ('*this') be capturable by the closure
/// at \p FunctionScopeIndexToStopAt (innermost when null) and, when
/// \p BuildAndDiagnose is true, record the capture there and in every
/// intervening enclosing closure.
///
/// \param Loc location of the use of 'this', for diagnostics.
/// \param Explicit true for an explicit capture ([this] / [*this]).
/// \param BuildAndDiagnose when false, only check capturability; no
///        diagnostics are intended and nothing is recorded.
/// \param FunctionScopeIndexToStopAt index into FunctionScopes of the
///        closure that needs the capture, or null for the innermost one.
/// \param ByCopy true for the C++17 [*this] by-value capture.
/// \returns true if 'this' cannot be captured.
bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
    bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
    const bool ByCopy) {
  // We don't need to capture this in an unevaluated context.
  if (isUnevaluatedContext() && !Explicit)
    return true;

  assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");

  const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
                                         ? *FunctionScopeIndexToStopAt
                                         : FunctionScopes.size() - 1;

  // Check that we can capture the *enclosing object* (referred to by '*this')
  // by the capturing-entity/closure (lambda/block/etc) at
  // MaxFunctionScopesIndex-deep on the FunctionScopes stack.

  // Note: The *enclosing object* can only be captured by-value by a
  // closure that is a lambda, using the explicit notation:
  //    [*this] { ... }.
  // Every other capture of the *enclosing object* results in its by-reference
  // capture.

  // For a closure 'L' (at MaxFunctionScopesIndex in the FunctionScopes
  // stack), we can capture the *enclosing object* only if:
  // - 'L' has an explicit byref or byval capture of the *enclosing object*
  // -  or, 'L' has an implicit capture.
  // AND
  //   -- there is no enclosing closure
  //   -- or, there is some enclosing closure 'E' that has already captured the
  //      *enclosing object*, and every intervening closure (if any) between 'E'
  //      and 'L' can implicitly capture the *enclosing object*.
  //   -- or, every enclosing closure can implicitly capture the
  //      *enclosing object*

  // Phase 1: walk inward-to-outward validating that each closure either
  // already captured 'this' or is allowed to capture it implicitly.
  unsigned NumCapturingClosures = 0;
  for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
    if (CapturingScopeInfo *CSI =
            dyn_cast<CapturingScopeInfo>(Val: FunctionScopes[idx])) {
      if (CSI->CXXThisCaptureIndex != 0) {
        // 'this' is already being captured; there isn't anything more to do.
        CSI->Captures[CSI->CXXThisCaptureIndex - 1].markUsed(IsODRUse: BuildAndDiagnose);
        break;
      }
      LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(Val: CSI);
      if (LSI && isGenericLambdaCallOperatorSpecialization(MD: LSI->CallOperator)) {
        // This context can't implicitly capture 'this'; fail out.
        if (BuildAndDiagnose) {
          LSI->CallOperator->setInvalidDecl();
          Diag(Loc, DiagID: diag::err_this_capture)
              << (Explicit && idx == MaxFunctionScopesIndex);
          if (!Explicit)
            buildLambdaThisCaptureFixit(Sema&: *this, LSI);
        }
        return true;
      }
      if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
          CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
          CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
          CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
          (Explicit && idx == MaxFunctionScopesIndex)) {
        // Regarding (Explicit && idx == MaxFunctionScopesIndex): only the first
        // iteration through can be an explicit capture, all enclosing closures,
        // if any, must perform implicit captures.

        // This closure can capture 'this'; continue looking upwards.
        NumCapturingClosures++;
        continue;
      }
      // This context can't implicitly capture 'this'; fail out.
      if (BuildAndDiagnose) {
        LSI->CallOperator->setInvalidDecl();
        Diag(Loc, DiagID: diag::err_this_capture)
            << (Explicit && idx == MaxFunctionScopesIndex);
      }
      // NOTE(review): unlike the generic-lambda path above, this fixit note
      // is emitted even when BuildAndDiagnose is false, and LSI is assumed
      // non-null here — confirm both asymmetries are intentional.
      if (!Explicit)
        buildLambdaThisCaptureFixit(Sema&: *this, LSI);
      return true;
    }
    break;
  }
  if (!BuildAndDiagnose) return false;

  // If we got here, then the closure at MaxFunctionScopesIndex on the
  // FunctionScopes stack, can capture the *enclosing object*, so capture it
  // (including implicit by-reference captures in any enclosing closures).

  // In the loop below, respect the ByCopy flag only for the closure requesting
  // the capture (i.e. first iteration through the loop below).  Ignore it for
  // all enclosing closure's up to NumCapturingClosures (since they must be
  // implicitly capturing the *enclosing  object* by reference (see loop
  // above)).
  assert((!ByCopy ||
          isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
         "Only a lambda can capture the enclosing object (referred to by "
         "*this) by copy");
  // Phase 2: record the capture in each of the NumCapturingClosures closures,
  // outermost-needed first relative to the walk direction.
  QualType ThisTy = getCurrentThisType();
  for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
       --idx, --NumCapturingClosures) {
    CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(Val: FunctionScopes[idx]);

    // The type of the corresponding data member (not a 'this' pointer if 'by
    // copy').
    QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;

    bool isNested = NumCapturingClosures > 1;
    CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
  }
  return false;
}
1389
1390ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
1391 // C++20 [expr.prim.this]p1:
1392 // The keyword this names a pointer to the object for which an
1393 // implicit object member function is invoked or a non-static
1394 // data member's initializer is evaluated.
1395 QualType ThisTy = getCurrentThisType();
1396
1397 if (CheckCXXThisType(Loc, Type: ThisTy))
1398 return ExprError();
1399
1400 return BuildCXXThisExpr(Loc, Type: ThisTy, /*IsImplicit=*/false);
1401}
1402
1403bool Sema::CheckCXXThisType(SourceLocation Loc, QualType Type) {
1404 if (!Type.isNull())
1405 return false;
1406
1407 // C++20 [expr.prim.this]p3:
1408 // If a declaration declares a member function or member function template
1409 // of a class X, the expression this is a prvalue of type
1410 // "pointer to cv-qualifier-seq X" wherever X is the current class between
1411 // the optional cv-qualifier-seq and the end of the function-definition,
1412 // member-declarator, or declarator. It shall not appear within the
1413 // declaration of either a static member function or an explicit object
1414 // member function of the current class (although its type and value
1415 // category are defined within such member functions as they are within
1416 // an implicit object member function).
1417 DeclContext *DC = getFunctionLevelDeclContext();
1418 const auto *Method = dyn_cast<CXXMethodDecl>(Val: DC);
1419 if (Method && Method->isExplicitObjectMemberFunction()) {
1420 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1421 } else if (Method && isLambdaCallWithExplicitObjectParameter(DC: CurContext)) {
1422 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1423 } else {
1424 Diag(Loc, DiagID: diag::err_invalid_this_use) << 0;
1425 }
1426 return true;
1427}
1428
1429Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
1430 bool IsImplicit) {
1431 auto *This = CXXThisExpr::Create(Ctx: Context, L: Loc, Ty: Type, IsImplicit);
1432 MarkThisReferenced(This);
1433 return This;
1434}
1435
/// Record a use of a CXXThisExpr: trigger the capture of 'this' and, when the
/// expression is not already type-dependent, decide whether it must be
/// treated as dependent because a lambda captured 'this' by value and has a
/// dependent explicit object parameter.
void Sema::MarkThisReferenced(CXXThisExpr *This) {
  CheckCXXThisCapture(Loc: This->getExprLoc());
  if (This->isTypeDependent())
    return;

  // Check if 'this' is captured by value in a lambda with a dependent explicit
  // object parameter, and mark it as type-dependent as well if so.
  auto IsDependent = [&]() {
    // Walk the function scopes from innermost outward looking for the
    // nearest enclosing lambda that captured 'this'.
    for (auto *Scope : llvm::reverse(C&: FunctionScopes)) {
      auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Val: Scope);
      if (!LSI)
        continue;

      // Stop at a fully-parsed lambda that does not lexically enclose the
      // current context — scopes beyond it are unrelated.
      if (LSI->Lambda && !LSI->Lambda->Encloses(DC: CurContext) &&
          LSI->AfterParameterList)
        return false;

      // If this lambda captures 'this' by value, then 'this' is dependent iff
      // this lambda has a dependent explicit object parameter. If we can't
      // determine whether it does (e.g. because the CXXMethodDecl's type is
      // null), assume it doesn't.
      if (LSI->isCXXThisCaptured()) {
        if (!LSI->getCXXThisCapture().isCopyCapture())
          continue;

        const auto *MD = LSI->CallOperator;
        if (MD->getType().isNull())
          return false;

        const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
        return Ty && MD->isExplicitObjectMemberFunction() &&
               Ty->getParamType(i: 0)->isDependentType();
      }
    }
    return false;
  }();

  This->setCapturedByCopyInLambdaWithExplicitObjectParameter(IsDependent);
}
1475
1476bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
1477 // If we're outside the body of a member function, then we'll have a specified
1478 // type for 'this'.
1479 if (CXXThisTypeOverride.isNull())
1480 return false;
1481
1482 // Determine whether we're looking into a class that's currently being
1483 // defined.
1484 CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
1485 return Class && Class->isBeingDefined();
1486}
1487
1488ExprResult
1489Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
1490 SourceLocation LParenOrBraceLoc,
1491 MultiExprArg exprs,
1492 SourceLocation RParenOrBraceLoc,
1493 bool ListInitialization) {
1494 if (!TypeRep)
1495 return ExprError();
1496
1497 TypeSourceInfo *TInfo;
1498 QualType Ty = GetTypeFromParser(Ty: TypeRep, TInfo: &TInfo);
1499 if (!TInfo)
1500 TInfo = Context.getTrivialTypeSourceInfo(T: Ty, Loc: SourceLocation());
1501
1502 auto Result = BuildCXXTypeConstructExpr(Type: TInfo, LParenLoc: LParenOrBraceLoc, Exprs: exprs,
1503 RParenLoc: RParenOrBraceLoc, ListInitialization);
1504 if (Result.isInvalid())
1505 Result = CreateRecoveryExpr(Begin: TInfo->getTypeLoc().getBeginLoc(),
1506 End: RParenOrBraceLoc, SubExprs: exprs, T: Ty);
1507 return Result;
1508}
1509
1510ExprResult
1511Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
1512 SourceLocation LParenOrBraceLoc,
1513 MultiExprArg Exprs,
1514 SourceLocation RParenOrBraceLoc,
1515 bool ListInitialization) {
1516 QualType Ty = TInfo->getType();
1517 SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
1518 SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
1519
1520 InitializedEntity Entity =
1521 InitializedEntity::InitializeTemporary(Context, TypeInfo: TInfo);
1522 InitializationKind Kind =
1523 Exprs.size()
1524 ? ListInitialization
1525 ? InitializationKind::CreateDirectList(
1526 InitLoc: TyBeginLoc, LBraceLoc: LParenOrBraceLoc, RBraceLoc: RParenOrBraceLoc)
1527 : InitializationKind::CreateDirect(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1528 RParenLoc: RParenOrBraceLoc)
1529 : InitializationKind::CreateValue(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1530 RParenLoc: RParenOrBraceLoc);
1531
1532 // C++17 [expr.type.conv]p1:
1533 // If the type is a placeholder for a deduced class type, [...perform class
1534 // template argument deduction...]
1535 // C++23:
1536 // Otherwise, if the type contains a placeholder type, it is replaced by the
1537 // type determined by placeholder type deduction.
1538 DeducedType *Deduced = Ty->getContainedDeducedType();
1539 if (Deduced && !Deduced->isDeduced() &&
1540 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
1541 Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
1542 Kind, Init: Exprs);
1543 if (Ty.isNull())
1544 return ExprError();
1545 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1546 } else if (Deduced && !Deduced->isDeduced()) {
1547 MultiExprArg Inits = Exprs;
1548 if (ListInitialization) {
1549 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
1550 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
1551 }
1552
1553 if (Inits.empty())
1554 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_init_no_expression)
1555 << Ty << FullRange);
1556 if (Inits.size() > 1) {
1557 Expr *FirstBad = Inits[1];
1558 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
1559 DiagID: diag::err_auto_expr_init_multiple_expressions)
1560 << Ty << FullRange);
1561 }
1562 if (getLangOpts().CPlusPlus23) {
1563 if (Ty->getAs<AutoType>())
1564 Diag(Loc: TyBeginLoc, DiagID: diag::warn_cxx20_compat_auto_expr) << FullRange;
1565 }
1566 Expr *Deduce = Inits[0];
1567 if (isa<InitListExpr>(Val: Deduce))
1568 return ExprError(
1569 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
1570 << ListInitialization << Ty << FullRange);
1571 QualType DeducedType;
1572 TemplateDeductionInfo Info(Deduce->getExprLoc());
1573 TemplateDeductionResult Result =
1574 DeduceAutoType(AutoTypeLoc: TInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
1575 if (Result != TemplateDeductionResult::Success &&
1576 Result != TemplateDeductionResult::AlreadyDiagnosed)
1577 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_deduction_failure)
1578 << Ty << Deduce->getType() << FullRange
1579 << Deduce->getSourceRange());
1580 if (DeducedType.isNull()) {
1581 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
1582 return ExprError();
1583 }
1584
1585 Ty = DeducedType;
1586 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1587 }
1588
1589 if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs))
1590 return CXXUnresolvedConstructExpr::Create(
1591 Context, T: Ty.getNonReferenceType(), TSI: TInfo, LParenLoc: LParenOrBraceLoc, Args: Exprs,
1592 RParenLoc: RParenOrBraceLoc, IsListInit: ListInitialization);
1593
1594 // C++ [expr.type.conv]p1:
1595 // If the expression list is a parenthesized single expression, the type
1596 // conversion expression is equivalent (in definedness, and if defined in
1597 // meaning) to the corresponding cast expression.
1598 if (Exprs.size() == 1 && !ListInitialization &&
1599 !isa<InitListExpr>(Val: Exprs[0])) {
1600 Expr *Arg = Exprs[0];
1601 return BuildCXXFunctionalCastExpr(TInfo, Type: Ty, LParenLoc: LParenOrBraceLoc, CastExpr: Arg,
1602 RParenLoc: RParenOrBraceLoc);
1603 }
1604
1605 // For an expression of the form T(), T shall not be an array type.
1606 QualType ElemTy = Ty;
1607 if (Ty->isArrayType()) {
1608 if (!ListInitialization)
1609 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_value_init_for_array_type)
1610 << FullRange);
1611 ElemTy = Context.getBaseElementType(QT: Ty);
1612 }
1613
1614 // Only construct objects with object types.
1615 // The standard doesn't explicitly forbid function types here, but that's an
1616 // obvious oversight, as there's no way to dynamically construct a function
1617 // in general.
1618 if (Ty->isFunctionType())
1619 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_init_for_function_type)
1620 << Ty << FullRange);
1621
1622 // C++17 [expr.type.conv]p2, per DR2351:
1623 // If the type is cv void and the initializer is () or {}, the expression is
1624 // a prvalue of the specified type that performs no initialization.
1625 if (Ty->isVoidType()) {
1626 if (Exprs.empty())
1627 return new (Context) CXXScalarValueInitExpr(
1628 Ty.getUnqualifiedType(), TInfo, Kind.getRange().getEnd());
1629 if (ListInitialization &&
1630 cast<InitListExpr>(Val: Exprs[0])->getNumInits() == 0) {
1631 return CXXFunctionalCastExpr::Create(
1632 Context, T: Ty.getUnqualifiedType(), VK: VK_PRValue, Written: TInfo, Kind: CK_ToVoid,
1633 Op: Exprs[0], /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1634 LPLoc: Exprs[0]->getBeginLoc(), RPLoc: Exprs[0]->getEndLoc());
1635 }
1636 } else if (RequireCompleteType(Loc: TyBeginLoc, T: ElemTy,
1637 DiagID: diag::err_invalid_incomplete_type_use,
1638 Args: FullRange))
1639 return ExprError();
1640
1641 // Otherwise, the expression is a prvalue of the specified type whose
1642 // result object is direct-initialized (11.6) with the initializer.
1643 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
1644 ExprResult Result = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
1645
1646 if (Result.isInvalid())
1647 return Result;
1648
1649 Expr *Inner = Result.get();
1650 if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Val: Inner))
1651 Inner = BTE->getSubExpr();
1652 if (auto *CE = dyn_cast<ConstantExpr>(Val: Inner);
1653 CE && CE->isImmediateInvocation())
1654 Inner = CE->getSubExpr();
1655 if (!isa<CXXTemporaryObjectExpr>(Val: Inner) &&
1656 !isa<CXXScalarValueInitExpr>(Val: Inner)) {
1657 // If we created a CXXTemporaryObjectExpr, that node also represents the
1658 // functional cast. Otherwise, create an explicit cast to represent
1659 // the syntactic form of a functional-style cast that was used here.
1660 //
1661 // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
1662 // would give a more consistent AST representation than using a
1663 // CXXTemporaryObjectExpr. It's also weird that the functional cast
1664 // is sometimes handled by initialization and sometimes not.
1665 QualType ResultType = Result.get()->getType();
1666 SourceRange Locs = ListInitialization
1667 ? SourceRange()
1668 : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
1669 Result = CXXFunctionalCastExpr::Create(
1670 Context, T: ResultType, VK: Expr::getValueKindForType(T: Ty), Written: TInfo, Kind: CK_NoOp,
1671 Op: Result.get(), /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1672 LPLoc: Locs.getBegin(), RPLoc: Locs.getEnd());
1673 }
1674
1675 return Result;
1676}
1677
1678bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
1679 // [CUDA] Ignore this function, if we can't call it.
1680 const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
1681 if (getLangOpts().CUDA) {
1682 auto CallPreference = CUDA().IdentifyPreference(Caller, Callee: Method);
1683 // If it's not callable at all, it's not the right function.
1684 if (CallPreference < SemaCUDA::CFP_WrongSide)
1685 return false;
1686 if (CallPreference == SemaCUDA::CFP_WrongSide) {
1687 // Maybe. We have to check if there are better alternatives.
1688 DeclContext::lookup_result R =
1689 Method->getDeclContext()->lookup(Name: Method->getDeclName());
1690 for (const auto *D : R) {
1691 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1692 if (CUDA().IdentifyPreference(Caller, Callee: FD) > SemaCUDA::CFP_WrongSide)
1693 return false;
1694 }
1695 }
1696 // We've found no better variants.
1697 }
1698 }
1699
1700 SmallVector<const FunctionDecl*, 4> PreventedBy;
1701 bool Result = Method->isUsualDeallocationFunction(PreventedBy);
1702
1703 if (Result || !getLangOpts().CUDA || PreventedBy.empty())
1704 return Result;
1705
1706 // In case of CUDA, return true if none of the 1-argument deallocator
1707 // functions are actually callable.
1708 return llvm::none_of(Range&: PreventedBy, P: [&](const FunctionDecl *FD) {
1709 assert(FD->getNumParams() == 1 &&
1710 "Only single-operand functions should be in PreventedBy");
1711 return CUDA().IdentifyPreference(Caller, Callee: FD) >= SemaCUDA::CFP_HostDevice;
1712 });
1713}
1714
1715/// Determine whether the given function is a non-placement
1716/// deallocation function.
1717static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
1718 if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Val: FD))
1719 return S.isUsualDeallocationFunction(Method);
1720
1721 if (!FD->getDeclName().isAnyOperatorDelete())
1722 return false;
1723
1724 if (FD->isTypeAwareOperatorNewOrDelete())
1725 return FunctionDecl::RequiredTypeAwareDeleteParameterCount ==
1726 FD->getNumParams();
1727
1728 unsigned UsualParams = 1;
1729 if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
1730 S.Context.hasSameUnqualifiedType(
1731 T1: FD->getParamDecl(i: UsualParams)->getType(),
1732 T2: S.Context.getSizeType()))
1733 ++UsualParams;
1734
1735 if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
1736 S.Context.hasSameUnqualifiedType(
1737 T1: FD->getParamDecl(i: UsualParams)->getType(),
1738 T2: S.Context.getTypeDeclType(Decl: S.getStdAlignValT())))
1739 ++UsualParams;
1740
1741 return UsualParams == FD->getNumParams();
1742}
1743
namespace {
  /// Describes a single candidate "usual" deallocation function together with
  /// the implicit parameters (type-identity tag, size, alignment) it would
  /// consume. Instances are built from lookup results and ranked against each
  /// other when selecting an operator delete.
  struct UsualDeallocFnInfo {
    UsualDeallocFnInfo()
        : Found(), FD(nullptr),
          IDP(AlignedAllocationMode::No, SizedDeallocationMode::No) {}
    /// Classify the declaration in \p Found as a usual deallocation function
    /// for \p AllocType. On rejection, FD is left null, which makes the whole
    /// object convert to false (see operator bool below).
    UsualDeallocFnInfo(Sema &S, DeclAccessPair Found, QualType AllocType,
                       SourceLocation Loc)
        : Found(Found), FD(dyn_cast<FunctionDecl>(Val: Found->getUnderlyingDecl())),
          Destroying(false),
          IDP({AllocType, TypeAwareAllocationMode::No,
               AlignedAllocationMode::No, SizedDeallocationMode::No}),
          CUDAPref(SemaCUDA::CFP_Native) {
      // A function template declaration is only a usual deallocation function
      // if it is a typed delete.
      if (!FD) {
        if (AllocType.isNull())
          return;
        auto *FTD = dyn_cast<FunctionTemplateDecl>(Val: Found->getUnderlyingDecl());
        if (!FTD)
          return;
        // Attempt to instantiate a type-aware usual delete from the template;
        // failure leaves FD null and the candidate non-viable.
        FunctionDecl *InstantiatedDecl =
            S.BuildTypeAwareUsualDelete(FnDecl: FTD, AllocType, Loc);
        if (!InstantiatedDecl)
          return;
        FD = InstantiatedDecl;
      }
      // Remaining parameters are matched positionally after the leading
      // pointer parameter: [type-identity,] [destroying tag,] [size,]
      // [alignment].
      unsigned NumBaseParams = 1;
      if (FD->isTypeAwareOperatorNewOrDelete()) {
        // If this is a type aware operator delete we instantiate an appropriate
        // specialization of std::type_identity<>. If we do not know the
        // type being deallocated, or if the type-identity parameter of the
        // deallocation function does not match the constructed type_identity
        // specialization we reject the declaration.
        if (AllocType.isNull()) {
          FD = nullptr;
          return;
        }
        QualType TypeIdentityTag = FD->getParamDecl(i: 0)->getType();
        QualType ExpectedTypeIdentityTag =
            S.tryBuildStdTypeIdentity(Type: AllocType, Loc);
        if (ExpectedTypeIdentityTag.isNull()) {
          FD = nullptr;
          return;
        }
        if (!S.Context.hasSameType(T1: TypeIdentityTag, T2: ExpectedTypeIdentityTag)) {
          FD = nullptr;
          return;
        }
        IDP.PassTypeIdentity = TypeAwareAllocationMode::Yes;
        ++NumBaseParams;
      }

      if (FD->isDestroyingOperatorDelete()) {
        Destroying = true;
        ++NumBaseParams;
      }

      // Optional std::size_t parameter => sized deallocation.
      if (NumBaseParams < FD->getNumParams() &&
          S.Context.hasSameUnqualifiedType(
              T1: FD->getParamDecl(i: NumBaseParams)->getType(),
              T2: S.Context.getSizeType())) {
        ++NumBaseParams;
        IDP.PassSize = SizedDeallocationMode::Yes;
      }

      // Optional std::align_val_t parameter => aligned deallocation.
      if (NumBaseParams < FD->getNumParams() &&
          FD->getParamDecl(i: NumBaseParams)->getType()->isAlignValT()) {
        ++NumBaseParams;
        IDP.PassAlignment = AlignedAllocationMode::Yes;
      }

      // In CUDA, determine how much we'd like / dislike to call this.
      if (S.getLangOpts().CUDA)
        CUDAPref = S.CUDA().IdentifyPreference(
            Caller: S.getCurFunctionDecl(/*AllowLambda=*/true), Callee: FD);
    }

    /// True when this describes a viable candidate (FD resolved).
    explicit operator bool() const { return FD; }

    /// Rank this candidate against \p Other for the request described by
    /// \p TargetIDP. Returns 1 if this candidate is preferred, -1 if \p Other
    /// is preferred, 0 on a tie. The comparison criteria are applied in
    /// priority order: destroying > type-awareness > alignment > size >
    /// template specialization > CUDA preference.
    int Compare(Sema &S, const UsualDeallocFnInfo &Other,
                ImplicitDeallocationParameters TargetIDP) const {
      assert(!TargetIDP.Type.isNull() ||
             !isTypeAwareAllocation(Other.IDP.PassTypeIdentity));

      // C++ P0722:
      //   A destroying operator delete is preferred over a non-destroying
      //   operator delete.
      if (Destroying != Other.Destroying)
        return Destroying ? 1 : -1;

      const ImplicitDeallocationParameters &OtherIDP = Other.IDP;
      // Selection for type awareness has priority over alignment and size
      if (IDP.PassTypeIdentity != OtherIDP.PassTypeIdentity)
        return IDP.PassTypeIdentity == TargetIDP.PassTypeIdentity ? 1 : -1;

      // C++17 [expr.delete]p10:
      //   If the type has new-extended alignment, a function with a parameter
      //   of type std::align_val_t is preferred; otherwise a function without
      //   such a parameter is preferred
      if (IDP.PassAlignment != OtherIDP.PassAlignment)
        return IDP.PassAlignment == TargetIDP.PassAlignment ? 1 : -1;

      if (IDP.PassSize != OtherIDP.PassSize)
        return IDP.PassSize == TargetIDP.PassSize ? 1 : -1;

      if (isTypeAwareAllocation(Mode: IDP.PassTypeIdentity)) {
        // Type aware allocation involves templates so we need to choose
        // the best type
        FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate();
        FunctionTemplateDecl *OtherPrimaryTemplate =
            Other.FD->getPrimaryTemplate();
        // A non-template candidate beats a template one.
        if ((!PrimaryTemplate) != (!OtherPrimaryTemplate))
          return OtherPrimaryTemplate ? 1 : -1;

        if (PrimaryTemplate && OtherPrimaryTemplate) {
          // Both are templates: fall back to partial-ordering of the two
          // primary templates for this call.
          const auto *DC = dyn_cast<CXXRecordDecl>(Val: Found->getDeclContext());
          const auto *OtherDC =
              dyn_cast<CXXRecordDecl>(Val: Other.Found->getDeclContext());
          unsigned ImplicitArgCount = Destroying + IDP.getNumImplicitArgs();
          if (FunctionTemplateDecl *Best = S.getMoreSpecializedTemplate(
                  FT1: PrimaryTemplate, FT2: OtherPrimaryTemplate, Loc: SourceLocation(),
                  TPOC: TPOC_Call, NumCallArguments1: ImplicitArgCount,
                  RawObj1Ty: DC ? QualType(DC->getTypeForDecl(), 0) : QualType{},
                  RawObj2Ty: OtherDC ? QualType(OtherDC->getTypeForDecl(), 0) : QualType{},
                  Reversed: false)) {
            return Best == PrimaryTemplate ? 1 : -1;
          }
        }
      }

      // Use CUDA call preference as a tiebreaker.
      if (CUDAPref > Other.CUDAPref)
        return 1;
      if (CUDAPref == Other.CUDAPref)
        return 0;
      return -1;
    }

    // The lookup entry (declaration + access) this info was built from.
    DeclAccessPair Found;
    // The resolved deallocation function; null when the candidate is rejected.
    FunctionDecl *FD;
    // Whether FD is a destroying operator delete (P0722).
    bool Destroying;
    // Which implicit arguments (type-identity / size / alignment) FD takes.
    ImplicitDeallocationParameters IDP;
    // CUDA call preference for FD from the current context.
    SemaCUDA::CUDAFunctionPreference CUDAPref;
  };
}
1889
1890/// Determine whether a type has new-extended alignment. This may be called when
1891/// the type is incomplete (for a delete-expression with an incomplete pointee
1892/// type), in which case it will conservatively return false if the alignment is
1893/// not known.
1894static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
1895 return S.getLangOpts().AlignedAllocation &&
1896 S.getASTContext().getTypeAlignIfKnown(T: AllocType) >
1897 S.getASTContext().getTargetInfo().getNewAlign();
1898}
1899
1900static bool CheckDeleteOperator(Sema &S, SourceLocation StartLoc,
1901 SourceRange Range, bool Diagnose,
1902 CXXRecordDecl *NamingClass, DeclAccessPair Decl,
1903 FunctionDecl *Operator) {
1904 if (Operator->isTypeAwareOperatorNewOrDelete()) {
1905 QualType SelectedTypeIdentityParameter =
1906 Operator->getParamDecl(i: 0)->getType();
1907 if (S.RequireCompleteType(Loc: StartLoc, T: SelectedTypeIdentityParameter,
1908 DiagID: diag::err_incomplete_type))
1909 return true;
1910 }
1911
1912 // FIXME: DiagnoseUseOfDecl?
1913 if (Operator->isDeleted()) {
1914 if (Diagnose) {
1915 StringLiteral *Msg = Operator->getDeletedMessage();
1916 S.Diag(Loc: StartLoc, DiagID: diag::err_deleted_function_use)
1917 << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
1918 S.NoteDeletedFunction(FD: Operator);
1919 }
1920 return true;
1921 }
1922 Sema::AccessResult Accessible =
1923 S.CheckAllocationAccess(OperatorLoc: StartLoc, PlacementRange: Range, NamingClass, FoundDecl: Decl, Diagnose);
1924 return Accessible == Sema::AR_inaccessible;
1925}
1926
1927/// Select the correct "usual" deallocation function to use from a selection of
1928/// deallocation functions (either global or class-scope).
1929static UsualDeallocFnInfo resolveDeallocationOverload(
1930 Sema &S, LookupResult &R, const ImplicitDeallocationParameters &IDP,
1931 SourceLocation Loc,
1932 llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {
1933
1934 UsualDeallocFnInfo Best;
1935 for (auto I = R.begin(), E = R.end(); I != E; ++I) {
1936 UsualDeallocFnInfo Info(S, I.getPair(), IDP.Type, Loc);
1937 if (!Info || !isNonPlacementDeallocationFunction(S, FD: Info.FD) ||
1938 Info.CUDAPref == SemaCUDA::CFP_Never)
1939 continue;
1940
1941 if (!isTypeAwareAllocation(Mode: IDP.PassTypeIdentity) &&
1942 isTypeAwareAllocation(Mode: Info.IDP.PassTypeIdentity))
1943 continue;
1944 if (!Best) {
1945 Best = Info;
1946 if (BestFns)
1947 BestFns->push_back(Elt: Info);
1948 continue;
1949 }
1950 int ComparisonResult = Best.Compare(S, Other: Info, TargetIDP: IDP);
1951 if (ComparisonResult > 0)
1952 continue;
1953
1954 // If more than one preferred function is found, all non-preferred
1955 // functions are eliminated from further consideration.
1956 if (BestFns && ComparisonResult < 0)
1957 BestFns->clear();
1958
1959 Best = Info;
1960 if (BestFns)
1961 BestFns->push_back(Elt: Info);
1962 }
1963
1964 return Best;
1965}
1966
1967/// Determine whether a given type is a class for which 'delete[]' would call
1968/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
1969/// we need to store the array size (even if the type is
1970/// trivially-destructible).
1971static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
1972 TypeAwareAllocationMode PassType,
1973 QualType allocType) {
1974 const RecordType *record =
1975 allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
1976 if (!record) return false;
1977
1978 // Try to find an operator delete[] in class scope.
1979
1980 DeclarationName deleteName =
1981 S.Context.DeclarationNames.getCXXOperatorName(Op: OO_Array_Delete);
1982 LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
1983 S.LookupQualifiedName(R&: ops, LookupCtx: record->getDecl());
1984
1985 // We're just doing this for information.
1986 ops.suppressDiagnostics();
1987
1988 // Very likely: there's no operator delete[].
1989 if (ops.empty()) return false;
1990
1991 // If it's ambiguous, it should be illegal to call operator delete[]
1992 // on this thing, so it doesn't matter if we allocate extra space or not.
1993 if (ops.isAmbiguous()) return false;
1994
1995 // C++17 [expr.delete]p10:
1996 // If the deallocation functions have class scope, the one without a
1997 // parameter of type std::size_t is selected.
1998 ImplicitDeallocationParameters IDP = {
1999 allocType, PassType,
2000 alignedAllocationModeFromBool(IsAligned: hasNewExtendedAlignment(S, AllocType: allocType)),
2001 SizedDeallocationMode::No};
2002 auto Best = resolveDeallocationOverload(S, R&: ops, IDP, Loc: loc);
2003 return Best && isSizedDeallocation(Mode: Best.IDP.PassSize);
2004}
2005
2006ExprResult
2007Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
2008 SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
2009 SourceLocation PlacementRParen, SourceRange TypeIdParens,
2010 Declarator &D, Expr *Initializer) {
2011 std::optional<Expr *> ArraySize;
2012 // If the specified type is an array, unwrap it and save the expression.
2013 if (D.getNumTypeObjects() > 0 &&
2014 D.getTypeObject(i: 0).Kind == DeclaratorChunk::Array) {
2015 DeclaratorChunk &Chunk = D.getTypeObject(i: 0);
2016 if (D.getDeclSpec().hasAutoTypeSpec())
2017 return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_new_array_of_auto)
2018 << D.getSourceRange());
2019 if (Chunk.Arr.hasStatic)
2020 return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_static_illegal_in_new)
2021 << D.getSourceRange());
2022 if (!Chunk.Arr.NumElts && !Initializer)
2023 return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_array_new_needs_size)
2024 << D.getSourceRange());
2025
2026 ArraySize = Chunk.Arr.NumElts;
2027 D.DropFirstTypeObject();
2028 }
2029
2030 // Every dimension shall be of constant size.
2031 if (ArraySize) {
2032 for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
2033 if (D.getTypeObject(i: I).Kind != DeclaratorChunk::Array)
2034 break;
2035
2036 DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(i: I).Arr;
2037 if (Expr *NumElts = Array.NumElts) {
2038 if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
2039 // FIXME: GCC permits constant folding here. We should either do so consistently
2040 // or not do so at all, rather than changing behavior in C++14 onwards.
2041 if (getLangOpts().CPlusPlus14) {
2042 // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
2043 // shall be a converted constant expression (5.19) of type std::size_t
2044 // and shall evaluate to a strictly positive value.
2045 llvm::APSInt Value(Context.getIntWidth(T: Context.getSizeType()));
2046 Array.NumElts =
2047 CheckConvertedConstantExpression(From: NumElts, T: Context.getSizeType(),
2048 Value, CCE: CCEKind::ArrayBound)
2049 .get();
2050 } else {
2051 Array.NumElts = VerifyIntegerConstantExpression(
2052 E: NumElts, Result: nullptr, DiagID: diag::err_new_array_nonconst,
2053 CanFold: AllowFoldKind::Allow)
2054 .get();
2055 }
2056 if (!Array.NumElts)
2057 return ExprError();
2058 }
2059 }
2060 }
2061 }
2062
2063 TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
2064 QualType AllocType = TInfo->getType();
2065 if (D.isInvalidType())
2066 return ExprError();
2067
2068 SourceRange DirectInitRange;
2069 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer))
2070 DirectInitRange = List->getSourceRange();
2071
2072 return BuildCXXNew(Range: SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
2073 PlacementLParen, PlacementArgs, PlacementRParen,
2074 TypeIdParens, AllocType, AllocTypeInfo: TInfo, ArraySize, DirectInitRange,
2075 Initializer);
2076}
2077
2078static bool isLegalArrayNewInitializer(CXXNewInitializationStyle Style,
2079 Expr *Init, bool IsCPlusPlus20) {
2080 if (!Init)
2081 return true;
2082 if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Val: Init))
2083 return IsCPlusPlus20 || PLE->getNumExprs() == 0;
2084 if (isa<ImplicitValueInitExpr>(Val: Init))
2085 return true;
2086 else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Val: Init))
2087 return !CCE->isListInitialization() &&
2088 CCE->getConstructor()->isDefaultConstructor();
2089 else if (Style == CXXNewInitializationStyle::Braces) {
2090 assert(isa<InitListExpr>(Init) &&
2091 "Shouldn't create list CXXConstructExprs for arrays.");
2092 return true;
2093 }
2094 return false;
2095}
2096
2097bool
2098Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
2099 if (!getLangOpts().AlignedAllocationUnavailable)
2100 return false;
2101 if (FD.isDefined())
2102 return false;
2103 UnsignedOrNone AlignmentParam = std::nullopt;
2104 if (FD.isReplaceableGlobalAllocationFunction(AlignmentParam: &AlignmentParam) &&
2105 AlignmentParam)
2106 return true;
2107 return false;
2108}
2109
2110// Emit a diagnostic if an aligned allocation/deallocation function that is not
2111// implemented in the standard library is selected.
2112void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
2113 SourceLocation Loc) {
2114 if (isUnavailableAlignedAllocationFunction(FD)) {
2115 const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
2116 StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
2117 Platform: getASTContext().getTargetInfo().getPlatformName());
2118 VersionTuple OSVersion = alignedAllocMinVersion(OS: T.getOS());
2119
2120 bool IsDelete = FD.getDeclName().isAnyOperatorDelete();
2121 Diag(Loc, DiagID: diag::err_aligned_allocation_unavailable)
2122 << IsDelete << FD.getType().getAsString() << OSName
2123 << OSVersion.getAsString() << OSVersion.empty();
2124 Diag(Loc, DiagID: diag::note_silence_aligned_allocation_unavailable);
2125 }
2126}
2127
2128ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
2129 SourceLocation PlacementLParen,
2130 MultiExprArg PlacementArgs,
2131 SourceLocation PlacementRParen,
2132 SourceRange TypeIdParens, QualType AllocType,
2133 TypeSourceInfo *AllocTypeInfo,
2134 std::optional<Expr *> ArraySize,
2135 SourceRange DirectInitRange, Expr *Initializer) {
2136 SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
2137 SourceLocation StartLoc = Range.getBegin();
2138
2139 CXXNewInitializationStyle InitStyle;
2140 if (DirectInitRange.isValid()) {
2141 assert(Initializer && "Have parens but no initializer.");
2142 InitStyle = CXXNewInitializationStyle::Parens;
2143 } else if (isa_and_nonnull<InitListExpr>(Val: Initializer))
2144 InitStyle = CXXNewInitializationStyle::Braces;
2145 else {
2146 assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
2147 isa<CXXConstructExpr>(Initializer)) &&
2148 "Initializer expression that cannot have been implicitly created.");
2149 InitStyle = CXXNewInitializationStyle::None;
2150 }
2151
2152 MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
2153 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer)) {
2154 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2155 "paren init for non-call init");
2156 Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
2157 } else if (auto *List = dyn_cast_or_null<CXXParenListInitExpr>(Val: Initializer)) {
2158 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2159 "paren init for non-call init");
2160 Exprs = List->getInitExprs();
2161 }
2162
2163 // C++11 [expr.new]p15:
2164 // A new-expression that creates an object of type T initializes that
2165 // object as follows:
2166 InitializationKind Kind = [&] {
2167 switch (InitStyle) {
2168 // - If the new-initializer is omitted, the object is default-
2169 // initialized (8.5); if no initialization is performed,
2170 // the object has indeterminate value
2171 case CXXNewInitializationStyle::None:
2172 return InitializationKind::CreateDefault(InitLoc: TypeRange.getBegin());
2173 // - Otherwise, the new-initializer is interpreted according to the
2174 // initialization rules of 8.5 for direct-initialization.
2175 case CXXNewInitializationStyle::Parens:
2176 return InitializationKind::CreateDirect(InitLoc: TypeRange.getBegin(),
2177 LParenLoc: DirectInitRange.getBegin(),
2178 RParenLoc: DirectInitRange.getEnd());
2179 case CXXNewInitializationStyle::Braces:
2180 return InitializationKind::CreateDirectList(InitLoc: TypeRange.getBegin(),
2181 LBraceLoc: Initializer->getBeginLoc(),
2182 RBraceLoc: Initializer->getEndLoc());
2183 }
2184 llvm_unreachable("Unknown initialization kind");
2185 }();
2186
2187 // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
2188 auto *Deduced = AllocType->getContainedDeducedType();
2189 if (Deduced && !Deduced->isDeduced() &&
2190 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
2191 if (ArraySize)
2192 return ExprError(
2193 Diag(Loc: *ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
2194 DiagID: diag::err_deduced_class_template_compound_type)
2195 << /*array*/ 2
2196 << (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
2197
2198 InitializedEntity Entity
2199 = InitializedEntity::InitializeNew(NewLoc: StartLoc, Type: AllocType);
2200 AllocType = DeduceTemplateSpecializationFromInitializer(
2201 TInfo: AllocTypeInfo, Entity, Kind, Init: Exprs);
2202 if (AllocType.isNull())
2203 return ExprError();
2204 } else if (Deduced && !Deduced->isDeduced()) {
2205 MultiExprArg Inits = Exprs;
2206 bool Braced = (InitStyle == CXXNewInitializationStyle::Braces);
2207 if (Braced) {
2208 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
2209 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
2210 }
2211
2212 if (InitStyle == CXXNewInitializationStyle::None || Inits.empty())
2213 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_requires_ctor_arg)
2214 << AllocType << TypeRange);
2215 if (Inits.size() > 1) {
2216 Expr *FirstBad = Inits[1];
2217 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
2218 DiagID: diag::err_auto_new_ctor_multiple_expressions)
2219 << AllocType << TypeRange);
2220 }
2221 if (Braced && !getLangOpts().CPlusPlus17)
2222 Diag(Loc: Initializer->getBeginLoc(), DiagID: diag::ext_auto_new_list_init)
2223 << AllocType << TypeRange;
2224 Expr *Deduce = Inits[0];
2225 if (isa<InitListExpr>(Val: Deduce))
2226 return ExprError(
2227 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
2228 << Braced << AllocType << TypeRange);
2229 QualType DeducedType;
2230 TemplateDeductionInfo Info(Deduce->getExprLoc());
2231 TemplateDeductionResult Result =
2232 DeduceAutoType(AutoTypeLoc: AllocTypeInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
2233 if (Result != TemplateDeductionResult::Success &&
2234 Result != TemplateDeductionResult::AlreadyDiagnosed)
2235 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_deduction_failure)
2236 << AllocType << Deduce->getType() << TypeRange
2237 << Deduce->getSourceRange());
2238 if (DeducedType.isNull()) {
2239 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
2240 return ExprError();
2241 }
2242 AllocType = DeducedType;
2243 }
2244
2245 // Per C++0x [expr.new]p5, the type being constructed may be a
2246 // typedef of an array type.
2247 // Dependent case will be handled separately.
2248 if (!ArraySize && !AllocType->isDependentType()) {
2249 if (const ConstantArrayType *Array
2250 = Context.getAsConstantArrayType(T: AllocType)) {
2251 ArraySize = IntegerLiteral::Create(C: Context, V: Array->getSize(),
2252 type: Context.getSizeType(),
2253 l: TypeRange.getEnd());
2254 AllocType = Array->getElementType();
2255 }
2256 }
2257
2258 if (CheckAllocatedType(AllocType, Loc: TypeRange.getBegin(), R: TypeRange))
2259 return ExprError();
2260
2261 if (ArraySize && !checkArrayElementAlignment(EltTy: AllocType, Loc: TypeRange.getBegin()))
2262 return ExprError();
2263
2264 // In ARC, infer 'retaining' for the allocated
2265 if (getLangOpts().ObjCAutoRefCount &&
2266 AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2267 AllocType->isObjCLifetimeType()) {
2268 AllocType = Context.getLifetimeQualifiedType(type: AllocType,
2269 lifetime: AllocType->getObjCARCImplicitLifetime());
2270 }
2271
2272 QualType ResultType = Context.getPointerType(T: AllocType);
2273
2274 if (ArraySize && *ArraySize &&
2275 (*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
2276 ExprResult result = CheckPlaceholderExpr(E: *ArraySize);
2277 if (result.isInvalid()) return ExprError();
2278 ArraySize = result.get();
2279 }
2280 // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
2281 // integral or enumeration type with a non-negative value."
2282 // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
2283 // enumeration type, or a class type for which a single non-explicit
2284 // conversion function to integral or unscoped enumeration type exists.
2285 // C++1y [expr.new]p6: The expression [...] is implicitly converted to
2286 // std::size_t.
2287 std::optional<uint64_t> KnownArraySize;
2288 if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
2289 ExprResult ConvertedSize;
2290 if (getLangOpts().CPlusPlus14) {
2291 assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
2292
2293 ConvertedSize = PerformImplicitConversion(
2294 From: *ArraySize, ToType: Context.getSizeType(), Action: AssignmentAction::Converting);
2295
2296 if (!ConvertedSize.isInvalid() &&
2297 (*ArraySize)->getType()->getAs<RecordType>())
2298 // Diagnose the compatibility of this conversion.
2299 Diag(Loc: StartLoc, DiagID: diag::warn_cxx98_compat_array_size_conversion)
2300 << (*ArraySize)->getType() << 0 << "'size_t'";
2301 } else {
2302 class SizeConvertDiagnoser : public ICEConvertDiagnoser {
2303 protected:
2304 Expr *ArraySize;
2305
2306 public:
2307 SizeConvertDiagnoser(Expr *ArraySize)
2308 : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
2309 ArraySize(ArraySize) {}
2310
2311 SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
2312 QualType T) override {
2313 return S.Diag(Loc, DiagID: diag::err_array_size_not_integral)
2314 << S.getLangOpts().CPlusPlus11 << T;
2315 }
2316
2317 SemaDiagnosticBuilder diagnoseIncomplete(
2318 Sema &S, SourceLocation Loc, QualType T) override {
2319 return S.Diag(Loc, DiagID: diag::err_array_size_incomplete_type)
2320 << T << ArraySize->getSourceRange();
2321 }
2322
2323 SemaDiagnosticBuilder diagnoseExplicitConv(
2324 Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
2325 return S.Diag(Loc, DiagID: diag::err_array_size_explicit_conversion) << T << ConvTy;
2326 }
2327
2328 SemaDiagnosticBuilder noteExplicitConv(
2329 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2330 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2331 << ConvTy->isEnumeralType() << ConvTy;
2332 }
2333
2334 SemaDiagnosticBuilder diagnoseAmbiguous(
2335 Sema &S, SourceLocation Loc, QualType T) override {
2336 return S.Diag(Loc, DiagID: diag::err_array_size_ambiguous_conversion) << T;
2337 }
2338
2339 SemaDiagnosticBuilder noteAmbiguous(
2340 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2341 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2342 << ConvTy->isEnumeralType() << ConvTy;
2343 }
2344
2345 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
2346 QualType T,
2347 QualType ConvTy) override {
2348 return S.Diag(Loc,
2349 DiagID: S.getLangOpts().CPlusPlus11
2350 ? diag::warn_cxx98_compat_array_size_conversion
2351 : diag::ext_array_size_conversion)
2352 << T << ConvTy->isEnumeralType() << ConvTy;
2353 }
2354 } SizeDiagnoser(*ArraySize);
2355
2356 ConvertedSize = PerformContextualImplicitConversion(Loc: StartLoc, FromE: *ArraySize,
2357 Converter&: SizeDiagnoser);
2358 }
2359 if (ConvertedSize.isInvalid())
2360 return ExprError();
2361
2362 ArraySize = ConvertedSize.get();
2363 QualType SizeType = (*ArraySize)->getType();
2364
2365 if (!SizeType->isIntegralOrUnscopedEnumerationType())
2366 return ExprError();
2367
2368 // C++98 [expr.new]p7:
2369 // The expression in a direct-new-declarator shall have integral type
2370 // with a non-negative value.
2371 //
2372 // Let's see if this is a constant < 0. If so, we reject it out of hand,
2373 // per CWG1464. Otherwise, if it's not a constant, we must have an
2374 // unparenthesized array type.
2375
2376 // We've already performed any required implicit conversion to integer or
2377 // unscoped enumeration type.
2378 // FIXME: Per CWG1464, we are required to check the value prior to
2379 // converting to size_t. This will never find a negative array size in
2380 // C++14 onwards, because Value is always unsigned here!
2381 if (std::optional<llvm::APSInt> Value =
2382 (*ArraySize)->getIntegerConstantExpr(Ctx: Context)) {
2383 if (Value->isSigned() && Value->isNegative()) {
2384 return ExprError(Diag(Loc: (*ArraySize)->getBeginLoc(),
2385 DiagID: diag::err_typecheck_negative_array_size)
2386 << (*ArraySize)->getSourceRange());
2387 }
2388
2389 if (!AllocType->isDependentType()) {
2390 unsigned ActiveSizeBits =
2391 ConstantArrayType::getNumAddressingBits(Context, ElementType: AllocType, NumElements: *Value);
2392 if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
2393 return ExprError(
2394 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::err_array_too_large)
2395 << toString(I: *Value, Radix: 10) << (*ArraySize)->getSourceRange());
2396 }
2397
2398 KnownArraySize = Value->getZExtValue();
2399 } else if (TypeIdParens.isValid()) {
2400 // Can't have dynamic array size when the type-id is in parentheses.
2401 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::ext_new_paren_array_nonconst)
2402 << (*ArraySize)->getSourceRange()
2403 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getBegin())
2404 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getEnd());
2405
2406 TypeIdParens = SourceRange();
2407 }
2408
2409 // Note that we do *not* convert the argument in any way. It can
2410 // be signed, larger than size_t, whatever.
2411 }
2412
2413 FunctionDecl *OperatorNew = nullptr;
2414 FunctionDecl *OperatorDelete = nullptr;
2415 unsigned Alignment =
2416 AllocType->isDependentType() ? 0 : Context.getTypeAlign(T: AllocType);
2417 unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
2418 ImplicitAllocationParameters IAP = {
2419 AllocType, ShouldUseTypeAwareOperatorNewOrDelete(),
2420 alignedAllocationModeFromBool(IsAligned: getLangOpts().AlignedAllocation &&
2421 Alignment > NewAlignment)};
2422
2423 if (CheckArgsForPlaceholders(args: PlacementArgs))
2424 return ExprError();
2425
2426 AllocationFunctionScope Scope = UseGlobal ? AllocationFunctionScope::Global
2427 : AllocationFunctionScope::Both;
2428 SourceRange AllocationParameterRange = Range;
2429 if (PlacementLParen.isValid() && PlacementRParen.isValid())
2430 AllocationParameterRange = SourceRange(PlacementLParen, PlacementRParen);
2431 if (!AllocType->isDependentType() &&
2432 !Expr::hasAnyTypeDependentArguments(Exprs: PlacementArgs) &&
2433 FindAllocationFunctions(StartLoc, Range: AllocationParameterRange, NewScope: Scope, DeleteScope: Scope,
2434 AllocType, IsArray: ArraySize.has_value(), IAP,
2435 PlaceArgs: PlacementArgs, OperatorNew, OperatorDelete))
2436 return ExprError();
2437
2438 // If this is an array allocation, compute whether the usual array
2439 // deallocation function for the type has a size_t parameter.
2440 bool UsualArrayDeleteWantsSize = false;
2441 if (ArraySize && !AllocType->isDependentType())
2442 UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
2443 S&: *this, loc: StartLoc, PassType: IAP.PassTypeIdentity, allocType: AllocType);
2444
2445 SmallVector<Expr *, 8> AllPlaceArgs;
2446 if (OperatorNew) {
2447 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2448 VariadicCallType CallType = Proto->isVariadic()
2449 ? VariadicCallType::Function
2450 : VariadicCallType::DoesNotApply;
2451
2452 // We've already converted the placement args, just fill in any default
2453 // arguments. Skip the first parameter because we don't have a corresponding
2454 // argument. Skip the second parameter too if we're passing in the
2455 // alignment; we've already filled it in.
2456 unsigned NumImplicitArgs = 1;
2457 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2458 assert(OperatorNew->isTypeAwareOperatorNewOrDelete());
2459 NumImplicitArgs++;
2460 }
2461 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2462 NumImplicitArgs++;
2463 if (GatherArgumentsForCall(CallLoc: AllocationParameterRange.getBegin(), FDecl: OperatorNew,
2464 Proto, FirstParam: NumImplicitArgs, Args: PlacementArgs,
2465 AllArgs&: AllPlaceArgs, CallType))
2466 return ExprError();
2467
2468 if (!AllPlaceArgs.empty())
2469 PlacementArgs = AllPlaceArgs;
2470
2471 // We would like to perform some checking on the given `operator new` call,
2472 // but the PlacementArgs does not contain the implicit arguments,
2473 // namely allocation size and maybe allocation alignment,
2474 // so we need to conjure them.
2475
2476 QualType SizeTy = Context.getSizeType();
2477 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
2478
2479 llvm::APInt SingleEltSize(
2480 SizeTyWidth, Context.getTypeSizeInChars(T: AllocType).getQuantity());
2481
2482 // How many bytes do we want to allocate here?
2483 std::optional<llvm::APInt> AllocationSize;
2484 if (!ArraySize && !AllocType->isDependentType()) {
2485 // For non-array operator new, we only want to allocate one element.
2486 AllocationSize = SingleEltSize;
2487 } else if (KnownArraySize && !AllocType->isDependentType()) {
2488 // For array operator new, only deal with static array size case.
2489 bool Overflow;
2490 AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
2491 .umul_ov(RHS: SingleEltSize, Overflow);
2492 (void)Overflow;
2493 assert(
2494 !Overflow &&
2495 "Expected that all the overflows would have been handled already.");
2496 }
2497
2498 IntegerLiteral AllocationSizeLiteral(
2499 Context, AllocationSize.value_or(u: llvm::APInt::getZero(numBits: SizeTyWidth)),
2500 SizeTy, StartLoc);
2501 // Otherwise, if we failed to constant-fold the allocation size, we'll
2502 // just give up and pass-in something opaque, that isn't a null pointer.
2503 OpaqueValueExpr OpaqueAllocationSize(StartLoc, SizeTy, VK_PRValue,
2504 OK_Ordinary, /*SourceExpr=*/nullptr);
2505
2506 // Let's synthesize the alignment argument in case we will need it.
2507 // Since we *really* want to allocate these on stack, this is slightly ugly
2508 // because there might not be a `std::align_val_t` type.
2509 EnumDecl *StdAlignValT = getStdAlignValT();
2510 QualType AlignValT =
2511 StdAlignValT ? Context.getTypeDeclType(Decl: StdAlignValT) : SizeTy;
2512 IntegerLiteral AlignmentLiteral(
2513 Context,
2514 llvm::APInt(Context.getTypeSize(T: SizeTy),
2515 Alignment / Context.getCharWidth()),
2516 SizeTy, StartLoc);
2517 ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
2518 CK_IntegralCast, &AlignmentLiteral,
2519 VK_PRValue, FPOptionsOverride());
2520
2521 // Adjust placement args by prepending conjured size and alignment exprs.
2522 llvm::SmallVector<Expr *, 8> CallArgs;
2523 CallArgs.reserve(N: NumImplicitArgs + PlacementArgs.size());
2524 CallArgs.emplace_back(Args: AllocationSize
2525 ? static_cast<Expr *>(&AllocationSizeLiteral)
2526 : &OpaqueAllocationSize);
2527 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2528 CallArgs.emplace_back(Args: &DesiredAlignment);
2529 llvm::append_range(C&: CallArgs, R&: PlacementArgs);
2530
2531 DiagnoseSentinelCalls(D: OperatorNew, Loc: PlacementLParen, Args: CallArgs);
2532
2533 checkCall(FDecl: OperatorNew, Proto, /*ThisArg=*/nullptr, Args: CallArgs,
2534 /*IsMemberFunction=*/false, Loc: StartLoc, Range, CallType);
2535
2536 // Warn if the type is over-aligned and is being allocated by (unaligned)
2537 // global operator new.
2538 if (PlacementArgs.empty() && !isAlignedAllocation(Mode: IAP.PassAlignment) &&
2539 (OperatorNew->isImplicit() ||
2540 (OperatorNew->getBeginLoc().isValid() &&
2541 getSourceManager().isInSystemHeader(Loc: OperatorNew->getBeginLoc())))) {
2542 if (Alignment > NewAlignment)
2543 Diag(Loc: StartLoc, DiagID: diag::warn_overaligned_type)
2544 << AllocType
2545 << unsigned(Alignment / Context.getCharWidth())
2546 << unsigned(NewAlignment / Context.getCharWidth());
2547 }
2548 }
2549
2550 // Array 'new' can't have any initializers except empty parentheses.
2551 // Initializer lists are also allowed, in C++11. Rely on the parser for the
2552 // dialect distinction.
2553 if (ArraySize && !isLegalArrayNewInitializer(Style: InitStyle, Init: Initializer,
2554 IsCPlusPlus20: getLangOpts().CPlusPlus20)) {
2555 SourceRange InitRange(Exprs.front()->getBeginLoc(),
2556 Exprs.back()->getEndLoc());
2557 Diag(Loc: StartLoc, DiagID: diag::err_new_array_init_args) << InitRange;
2558 return ExprError();
2559 }
2560
2561 // If we can perform the initialization, and we've not already done so,
2562 // do it now.
2563 if (!AllocType->isDependentType() &&
2564 !Expr::hasAnyTypeDependentArguments(Exprs)) {
2565 // The type we initialize is the complete type, including the array bound.
2566 QualType InitType;
2567 if (KnownArraySize)
2568 InitType = Context.getConstantArrayType(
2569 EltTy: AllocType,
2570 ArySize: llvm::APInt(Context.getTypeSize(T: Context.getSizeType()),
2571 *KnownArraySize),
2572 SizeExpr: *ArraySize, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2573 else if (ArraySize)
2574 InitType = Context.getIncompleteArrayType(EltTy: AllocType,
2575 ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2576 else
2577 InitType = AllocType;
2578
2579 InitializedEntity Entity
2580 = InitializedEntity::InitializeNew(NewLoc: StartLoc, Type: InitType);
2581 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
2582 ExprResult FullInit = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
2583 if (FullInit.isInvalid())
2584 return ExprError();
2585
2586 // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
2587 // we don't want the initialized object to be destructed.
2588 // FIXME: We should not create these in the first place.
2589 if (CXXBindTemporaryExpr *Binder =
2590 dyn_cast_or_null<CXXBindTemporaryExpr>(Val: FullInit.get()))
2591 FullInit = Binder->getSubExpr();
2592
2593 Initializer = FullInit.get();
2594
2595 // FIXME: If we have a KnownArraySize, check that the array bound of the
2596 // initializer is no greater than that constant value.
2597
2598 if (ArraySize && !*ArraySize) {
2599 auto *CAT = Context.getAsConstantArrayType(T: Initializer->getType());
2600 if (CAT) {
2601 // FIXME: Track that the array size was inferred rather than explicitly
2602 // specified.
2603 ArraySize = IntegerLiteral::Create(
2604 C: Context, V: CAT->getSize(), type: Context.getSizeType(), l: TypeRange.getEnd());
2605 } else {
2606 Diag(Loc: TypeRange.getEnd(), DiagID: diag::err_new_array_size_unknown_from_init)
2607 << Initializer->getSourceRange();
2608 }
2609 }
2610 }
2611
2612 // Mark the new and delete operators as referenced.
2613 if (OperatorNew) {
2614 if (DiagnoseUseOfDecl(D: OperatorNew, Locs: StartLoc))
2615 return ExprError();
2616 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorNew);
2617 }
2618 if (OperatorDelete) {
2619 if (DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc))
2620 return ExprError();
2621 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);
2622 }
2623
2624 return CXXNewExpr::Create(Ctx: Context, IsGlobalNew: UseGlobal, OperatorNew, OperatorDelete,
2625 IAP, UsualArrayDeleteWantsSize, PlacementArgs,
2626 TypeIdParens, ArraySize, InitializationStyle: InitStyle, Initializer,
2627 Ty: ResultType, AllocatedTypeInfo: AllocTypeInfo, Range, DirectInitRange);
2628}
2629
2630bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
2631 SourceRange R) {
2632 // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
2633 // abstract class type or array thereof.
2634 if (AllocType->isFunctionType())
2635 return Diag(Loc, DiagID: diag::err_bad_new_type)
2636 << AllocType << 0 << R;
2637 else if (AllocType->isReferenceType())
2638 return Diag(Loc, DiagID: diag::err_bad_new_type)
2639 << AllocType << 1 << R;
2640 else if (!AllocType->isDependentType() &&
2641 RequireCompleteSizedType(
2642 Loc, T: AllocType, DiagID: diag::err_new_incomplete_or_sizeless_type, Args: R))
2643 return true;
2644 else if (RequireNonAbstractType(Loc, T: AllocType,
2645 DiagID: diag::err_allocation_of_abstract_type))
2646 return true;
2647 else if (AllocType->isVariablyModifiedType())
2648 return Diag(Loc, DiagID: diag::err_variably_modified_new_type)
2649 << AllocType;
2650 else if (AllocType.getAddressSpace() != LangAS::Default &&
2651 !getLangOpts().OpenCLCPlusPlus)
2652 return Diag(Loc, DiagID: diag::err_address_space_qualified_new)
2653 << AllocType.getUnqualifiedType()
2654 << AllocType.getQualifiers().getAddressSpaceAttributePrintValue();
2655 else if (getLangOpts().ObjCAutoRefCount) {
2656 if (const ArrayType *AT = Context.getAsArrayType(T: AllocType)) {
2657 QualType BaseAllocType = Context.getBaseElementType(VAT: AT);
2658 if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2659 BaseAllocType->isObjCLifetimeType())
2660 return Diag(Loc, DiagID: diag::err_arc_new_array_without_ownership)
2661 << BaseAllocType;
2662 }
2663 }
2664
2665 return false;
2666}
2667
// Selects which flavor of allocation function a resolution pass considers:
// Typed admits only type-aware operator new/delete candidates (those for
// which isTypeAwareOperatorNewOrDelete() is true), Untyped only the
// conventional ones.
enum class ResolveMode { Typed, Untyped };

/// Perform one pass of overload resolution for an allocation function over
/// the declarations in \p R, considering only candidates whose
/// type-awareness matches \p Mode.
///
/// \param Args the synthesized implicit arguments (a leading type-identity
///        argument in Typed mode, then size, then optionally alignment)
///        followed by any placement arguments. May be mutated: the
///        alignment argument is erased before retrying unaligned.
/// \param PassAlignment set to AlignedAllocationMode::No when the aligned
///        form found no viable function and resolution is retried without
///        the alignment argument (C++17 [expr.new]p13).
/// \param Operator receives the selected function on success.
/// \param AlignedCandidates on a recursive unaligned retry, the candidate
///        set from the earlier aligned attempt; used only for diagnostics.
/// \param AlignArg on a recursive retry, the alignment expression that was
///        removed from \p Args; used only for diagnostics.
/// \returns true on failure (diagnosed when \p Diagnose). Note that in
///          Typed mode finding no viable candidate is NOT a failure:
///          Operator is left null and the caller falls back to an untyped
///          pass with the type-identity-less argument list.
static bool resolveAllocationOverloadInterior(
    Sema &S, LookupResult &R, SourceRange Range, ResolveMode Mode,
    SmallVectorImpl<Expr *> &Args, AlignedAllocationMode &PassAlignment,
    FunctionDecl *&Operator, OverloadCandidateSet *AlignedCandidates,
    Expr *AlignArg, bool Diagnose) {
  // In Typed mode Args[0] is the type-identity argument, so the size/align
  // arguments sit one slot further right than in Untyped mode.
  unsigned NonTypeArgumentOffset = 0;
  if (Mode == ResolveMode::Typed) {
    ++NonTypeArgumentOffset;
  }

  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*Alloc)->getUnderlyingDecl();
    // Skip candidates whose type-awareness doesn't match this pass's Mode.
    bool IsTypeAware = D->getAsFunction()->isTypeAwareOperatorNewOrDelete();
    if (IsTypeAware == (Mode != ResolveMode::Typed))
      continue;

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: Alloc.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: Alloc.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    // Allocation access is checked with our own custom logic (the caller
    // suppressed the lookup's diagnostics).
    if (S.CheckAllocationAccess(OperatorLoc: R.getNameLoc(), PlacementRange: Range, NamingClass: R.getNamingClass(),
                                FoundDecl: Best->FoundDecl) == Sema::AR_inaccessible)
      return true;

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    // C++17 [expr.new]p13:
    //   If no matching function is found and the allocated object type has
    //   new-extended alignment, the alignment argument is removed from the
    //   argument list, and overload resolution is performed again.
    // Recurse with the alignment argument stripped; keep the removed
    // expression and the aligned candidate set for later diagnostics.
    if (isAlignedAllocation(Mode: PassAlignment)) {
      PassAlignment = AlignedAllocationMode::No;
      AlignArg = Args[NonTypeArgumentOffset + 1];
      Args.erase(CI: Args.begin() + NonTypeArgumentOffset + 1);
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               AlignedCandidates: &Candidates, AlignArg, Diagnose);
    }

    // MSVC will fall back on trying to find a matching global operator new
    // if operator new[] cannot be found. Also, MSVC will leak by not
    // generating a call to operator delete or operator delete[], but we
    // will not replicate that bug.
    // FIXME: Find out how this interacts with the std::align_val_t fallback
    // once MSVC implements it.
    if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
        S.Context.getLangOpts().MSVCCompat && Mode != ResolveMode::Typed) {
      R.clear();
      R.setLookupName(S.Context.DeclarationNames.getCXXOperatorName(Op: OO_New));
      S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
      // FIXME: This will give bad diagnostics pointing at the wrong functions.
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               /*Candidates=*/AlignedCandidates: nullptr,
                                               /*AlignArg=*/nullptr, Diagnose);
    }
    if (Mode == ResolveMode::Typed) {
      // If we can't find a matching type aware operator we don't consider this
      // a failure.
      Operator = nullptr;
      return false;
    }
    if (Diagnose) {
      // If this is an allocation of the form 'new (p) X' for some object
      // pointer p (or an expression that will decay to such a pointer),
      // diagnose the reason for the error.
      if (!R.isClassLookup() && Args.size() == 2 &&
          (Args[1]->getType()->isObjectPointerType() ||
           Args[1]->getType()->isArrayType())) {
        const QualType Arg1Type = Args[1]->getType();
        QualType UnderlyingType = S.Context.getBaseElementType(QT: Arg1Type);
        if (UnderlyingType->isPointerType())
          UnderlyingType = UnderlyingType->getPointeeType();
        // Placement-new into const storage gets its own diagnostic rather
        // than a generic "no viable function".
        if (UnderlyingType.isConstQualified()) {
          S.Diag(Loc: Args[1]->getExprLoc(),
                 DiagID: diag::err_placement_new_into_const_qualified_storage)
              << Arg1Type << Args[1]->getSourceRange();
          return true;
        }
        // Otherwise, the likely cause is a missing #include <new>.
        S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_need_header_before_placement_new)
            << R.getLookupName() << Range;
        // Listing the candidates is unlikely to be useful; skip it.
        return true;
      }

      // Finish checking all candidates before we note any. This checking can
      // produce additional diagnostics so can't be interleaved with our
      // emission of notes.
      //
      // For an aligned allocation, separately check the aligned and unaligned
      // candidates with their respective argument lists.
      SmallVector<OverloadCandidate*, 32> Cands;
      SmallVector<OverloadCandidate*, 32> AlignedCands;
      llvm::SmallVector<Expr*, 4> AlignedArgs;
      if (AlignedCandidates) {
        // An "aligned" candidate takes std::align_val_t right after the
        // size parameter (and the type-identity parameter, if any).
        auto IsAligned = [NonTypeArgumentOffset](OverloadCandidate &C) {
          auto AlignArgOffset = NonTypeArgumentOffset + 1;
          return C.Function->getNumParams() > AlignArgOffset &&
                 C.Function->getParamDecl(i: AlignArgOffset)
                     ->getType()
                     ->isAlignValT();
        };
        auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };

        // Reconstruct the aligned argument list by splicing the saved
        // AlignArg back in where it was erased above.
        AlignedArgs.reserve(N: Args.size() + NonTypeArgumentOffset + 1);
        for (unsigned Idx = 0; Idx < NonTypeArgumentOffset + 1; ++Idx)
          AlignedArgs.push_back(Elt: Args[Idx]);
        AlignedArgs.push_back(Elt: AlignArg);
        AlignedArgs.append(in_start: Args.begin() + NonTypeArgumentOffset + 1,
                           in_end: Args.end());
        AlignedCands = AlignedCandidates->CompleteCandidates(
            S, OCD: OCD_AllCandidates, Args: AlignedArgs, OpLoc: R.getNameLoc(), Filter: IsAligned);

        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc(), Filter: IsUnaligned);
      } else {
        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc());
      }

      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_ovl_no_viable_function_in_call)
          << R.getLookupName() << Range;
      if (AlignedCandidates)
        AlignedCandidates->NoteCandidates(S, Args: AlignedArgs, Cands: AlignedCands, Opc: "",
                                          OpLoc: R.getNameLoc());
      Candidates.NoteCandidates(S, Args, Cands, Opc: "", OpLoc: R.getNameLoc());
    }
    return true;

  case OR_Ambiguous:
    if (Diagnose) {
      Candidates.NoteCandidates(
          PA: PartialDiagnosticAt(R.getNameLoc(),
                               S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                   << R.getLookupName() << Range),
          S, OCD: OCD_AmbiguousCandidates, Args);
    }
    return true;

  case OR_Deleted: {
    if (Diagnose)
      S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                     CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
2840
2841enum class DeallocLookupMode { Untyped, OptionallyTyped };
2842
2843static void LookupGlobalDeallocationFunctions(Sema &S, SourceLocation Loc,
2844 LookupResult &FoundDelete,
2845 DeallocLookupMode Mode,
2846 DeclarationName Name) {
2847 S.LookupQualifiedName(R&: FoundDelete, LookupCtx: S.Context.getTranslationUnitDecl());
2848 if (Mode != DeallocLookupMode::OptionallyTyped) {
2849 // We're going to remove either the typed or the non-typed
2850 bool RemoveTypedDecl = Mode == DeallocLookupMode::Untyped;
2851 LookupResult::Filter Filter = FoundDelete.makeFilter();
2852 while (Filter.hasNext()) {
2853 FunctionDecl *FD = Filter.next()->getUnderlyingDecl()->getAsFunction();
2854 if (FD->isTypeAwareOperatorNewOrDelete() == RemoveTypedDecl)
2855 Filter.erase();
2856 }
2857 Filter.done();
2858 }
2859}
2860
/// Resolve an allocation-function call, first attempting type-aware
/// resolution when \p IAP requests it and then falling back to
/// conventional (untyped) resolution.
///
/// On entry in the type-aware case, \p Args is laid out as:
///   [0] type-identity, [1] size, [2] alignment (alignment always present,
///   since type-aware allocation implicitly passes it), then placement
///   arguments. If no typed operator matches, \p IAP's flags are reset and
///   \p Args is replaced by the saved type-identity-less list before the
///   untyped pass.
///
/// \param Operator receives the selected function; left null only on the
///        non-failing "no typed match" path before the untyped retry.
/// \returns true on (diagnosed, if \p Diagnose) failure.
static bool resolveAllocationOverload(
    Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
    ImplicitAllocationParameters &IAP, FunctionDecl *&Operator,
    OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
  Operator = nullptr;
  if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
    assert(S.isStdTypeIdentity(Args[0]->getType(), nullptr));
    // The internal overload resolution work mutates the argument list
    // in accordance with the spec. We may want to change that in future,
    // but for now we deal with this by making a copy of the non-type-identity
    // arguments.
    SmallVector<Expr *> UntypedParameters;
    UntypedParameters.reserve(N: Args.size() - 1);
    UntypedParameters.push_back(Elt: Args[1]);
    // Type aware allocation implicitly includes the alignment parameter so
    // only include it in the untyped parameter list if alignment was explicitly
    // requested
    if (isAlignedAllocation(Mode: IAP.PassAlignment))
      UntypedParameters.push_back(Elt: Args[2]);
    UntypedParameters.append(in_start: Args.begin() + 3, in_end: Args.end());

    // The typed pass always passes alignment; remember the caller's mode so
    // it can be restored for the untyped fallback below.
    AlignedAllocationMode InitialAlignmentMode = IAP.PassAlignment;
    IAP.PassAlignment = AlignedAllocationMode::Yes;
    if (resolveAllocationOverloadInterior(
            S, R, Range, Mode: ResolveMode::Typed, Args, PassAlignment&: IAP.PassAlignment, Operator,
            AlignedCandidates, AlignArg, Diagnose))
      return true;
    if (Operator)
      return false;

    // If we got to this point we could not find a matching typed operator
    // so we update the IAP flags, and revert to our stored copy of the
    // type-identity-less argument list.
    IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
    IAP.PassAlignment = InitialAlignmentMode;
    Args = std::move(UntypedParameters);
  }
  assert(!S.isStdTypeIdentity(Args[0]->getType(), nullptr));
  return resolveAllocationOverloadInterior(
      S, R, Range, Mode: ResolveMode::Untyped, Args, PassAlignment&: IAP.PassAlignment, Operator,
      AlignedCandidates, AlignArg, Diagnose);
}
2903
2904bool Sema::FindAllocationFunctions(
2905 SourceLocation StartLoc, SourceRange Range,
2906 AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope,
2907 QualType AllocType, bool IsArray, ImplicitAllocationParameters &IAP,
2908 MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew,
2909 FunctionDecl *&OperatorDelete, bool Diagnose) {
2910 // --- Choosing an allocation function ---
2911 // C++ 5.3.4p8 - 14 & 18
2912 // 1) If looking in AllocationFunctionScope::Global scope for allocation
2913 // functions, only look in
2914 // the global scope. Else, if AllocationFunctionScope::Class, only look in
2915 // the scope of the allocated class. If AllocationFunctionScope::Both, look
2916 // in both.
2917 // 2) If an array size is given, look for operator new[], else look for
2918 // operator new.
2919 // 3) The first argument is always size_t. Append the arguments from the
2920 // placement form.
2921
2922 SmallVector<Expr*, 8> AllocArgs;
2923 AllocArgs.reserve(N: IAP.getNumImplicitArgs() + PlaceArgs.size());
2924
2925 // C++ [expr.new]p8:
2926 // If the allocated type is a non-array type, the allocation
2927 // function's name is operator new and the deallocation function's
2928 // name is operator delete. If the allocated type is an array
2929 // type, the allocation function's name is operator new[] and the
2930 // deallocation function's name is operator delete[].
2931 DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
2932 Op: IsArray ? OO_Array_New : OO_New);
2933
2934 QualType AllocElemType = Context.getBaseElementType(QT: AllocType);
2935
2936 // We don't care about the actual value of these arguments.
2937 // FIXME: Should the Sema create the expression and embed it in the syntax
2938 // tree? Or should the consumer just recalculate the value?
2939 // FIXME: Using a dummy value will interact poorly with attribute enable_if.
2940
2941 // We use size_t as a stand in so that we can construct the init
2942 // expr on the stack
2943 QualType TypeIdentity = Context.getSizeType();
2944 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2945 QualType SpecializedTypeIdentity =
2946 tryBuildStdTypeIdentity(Type: IAP.Type, Loc: StartLoc);
2947 if (!SpecializedTypeIdentity.isNull()) {
2948 TypeIdentity = SpecializedTypeIdentity;
2949 if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
2950 DiagID: diag::err_incomplete_type))
2951 return true;
2952 } else
2953 IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
2954 }
2955 TypeAwareAllocationMode OriginalTypeAwareState = IAP.PassTypeIdentity;
2956
2957 CXXScalarValueInitExpr TypeIdentityParam(TypeIdentity, nullptr, StartLoc);
2958 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
2959 AllocArgs.push_back(Elt: &TypeIdentityParam);
2960
2961 QualType SizeTy = Context.getSizeType();
2962 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
2963 IntegerLiteral Size(Context, llvm::APInt::getZero(numBits: SizeTyWidth), SizeTy,
2964 SourceLocation());
2965 AllocArgs.push_back(Elt: &Size);
2966
2967 QualType AlignValT = Context.VoidTy;
2968 bool IncludeAlignParam = isAlignedAllocation(Mode: IAP.PassAlignment) ||
2969 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity);
2970 if (IncludeAlignParam) {
2971 DeclareGlobalNewDelete();
2972 AlignValT = Context.getTypeDeclType(Decl: getStdAlignValT());
2973 }
2974 CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
2975 if (IncludeAlignParam)
2976 AllocArgs.push_back(Elt: &Align);
2977
2978 llvm::append_range(C&: AllocArgs, R&: PlaceArgs);
2979
2980 // Find the allocation function.
2981 {
2982 LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
2983
2984 // C++1z [expr.new]p9:
2985 // If the new-expression begins with a unary :: operator, the allocation
2986 // function's name is looked up in the global scope. Otherwise, if the
2987 // allocated type is a class type T or array thereof, the allocation
2988 // function's name is looked up in the scope of T.
2989 if (AllocElemType->isRecordType() &&
2990 NewScope != AllocationFunctionScope::Global)
2991 LookupQualifiedName(R, LookupCtx: AllocElemType->getAsCXXRecordDecl());
2992
2993 // We can see ambiguity here if the allocation function is found in
2994 // multiple base classes.
2995 if (R.isAmbiguous())
2996 return true;
2997
2998 // If this lookup fails to find the name, or if the allocated type is not
2999 // a class type, the allocation function's name is looked up in the
3000 // global scope.
3001 if (R.empty()) {
3002 if (NewScope == AllocationFunctionScope::Class)
3003 return true;
3004
3005 LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
3006 }
3007
3008 if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
3009 if (PlaceArgs.empty()) {
3010 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default new";
3011 } else {
3012 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_placement_new);
3013 }
3014 return true;
3015 }
3016
3017 assert(!R.empty() && "implicitly declared allocation functions not found");
3018 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
3019
3020 // We do our own custom access checks below.
3021 R.suppressDiagnostics();
3022
3023 if (resolveAllocationOverload(S&: *this, R, Range, Args&: AllocArgs, IAP, Operator&: OperatorNew,
3024 /*Candidates=*/AlignedCandidates: nullptr,
3025 /*AlignArg=*/nullptr, Diagnose))
3026 return true;
3027 }
3028
3029 // We don't need an operator delete if we're running under -fno-exceptions.
3030 if (!getLangOpts().Exceptions) {
3031 OperatorDelete = nullptr;
3032 return false;
3033 }
3034
3035 // Note, the name of OperatorNew might have been changed from array to
3036 // non-array by resolveAllocationOverload.
3037 DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
3038 Op: OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
3039 ? OO_Array_Delete
3040 : OO_Delete);
3041
3042 // C++ [expr.new]p19:
3043 //
3044 // If the new-expression begins with a unary :: operator, the
3045 // deallocation function's name is looked up in the global
3046 // scope. Otherwise, if the allocated type is a class type T or an
3047 // array thereof, the deallocation function's name is looked up in
3048 // the scope of T. If this lookup fails to find the name, or if
3049 // the allocated type is not a class type or array thereof, the
3050 // deallocation function's name is looked up in the global scope.
3051 LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
3052 if (AllocElemType->isRecordType() &&
3053 DeleteScope != AllocationFunctionScope::Global) {
3054 auto *RD =
3055 cast<CXXRecordDecl>(Val: AllocElemType->castAs<RecordType>()->getDecl());
3056 LookupQualifiedName(R&: FoundDelete, LookupCtx: RD);
3057 }
3058 if (FoundDelete.isAmbiguous())
3059 return true; // FIXME: clean up expressions?
3060
3061 // Filter out any destroying operator deletes. We can't possibly call such a
3062 // function in this context, because we're handling the case where the object
3063 // was not successfully constructed.
3064 // FIXME: This is not covered by the language rules yet.
3065 {
3066 LookupResult::Filter Filter = FoundDelete.makeFilter();
3067 while (Filter.hasNext()) {
3068 auto *FD = dyn_cast<FunctionDecl>(Val: Filter.next()->getUnderlyingDecl());
3069 if (FD && FD->isDestroyingOperatorDelete())
3070 Filter.erase();
3071 }
3072 Filter.done();
3073 }
3074
3075 auto GetRedeclContext = [](Decl *D) {
3076 return D->getDeclContext()->getRedeclContext();
3077 };
3078
3079 DeclContext *OperatorNewContext = GetRedeclContext(OperatorNew);
3080
3081 bool FoundGlobalDelete = FoundDelete.empty();
3082 bool IsClassScopedTypeAwareNew =
3083 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity) &&
3084 OperatorNewContext->isRecord();
3085 auto DiagnoseMissingTypeAwareCleanupOperator = [&](bool IsPlacementOperator) {
3086 assert(isTypeAwareAllocation(IAP.PassTypeIdentity));
3087 if (Diagnose) {
3088 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3089 << OperatorNew->getDeclName() << IsPlacementOperator << DeleteName;
3090 Diag(Loc: OperatorNew->getLocation(), DiagID: diag::note_type_aware_operator_declared)
3091 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3092 << OperatorNew->getDeclName() << OperatorNewContext;
3093 }
3094 };
3095 if (IsClassScopedTypeAwareNew && FoundDelete.empty()) {
3096 DiagnoseMissingTypeAwareCleanupOperator(/*isPlacementNew=*/false);
3097 return true;
3098 }
3099 if (FoundDelete.empty()) {
3100 FoundDelete.clear(Kind: LookupOrdinaryName);
3101
3102 if (DeleteScope == AllocationFunctionScope::Class)
3103 return true;
3104
3105 DeclareGlobalNewDelete();
3106 DeallocLookupMode LookupMode = isTypeAwareAllocation(Mode: OriginalTypeAwareState)
3107 ? DeallocLookupMode::OptionallyTyped
3108 : DeallocLookupMode::Untyped;
3109 LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete, Mode: LookupMode,
3110 Name: DeleteName);
3111 }
3112
3113 FoundDelete.suppressDiagnostics();
3114
3115 SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
3116
3117 // Whether we're looking for a placement operator delete is dictated
3118 // by whether we selected a placement operator new, not by whether
3119 // we had explicit placement arguments. This matters for things like
3120 // struct A { void *operator new(size_t, int = 0); ... };
3121 // A *a = new A()
3122 //
3123 // We don't have any definition for what a "placement allocation function"
3124 // is, but we assume it's any allocation function whose
3125 // parameter-declaration-clause is anything other than (size_t).
3126 //
3127 // FIXME: Should (size_t, std::align_val_t) also be considered non-placement?
3128 // This affects whether an exception from the constructor of an overaligned
3129 // type uses the sized or non-sized form of aligned operator delete.
3130
3131 unsigned NonPlacementNewArgCount = 1; // size parameter
3132 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
3133 NonPlacementNewArgCount =
3134 /* type-identity */ 1 + /* size */ 1 + /* alignment */ 1;
3135 bool isPlacementNew = !PlaceArgs.empty() ||
3136 OperatorNew->param_size() != NonPlacementNewArgCount ||
3137 OperatorNew->isVariadic();
3138
3139 if (isPlacementNew) {
3140 // C++ [expr.new]p20:
3141 // A declaration of a placement deallocation function matches the
3142 // declaration of a placement allocation function if it has the
3143 // same number of parameters and, after parameter transformations
3144 // (8.3.5), all parameter types except the first are
3145 // identical. [...]
3146 //
3147 // To perform this comparison, we compute the function type that
3148 // the deallocation function should have, and use that type both
3149 // for template argument deduction and for comparison purposes.
3150 QualType ExpectedFunctionType;
3151 {
3152 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
3153
3154 SmallVector<QualType, 6> ArgTypes;
3155 int InitialParamOffset = 0;
3156 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3157 ArgTypes.push_back(Elt: TypeIdentity);
3158 InitialParamOffset = 1;
3159 }
3160 ArgTypes.push_back(Elt: Context.VoidPtrTy);
3161 for (unsigned I = ArgTypes.size() - InitialParamOffset,
3162 N = Proto->getNumParams();
3163 I < N; ++I)
3164 ArgTypes.push_back(Elt: Proto->getParamType(i: I));
3165
3166 FunctionProtoType::ExtProtoInfo EPI;
3167 // FIXME: This is not part of the standard's rule.
3168 EPI.Variadic = Proto->isVariadic();
3169
3170 ExpectedFunctionType
3171 = Context.getFunctionType(ResultTy: Context.VoidTy, Args: ArgTypes, EPI);
3172 }
3173
3174 for (LookupResult::iterator D = FoundDelete.begin(),
3175 DEnd = FoundDelete.end();
3176 D != DEnd; ++D) {
3177 FunctionDecl *Fn = nullptr;
3178 if (FunctionTemplateDecl *FnTmpl =
3179 dyn_cast<FunctionTemplateDecl>(Val: (*D)->getUnderlyingDecl())) {
3180 // Perform template argument deduction to try to match the
3181 // expected function type.
3182 TemplateDeductionInfo Info(StartLoc);
3183 if (DeduceTemplateArguments(FunctionTemplate: FnTmpl, ExplicitTemplateArgs: nullptr, ArgFunctionType: ExpectedFunctionType, Specialization&: Fn,
3184 Info) != TemplateDeductionResult::Success)
3185 continue;
3186 } else
3187 Fn = cast<FunctionDecl>(Val: (*D)->getUnderlyingDecl());
3188
3189 if (Context.hasSameType(T1: adjustCCAndNoReturn(ArgFunctionType: Fn->getType(),
3190 FunctionType: ExpectedFunctionType,
3191 /*AdjustExcpetionSpec*/AdjustExceptionSpec: true),
3192 T2: ExpectedFunctionType))
3193 Matches.push_back(Elt: std::make_pair(x: D.getPair(), y&: Fn));
3194 }
3195
3196 if (getLangOpts().CUDA)
3197 CUDA().EraseUnwantedMatches(Caller: getCurFunctionDecl(/*AllowLambda=*/true),
3198 Matches);
3199 if (Matches.empty() && isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3200 DiagnoseMissingTypeAwareCleanupOperator(isPlacementNew);
3201 return true;
3202 }
3203 } else {
3204 // C++1y [expr.new]p22:
3205 // For a non-placement allocation function, the normal deallocation
3206 // function lookup is used
3207 //
3208 // Per [expr.delete]p10, this lookup prefers a member operator delete
3209 // without a size_t argument, but prefers a non-member operator delete
3210 // with a size_t where possible (which it always is in this case).
3211 llvm::SmallVector<UsualDeallocFnInfo, 4> BestDeallocFns;
3212 ImplicitDeallocationParameters IDP = {
3213 AllocElemType, OriginalTypeAwareState,
3214 alignedAllocationModeFromBool(
3215 IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: AllocElemType)),
3216 sizedDeallocationModeFromBool(IsSized: FoundGlobalDelete)};
3217 UsualDeallocFnInfo Selected = resolveDeallocationOverload(
3218 S&: *this, R&: FoundDelete, IDP, Loc: StartLoc, BestFns: &BestDeallocFns);
3219 if (Selected && BestDeallocFns.empty())
3220 Matches.push_back(Elt: std::make_pair(x&: Selected.Found, y&: Selected.FD));
3221 else {
3222 // If we failed to select an operator, all remaining functions are viable
3223 // but ambiguous.
3224 for (auto Fn : BestDeallocFns)
3225 Matches.push_back(Elt: std::make_pair(x&: Fn.Found, y&: Fn.FD));
3226 }
3227 }
3228
3229 // C++ [expr.new]p20:
3230 // [...] If the lookup finds a single matching deallocation
3231 // function, that function will be called; otherwise, no
3232 // deallocation function will be called.
3233 if (Matches.size() == 1) {
3234 OperatorDelete = Matches[0].second;
3235 DeclContext *OperatorDeleteContext = GetRedeclContext(OperatorDelete);
3236 bool FoundTypeAwareOperator =
3237 OperatorDelete->isTypeAwareOperatorNewOrDelete() ||
3238 OperatorNew->isTypeAwareOperatorNewOrDelete();
3239 if (Diagnose && FoundTypeAwareOperator) {
3240 bool MismatchedTypeAwareness =
3241 OperatorDelete->isTypeAwareOperatorNewOrDelete() !=
3242 OperatorNew->isTypeAwareOperatorNewOrDelete();
3243 bool MismatchedContext = OperatorDeleteContext != OperatorNewContext;
3244 if (MismatchedTypeAwareness || MismatchedContext) {
3245 FunctionDecl *Operators[] = {OperatorDelete, OperatorNew};
3246 bool TypeAwareOperatorIndex =
3247 OperatorNew->isTypeAwareOperatorNewOrDelete();
3248 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3249 << Operators[TypeAwareOperatorIndex]->getDeclName()
3250 << isPlacementNew
3251 << Operators[!TypeAwareOperatorIndex]->getDeclName()
3252 << GetRedeclContext(Operators[TypeAwareOperatorIndex]);
3253 Diag(Loc: OperatorNew->getLocation(),
3254 DiagID: diag::note_type_aware_operator_declared)
3255 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3256 << OperatorNew->getDeclName() << OperatorNewContext;
3257 Diag(Loc: OperatorDelete->getLocation(),
3258 DiagID: diag::note_type_aware_operator_declared)
3259 << OperatorDelete->isTypeAwareOperatorNewOrDelete()
3260 << OperatorDelete->getDeclName() << OperatorDeleteContext;
3261 }
3262 }
3263
3264 // C++1z [expr.new]p23:
3265 // If the lookup finds a usual deallocation function (3.7.4.2)
3266 // with a parameter of type std::size_t and that function, considered
3267 // as a placement deallocation function, would have been
3268 // selected as a match for the allocation function, the program
3269 // is ill-formed.
3270 if (getLangOpts().CPlusPlus11 && isPlacementNew &&
3271 isNonPlacementDeallocationFunction(S&: *this, FD: OperatorDelete)) {
3272 UsualDeallocFnInfo Info(*this,
3273 DeclAccessPair::make(D: OperatorDelete, AS: AS_public),
3274 AllocElemType, StartLoc);
3275 // Core issue, per mail to core reflector, 2016-10-09:
3276 // If this is a member operator delete, and there is a corresponding
3277 // non-sized member operator delete, this isn't /really/ a sized
3278 // deallocation function, it just happens to have a size_t parameter.
3279 bool IsSizedDelete = isSizedDeallocation(Mode: Info.IDP.PassSize);
3280 if (IsSizedDelete && !FoundGlobalDelete) {
3281 ImplicitDeallocationParameters SizeTestingIDP = {
3282 AllocElemType, Info.IDP.PassTypeIdentity, Info.IDP.PassAlignment,
3283 SizedDeallocationMode::No};
3284 auto NonSizedDelete = resolveDeallocationOverload(
3285 S&: *this, R&: FoundDelete, IDP: SizeTestingIDP, Loc: StartLoc);
3286 if (NonSizedDelete &&
3287 !isSizedDeallocation(Mode: NonSizedDelete.IDP.PassSize) &&
3288 NonSizedDelete.IDP.PassAlignment == Info.IDP.PassAlignment)
3289 IsSizedDelete = false;
3290 }
3291
3292 if (IsSizedDelete && !isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3293 SourceRange R = PlaceArgs.empty()
3294 ? SourceRange()
3295 : SourceRange(PlaceArgs.front()->getBeginLoc(),
3296 PlaceArgs.back()->getEndLoc());
3297 Diag(Loc: StartLoc, DiagID: diag::err_placement_new_non_placement_delete) << R;
3298 if (!OperatorDelete->isImplicit())
3299 Diag(Loc: OperatorDelete->getLocation(), DiagID: diag::note_previous_decl)
3300 << DeleteName;
3301 }
3302 }
3303 if (CheckDeleteOperator(S&: *this, StartLoc, Range, Diagnose,
3304 NamingClass: FoundDelete.getNamingClass(), Decl: Matches[0].first,
3305 Operator: Matches[0].second))
3306 return true;
3307
3308 } else if (!Matches.empty()) {
3309 // We found multiple suitable operators. Per [expr.new]p20, that means we
3310 // call no 'operator delete' function, but we should at least warn the user.
3311 // FIXME: Suppress this warning if the construction cannot throw.
3312 Diag(Loc: StartLoc, DiagID: diag::warn_ambiguous_suitable_delete_function_found)
3313 << DeleteName << AllocElemType;
3314
3315 for (auto &Match : Matches)
3316 Diag(Loc: Match.second->getLocation(),
3317 DiagID: diag::note_member_declared_here) << DeleteName;
3318 }
3319
3320 return false;
3321}
3322
// Implicitly declares the replaceable global allocation/deallocation
// functions (operator new/new[]/delete/delete[], plus their sized and
// aligned variants as enabled by the language options), together with the
// supporting std::bad_alloc (pre-C++11) and std::align_val_t declarations.
// Idempotent: guarded by GlobalNewDeleteDeclared.
void Sema::DeclareGlobalNewDelete() {
  // Only do this work once per translation unit.
  if (GlobalNewDeleteDeclared)
    return;

  // The implicitly declared new and delete operators
  // are not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus)
    return;

  // C++ [basic.stc.dynamic.general]p2:
  //   The library provides default definitions for the global allocation
  //   and deallocation functions. Some global allocation and deallocation
  //   functions are replaceable ([new.delete]); these are attached to the
  //   global module ([module.unit]).
  // So, when inside a named module, wrap the implicit declarations in a
  // synthesized global module fragment (popped at the end of this function).
  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PushGlobalModuleFragment(BeginLoc: SourceLocation());

  // C++ [basic.std.dynamic]p2:
  //   [...] The following allocation and deallocation functions (18.4) are
  //   implicitly declared in global scope in each translation unit of a
  //   program
  //
  //     C++03:
  //     void* operator new(std::size_t) throw(std::bad_alloc);
  //     void* operator new[](std::size_t) throw(std::bad_alloc);
  //     void operator delete(void*) throw();
  //     void operator delete[](void*) throw();
  //     C++11:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     C++1y:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     void operator delete(void*, std::size_t) noexcept;
  //     void operator delete[](void*, std::size_t) noexcept;
  //
  //   These implicit declarations introduce only the function names operator
  //   new, operator new[], operator delete, operator delete[].
  //
  // Here, we need to refer to std::bad_alloc, so we will implicitly declare
  // "std" or "bad_alloc" as necessary to form the exception specification.
  // However, we do not make these implicit declarations visible to name
  // lookup.
  if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
    // The "std::bad_alloc" class has not yet been declared, so build it
    // implicitly.
    StdBadAlloc = CXXRecordDecl::Create(
        C: Context, TK: TagTypeKind::Class, DC: getOrCreateStdNamespace(),
        StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "bad_alloc"), PrevDecl: nullptr);
    getStdBadAlloc()->setImplicit(true);

    // The implicitly declared "std::bad_alloc" should live in global module
    // fragment.
    if (TheGlobalModuleFragment) {
      getStdBadAlloc()->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
    }
  }
  if (!StdAlignValT && getLangOpts().AlignedAllocation) {
    // The "std::align_val_t" enum class has not yet been declared, so build it
    // implicitly.
    auto *AlignValT = EnumDecl::Create(
        C&: Context, DC: getOrCreateStdNamespace(), StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "align_val_t"), PrevDecl: nullptr, IsScoped: true, IsScopedUsingClassTag: true, IsFixed: true);

    // The implicitly declared "std::align_val_t" should live in global module
    // fragment.
    if (TheGlobalModuleFragment) {
      AlignValT->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // align_val_t has the same underlying/promotion type as std::size_t.
    AlignValT->setIntegerType(Context.getSizeType());
    AlignValT->setPromotionType(Context.getSizeType());
    AlignValT->setImplicit(true);

    StdAlignValT = AlignValT;
  }

  // Set the flag before declaring, so re-entry (e.g. via lookup triggered
  // below) does not recurse into this function.
  GlobalNewDeleteDeclared = true;

  QualType VoidPtr = Context.getPointerType(T: Context.VoidTy);
  QualType SizeT = Context.getSizeType();

  // Declares all enabled variants of a single operator: the base form plus,
  // where applicable, sized (delete only, -fsized-deallocation) and aligned
  // (-faligned-allocation) overloads. Note the size parameter, once pushed,
  // stays in Params for both align iterations of that size; only the
  // alignment parameter is popped between iterations.
  auto DeclareGlobalAllocationFunctions = [&](OverloadedOperatorKind Kind,
                                              QualType Return, QualType Param) {
    llvm::SmallVector<QualType, 3> Params;
    Params.push_back(Elt: Param);

    // Create up to four variants of the function (sized/aligned).
    bool HasSizedVariant = getLangOpts().SizedDeallocation &&
                           (Kind == OO_Delete || Kind == OO_Array_Delete);
    bool HasAlignedVariant = getLangOpts().AlignedAllocation;

    int NumSizeVariants = (HasSizedVariant ? 2 : 1);
    int NumAlignVariants = (HasAlignedVariant ? 2 : 1);
    for (int Sized = 0; Sized < NumSizeVariants; ++Sized) {
      if (Sized)
        Params.push_back(Elt: SizeT);

      for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
        if (Aligned)
          Params.push_back(Elt: Context.getTypeDeclType(Decl: getStdAlignValT()));

        DeclareGlobalAllocationFunction(
            Name: Context.DeclarationNames.getCXXOperatorName(Op: Kind), Return, Params);

        if (Aligned)
          Params.pop_back();
      }
    }
  };

  DeclareGlobalAllocationFunctions(OO_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
  DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);

  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PopGlobalModuleFragment();
}
3451
/// DeclareGlobalAllocationFunction - Declares a single implicit global
/// allocation function if it doesn't already exist.
///
/// \param Name the operator name (operator new/new[]/delete/delete[]).
/// \param Return the return type of the function (void* or void).
/// \param Params the parameter types of the variant being declared.
///
/// If a function with the same name and identical (canonical, unqualified)
/// parameter types is already declared at translation-unit scope, it is made
/// visible and no new declaration is created. Under CUDA, separate host and
/// device declarations are emitted so each can be replaced independently.
void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
                                           QualType Return,
                                           ArrayRef<QualType> Params) {
  DeclContext *GlobalCtx = Context.getTranslationUnitDecl();

  // Check if this function is already declared.
  DeclContext::lookup_result R = GlobalCtx->lookup(Name);
  for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Only look at non-template functions, as it is the predefined,
    // non-templated allocation function we are trying to declare here.
    if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Val: *Alloc)) {
      if (Func->getNumParams() == Params.size()) {
        llvm::SmallVector<QualType, 3> FuncParams;
        for (auto *P : Func->parameters())
          FuncParams.push_back(
              Elt: Context.getCanonicalType(T: P->getType().getUnqualifiedType()));
        if (llvm::ArrayRef(FuncParams) == Params) {
          // Make the function visible to name lookup, even if we found it in
          // an unimported module. It either is an implicitly-declared global
          // allocation function, or is suppressing that function.
          Func->setVisibleDespiteOwningModule();
          return;
        }
      }
    }
  }

  FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));

  // Compute the exception specification: operator new gets
  // throw(std::bad_alloc) in C++98 (or throw() with -fnew-infallible);
  // operator delete gets noexcept in C++11 and throw() before that.
  QualType BadAllocType;
  bool HasBadAllocExceptionSpec = Name.isAnyOperatorNew();
  if (HasBadAllocExceptionSpec) {
    if (!getLangOpts().CPlusPlus11) {
      BadAllocType = Context.getTypeDeclType(Decl: getStdBadAlloc());
      assert(StdBadAlloc && "Must have std::bad_alloc declared");
      EPI.ExceptionSpec.Type = EST_Dynamic;
      EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
    }
    if (getLangOpts().NewInfallible) {
      EPI.ExceptionSpec.Type = EST_DynamicNone;
    }
  } else {
    EPI.ExceptionSpec =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
  }

  // Builds one FunctionDecl for the operator, optionally tagged with
  // ExtraAttr (used below for the CUDA host/device attributes).
  auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
    QualType FnType = Context.getFunctionType(ResultTy: Return, Args: Params, EPI);
    FunctionDecl *Alloc = FunctionDecl::Create(
        C&: Context, DC: GlobalCtx, StartLoc: SourceLocation(), NLoc: SourceLocation(), N: Name, T: FnType,
        /*TInfo=*/nullptr, SC: SC_None, UsesFPIntrin: getCurFPFeatures().isFPConstrained(), isInlineSpecified: false,
        hasWrittenPrototype: true);
    Alloc->setImplicit();
    // Global allocation functions should always be visible.
    Alloc->setVisibleDespiteOwningModule();

    // An infallible operator new never returns null (unless -fcheck-new).
    if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
        !getLangOpts().CheckNew)
      Alloc->addAttr(
          A: ReturnsNonNullAttr::CreateImplicit(Ctx&: Context, Range: Alloc->getLocation()));

    // C++ [basic.stc.dynamic.general]p2:
    //   The library provides default definitions for the global allocation
    //   and deallocation functions. Some global allocation and deallocation
    //   functions are replaceable ([new.delete]); these are attached to the
    //   global module ([module.unit]).
    //
    // In the language wording, these functions are attched to the global
    // module all the time. But in the implementation, the global module
    // is only meaningful when we're in a module unit. So here we attach
    // these allocation functions to global module conditionally.
    if (TheGlobalModuleFragment) {
      Alloc->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      Alloc->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // Honor -fvisibility-global-new-delete=<kind> if set.
    if (LangOpts.hasGlobalAllocationFunctionVisibility())
      Alloc->addAttr(A: VisibilityAttr::CreateImplicit(
          Ctx&: Context, Visibility: LangOpts.hasHiddenGlobalAllocationFunctionVisibility()
                       ? VisibilityAttr::Hidden
                   : LangOpts.hasProtectedGlobalAllocationFunctionVisibility()
                       ? VisibilityAttr::Protected
                       : VisibilityAttr::Default));

    llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
    for (QualType T : Params) {
      ParamDecls.push_back(Elt: ParmVarDecl::Create(
          C&: Context, DC: Alloc, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T,
          /*TInfo=*/nullptr, S: SC_None, DefArg: nullptr));
      ParamDecls.back()->setImplicit();
    }
    Alloc->setParams(ParamDecls);
    if (ExtraAttr)
      Alloc->addAttr(A: ExtraAttr);
    AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD: Alloc);
    Context.getTranslationUnitDecl()->addDecl(D: Alloc);
    IdResolver.tryAddTopLevelDecl(D: Alloc, Name);
  };

  if (!LangOpts.CUDA)
    CreateAllocationFunctionDecl(nullptr);
  else {
    // Host and device get their own declaration so each can be
    // defined or re-declared independently.
    CreateAllocationFunctionDecl(CUDAHostAttr::CreateImplicit(Ctx&: Context));
    CreateAllocationFunctionDecl(CUDADeviceAttr::CreateImplicit(Ctx&: Context));
  }
}
3565
// Finds the usual (global-scope) deallocation function named \p Name that
// matches the implicit deallocation parameters \p IDP, implicitly declaring
// the global operators first. Returns null if overload resolution finds no
// usable candidate or if the selected operator fails the delete checks
// (access, deletedness, etc.).
FunctionDecl *
Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
                                    ImplicitDeallocationParameters IDP,
                                    DeclarationName Name) {
  DeclareGlobalNewDelete();

  LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
  LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete,
                                    Mode: DeallocLookupMode::OptionallyTyped, Name);

  // FIXME: It's possible for this to result in ambiguity, through a
  // user-declared variadic operator delete or the enable_if attribute. We
  // should probably not consider those cases to be usual deallocation
  // functions. But for now we just make an arbitrary choice in that case.
  auto Result = resolveDeallocationOverload(S&: *this, R&: FoundDelete, IDP, Loc: StartLoc);
  if (!Result)
    return nullptr;

  if (CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, /*Diagnose=*/true,
                          NamingClass: FoundDelete.getNamingClass(), Decl: Result.Found,
                          Operator: Result.FD))
    return nullptr;

  assert(Result.FD && "operator delete missing from global scope?");
  return Result.FD;
}
3592
// Finds the 'operator delete' a (possibly virtual) destructor for \p RD
// would invoke: first a class-scope operator delete, then, failing that,
// the usual global non-array operator delete. Returns null on error.
FunctionDecl *Sema::FindDeallocationFunctionForDestructor(SourceLocation Loc,
                                                          CXXRecordDecl *RD,
                                                          bool Diagnose) {
  DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(Op: OO_Delete);

  FunctionDecl *OperatorDelete = nullptr;
  QualType DeallocType = Context.getRecordType(Decl: RD);
  // Class-scope lookup: neither alignment nor size is passed implicitly here.
  ImplicitDeallocationParameters IDP = {
      DeallocType, ShouldUseTypeAwareOperatorNewOrDelete(),
      AlignedAllocationMode::No, SizedDeallocationMode::No};

  if (FindDeallocationFunction(StartLoc: Loc, RD, Name, Operator&: OperatorDelete, IDP, Diagnose))
    return nullptr;

  if (OperatorDelete)
    return OperatorDelete;

  // If there's no class-specific operator delete, look up the global
  // non-array delete.
  IDP.PassAlignment = alignedAllocationModeFromBool(
      IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: DeallocType));
  IDP.PassSize = SizedDeallocationMode::Yes;
  return FindUsualDeallocationFunction(StartLoc: Loc, IDP, Name);
}
3617
// Looks for a class-scope operator delete/delete[] named \p Name in \p RD.
// On success, stores the chosen operator in \p Operator (or null if the
// class declares none at all) and returns false; returns true on error
// (ambiguous lookup, ambiguous overloads, or declared-but-unsuitable
// operators), emitting diagnostics when \p Diagnose is set.
bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                                    DeclarationName Name,
                                    FunctionDecl *&Operator,
                                    ImplicitDeallocationParameters IDP,
                                    bool Diagnose) {
  LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
  // Try to find operator delete/operator delete[] in class scope.
  LookupQualifiedName(R&: Found, LookupCtx: RD);

  if (Found.isAmbiguous())
    return true;

  Found.suppressDiagnostics();

  // Over-aligned types implicitly get the aligned-deallocation protocol
  // even if the caller did not request it.
  if (!isAlignedAllocation(Mode: IDP.PassAlignment) &&
      hasNewExtendedAlignment(S&: *this, AllocType: Context.getRecordType(Decl: RD)))
    IDP.PassAlignment = AlignedAllocationMode::Yes;

  // C++17 [expr.delete]p10:
  //   If the deallocation functions have class scope, the one without a
  //   parameter of type std::size_t is selected.
  llvm::SmallVector<UsualDeallocFnInfo, 4> Matches;
  resolveDeallocationOverload(S&: *this, R&: Found, IDP, Loc: StartLoc, BestFns: &Matches);

  // If we could find an overload, use it.
  if (Matches.size() == 1) {
    Operator = cast<CXXMethodDecl>(Val: Matches[0].FD);
    return CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, Diagnose,
                               NamingClass: Found.getNamingClass(), Decl: Matches[0].Found,
                               Operator);
  }

  // We found multiple suitable operators; complain about the ambiguity.
  // FIXME: The standard doesn't say to do this; it appears that the intent
  // is that this should never happen.
  if (!Matches.empty()) {
    if (Diagnose) {
      Diag(Loc: StartLoc, DiagID: diag::err_ambiguous_suitable_delete_member_function_found)
          << Name << RD;
      for (auto &Match : Matches)
        Diag(Loc: Match.FD->getLocation(), DiagID: diag::note_member_declared_here) << Name;
    }
    return true;
  }

  // We did find operator delete/operator delete[] declarations, but
  // none of them were suitable.
  if (!Found.empty()) {
    if (Diagnose) {
      Diag(Loc: StartLoc, DiagID: diag::err_no_suitable_delete_member_function_found)
          << Name << RD;

      for (NamedDecl *D : Found)
        Diag(Loc: D->getUnderlyingDecl()->getLocation(),
             DiagID: diag::note_member_declared_here) << Name;
    }
    return true;
  }

  // No class-scope operator at all: not an error; caller falls back to the
  // global operator.
  Operator = nullptr;
  return false;
}
3680
namespace {
/// Checks whether a delete-expression and the new-expression used for
/// initializing the deletee have the same array form (i.e. diagnoses
/// `delete p` paired with `new T[n]` and `delete[] p` paired with `new T`).
class MismatchingNewDeleteDetector {
public:
  enum MismatchResult {
    /// Indicates that there is no mismatch or a mismatch cannot be proven.
    NoMismatch,
    /// Indicates that variable is initialized with mismatching form of \a new.
    VarInitMismatches,
    /// Indicates that member is initialized with mismatching form of \a new.
    MemberInitMismatches,
    /// Indicates that 1 or more constructors' definitions could not be
    /// analyzed, and they will be checked again at the end of translation unit.
    AnalyzeLater
  };

  /// \param EndOfTU True, if this is the final analysis at the end of
  /// translation unit. False, if this is the initial analysis at the point
  /// delete-expression was encountered.
  explicit MismatchingNewDeleteDetector(bool EndOfTU)
      : Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
        HasUndefinedConstructors(false) {}

  /// Checks whether pointee of a delete-expression is initialized with
  /// matching form of new-expression.
  ///
  /// If return value is \c VarInitMismatches or \c MemberInitMismatches at the
  /// point where delete-expression is encountered, then a warning will be
  /// issued immediately. If return value is \c AnalyzeLater at the point where
  /// delete-expression is seen, then member will be analyzed at the end of
  /// translation unit. \c AnalyzeLater is returned iff at least one constructor
  /// couldn't be analyzed. If at least one constructor initializes the member
  /// with matching type of new, the return value is \c NoMismatch.
  MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
  /// Analyzes a class member.
  /// \param Field Class member to analyze.
  /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
  /// for deleting the \p Field.
  MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
  // The member currently under analysis (null when analyzing a variable).
  FieldDecl *Field;
  /// List of mismatching new-expressions used for initialization of the pointee
  llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
  /// Indicates whether delete-expression was in array form.
  bool IsArrayForm;

private:
  const bool EndOfTU;
  /// Indicates that there is at least one constructor without body.
  bool HasUndefinedConstructors;
  /// Returns \c CXXNewExpr from given initialization expression.
  /// \param E Expression used for initializing pointee in delete-expression.
  /// E can be a single-element \c InitListExpr consisting of new-expression.
  const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
  /// Returns whether member is initialized with mismatching form of
  /// \c new either by the member initializer or in-class initialization.
  ///
  /// If bodies of all constructors are not visible at the end of translation
  /// unit or at least one constructor initializes member with the matching
  /// form of \c new, mismatch cannot be proven, and this function will return
  /// \c NoMismatch.
  MismatchResult analyzeMemberExpr(const MemberExpr *ME);
  /// Returns whether variable is initialized with mismatching form of
  /// \c new.
  ///
  /// If variable is initialized with matching form of \c new or variable is not
  /// initialized with a \c new expression, this function will return true.
  /// If variable is initialized with mismatching form of \c new, returns false.
  /// \param D Variable to analyze.
  bool hasMatchingVarInit(const DeclRefExpr *D);
  /// Checks whether the constructor initializes pointee with mismatching
  /// form of \c new.
  ///
  /// Returns true, if member is initialized with matching form of \c new in
  /// member initializer list. Returns false, if member is initialized with the
  /// matching form of \c new in this constructor's initializer or given
  /// constructor isn't defined at the point where delete-expression is seen, or
  /// member isn't initialized by the constructor.
  bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
  /// Checks whether member is initialized with matching form of
  /// \c new in member initializer list.
  bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
  /// Checks whether member is initialized with mismatching form of \c new by
  /// in-class initializer.
  MismatchResult analyzeInClassInitializer();
};
} // namespace
3768
3769MismatchingNewDeleteDetector::MismatchResult
3770MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
3771 NewExprs.clear();
3772 assert(DE && "Expected delete-expression");
3773 IsArrayForm = DE->isArrayForm();
3774 const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
3775 if (const MemberExpr *ME = dyn_cast<const MemberExpr>(Val: E)) {
3776 return analyzeMemberExpr(ME);
3777 } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(Val: E)) {
3778 if (!hasMatchingVarInit(D))
3779 return VarInitMismatches;
3780 }
3781 return NoMismatch;
3782}
3783
3784const CXXNewExpr *
3785MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
3786 assert(E != nullptr && "Expected a valid initializer expression");
3787 E = E->IgnoreParenImpCasts();
3788 if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(Val: E)) {
3789 if (ILE->getNumInits() == 1)
3790 E = dyn_cast<const CXXNewExpr>(Val: ILE->getInit(Init: 0)->IgnoreParenImpCasts());
3791 }
3792
3793 return dyn_cast_or_null<const CXXNewExpr>(Val: E);
3794}
3795
3796bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
3797 const CXXCtorInitializer *CI) {
3798 const CXXNewExpr *NE = nullptr;
3799 if (Field == CI->getMember() &&
3800 (NE = getNewExprFromInitListOrExpr(E: CI->getInit()))) {
3801 if (NE->isArray() == IsArrayForm)
3802 return true;
3803 else
3804 NewExprs.push_back(Elt: NE);
3805 }
3806 return false;
3807}
3808
bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
    const CXXConstructorDecl *CD) {
  // Implicit constructors cannot contain a member initializer with `new`.
  if (CD->isImplicit())
    return false;
  const FunctionDecl *Definition = CD;
  // isDefined() updates Definition to point at the defining declaration.
  // If no definition is visible we cannot prove anything yet; record that
  // fact. At end-of-TU, returning true means "treat as matching" (give the
  // code the benefit of the doubt); earlier, returning false lets the
  // caller schedule re-analysis.
  if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
    HasUndefinedConstructors = true;
    return EndOfTU;
  }
  for (const auto *CI : cast<const CXXConstructorDecl>(Val: Definition)->inits()) {
    if (hasMatchingNewInCtorInit(CI))
      return true;
  }
  return false;
}
3824
3825MismatchingNewDeleteDetector::MismatchResult
3826MismatchingNewDeleteDetector::analyzeInClassInitializer() {
3827 assert(Field != nullptr && "This should be called only for members");
3828 const Expr *InitExpr = Field->getInClassInitializer();
3829 if (!InitExpr)
3830 return EndOfTU ? NoMismatch : AnalyzeLater;
3831 if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(E: InitExpr)) {
3832 if (NE->isArray() != IsArrayForm) {
3833 NewExprs.push_back(Elt: NE);
3834 return MemberInitMismatches;
3835 }
3836 }
3837 return NoMismatch;
3838}
3839
MismatchingNewDeleteDetector::MismatchResult
MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
                                           bool DeleteWasArrayForm) {
  assert(Field != nullptr && "Analysis requires a valid class member.");
  // Stash the field and delete form in members so the hasMatchingNewInCtor*
  // helpers can see them while walking the constructors.
  this->Field = Field;
  IsArrayForm = DeleteWasArrayForm;
  const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Val: Field->getParent());
  // Any single constructor that initializes the field with a matching-form
  // `new` is enough to declare the code clean.
  for (const auto *CD : RD->ctors()) {
    if (hasMatchingNewInCtor(CD))
      return NoMismatch;
  }
  // Some constructor bodies were invisible: defer unless this is already
  // the end-of-TU pass.
  if (HasUndefinedConstructors)
    return EndOfTU ? NoMismatch : AnalyzeLater;
  // NewExprs was populated with mismatching initializers by the ctor walk.
  if (!NewExprs.empty())
    return MemberInitMismatches;
  // No ctor initialized the field at all; fall back to the in-class
  // initializer, if any.
  return Field->hasInClassInitializer() ? analyzeInClassInitializer()
                                        : NoMismatch;
}
3858
3859MismatchingNewDeleteDetector::MismatchResult
3860MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
3861 assert(ME != nullptr && "Expected a member expression");
3862 if (FieldDecl *F = dyn_cast<FieldDecl>(Val: ME->getMemberDecl()))
3863 return analyzeField(Field: F, DeleteWasArrayForm: IsArrayForm);
3864 return NoMismatch;
3865}
3866
3867bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
3868 const CXXNewExpr *NE = nullptr;
3869 if (const VarDecl *VD = dyn_cast<const VarDecl>(Val: D->getDecl())) {
3870 if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(E: VD->getInit())) &&
3871 NE->isArray() != IsArrayForm) {
3872 NewExprs.push_back(Elt: NE);
3873 }
3874 }
3875 return NewExprs.empty();
3876}
3877
3878static void
3879DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
3880 const MismatchingNewDeleteDetector &Detector) {
3881 SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(Loc: DeleteLoc);
3882 FixItHint H;
3883 if (!Detector.IsArrayForm)
3884 H = FixItHint::CreateInsertion(InsertionLoc: EndOfDelete, Code: "[]");
3885 else {
3886 SourceLocation RSquare = Lexer::findLocationAfterToken(
3887 loc: DeleteLoc, TKind: tok::l_square, SM: SemaRef.getSourceManager(),
3888 LangOpts: SemaRef.getLangOpts(), SkipTrailingWhitespaceAndNewLine: true);
3889 if (RSquare.isValid())
3890 H = FixItHint::CreateRemoval(RemoveRange: SourceRange(EndOfDelete, RSquare));
3891 }
3892 SemaRef.Diag(Loc: DeleteLoc, DiagID: diag::warn_mismatched_delete_new)
3893 << Detector.IsArrayForm << H;
3894
3895 for (const auto *NE : Detector.NewExprs)
3896 SemaRef.Diag(Loc: NE->getExprLoc(), DiagID: diag::note_allocated_here)
3897 << Detector.IsArrayForm;
3898}
3899
3900void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
3901 if (Diags.isIgnored(DiagID: diag::warn_mismatched_delete_new, Loc: SourceLocation()))
3902 return;
3903 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
3904 switch (Detector.analyzeDeleteExpr(DE)) {
3905 case MismatchingNewDeleteDetector::VarInitMismatches:
3906 case MismatchingNewDeleteDetector::MemberInitMismatches: {
3907 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc: DE->getBeginLoc(), Detector);
3908 break;
3909 }
3910 case MismatchingNewDeleteDetector::AnalyzeLater: {
3911 DeleteExprs[Detector.Field].push_back(
3912 Elt: std::make_pair(x: DE->getBeginLoc(), y: DE->isArrayForm()));
3913 break;
3914 }
3915 case MismatchingNewDeleteDetector::NoMismatch:
3916 break;
3917 }
3918}
3919
3920void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
3921 bool DeleteWasArrayForm) {
3922 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
3923 switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
3924 case MismatchingNewDeleteDetector::VarInitMismatches:
3925 llvm_unreachable("This analysis should have been done for class members.");
3926 case MismatchingNewDeleteDetector::AnalyzeLater:
3927 llvm_unreachable("Analysis cannot be postponed any point beyond end of "
3928 "translation unit.");
3929 case MismatchingNewDeleteDetector::MemberInitMismatches:
3930 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc, Detector);
3931 break;
3932 case MismatchingNewDeleteDetector::NoMismatch:
3933 break;
3934 }
3935}
3936
/// Semantic analysis for a `delete` / `delete[]` expression: converts the
/// operand to a pointer-to-object type, selects the `operator delete` to
/// call, references the destructor where needed, and builds the
/// CXXDeleteExpr. Returns ExprError() on failure.
ExprResult
Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                     bool ArrayForm, Expr *ExE) {
  // C++ [expr.delete]p1:
  //   The operand shall have a pointer type, or a class type having a single
  //   non-explicit conversion function to a pointer type. The result has type
  //   void.
  //
  // DR599 amends "pointer type" to "pointer to object type" in both cases.

  ExprResult Ex = ExE;
  FunctionDecl *OperatorDelete = nullptr;
  // ArrayForm may be rewritten below (deleting an array type with plain
  // `delete`); remember what the user actually wrote for the AST node.
  bool ArrayFormAsWritten = ArrayForm;
  bool UsualArrayDeleteWantsSize = false;

  // Type-dependent operands are checked at instantiation time; everything in
  // this block applies only to non-dependent operands.
  if (!Ex.get()->isTypeDependent()) {
    // Perform lvalue-to-rvalue cast, if needed.
    Ex = DefaultLvalueConversion(E: Ex.get());
    if (Ex.isInvalid())
      return ExprError();

    QualType Type = Ex.get()->getType();

    // Contextual converter that accepts only conversions yielding a pointer
    // to an object (or incomplete) type, per [expr.delete]p1 / DR599.
    class DeleteConverter : public ContextualImplicitConverter {
    public:
      DeleteConverter() : ContextualImplicitConverter(false, true) {}

      bool match(QualType ConvType) override {
        // FIXME: If we have an operator T* and an operator void*, we must pick
        // the operator T*.
        if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
          if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
            return true;
        return false;
      }

      SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                            QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_operand) << T;
      }

      SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                               QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_incomplete_class_type) << T;
      }

      SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
                                                 QualType T,
                                                 QualType ConvTy) override {
        return S.Diag(Loc, DiagID: diag::err_delete_explicit_conversion) << T << ConvTy;
      }

      SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
                                             QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
            << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                              QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_ambiguous_delete_operand) << T;
      }

      SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                          QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
            << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
                                               QualType T,
                                               QualType ConvTy) override {
        llvm_unreachable("conversion functions are permitted");
      }
    } Converter;

    Ex = PerformContextualImplicitConversion(Loc: StartLoc, FromE: Ex.get(), Converter);
    if (Ex.isInvalid())
      return ExprError();
    Type = Ex.get()->getType();
    if (!Converter.match(ConvType: Type))
      // FIXME: PerformContextualImplicitConversion should return ExprError
      // itself in this case.
      return ExprError();

    QualType Pointee = Type->castAs<PointerType>()->getPointeeType();
    QualType PointeeElem = Context.getBaseElementType(QT: Pointee);

    // Deleting through a non-default address space is only meaningful in
    // OpenCL C++; reject it elsewhere.
    if (Pointee.getAddressSpace() != LangAS::Default &&
        !getLangOpts().OpenCLCPlusPlus)
      return Diag(Loc: Ex.get()->getBeginLoc(),
                  DiagID: diag::err_address_space_qualified_delete)
             << Pointee.getUnqualifiedType()
             << Pointee.getQualifiers().getAddressSpaceAttributePrintValue();

    CXXRecordDecl *PointeeRD = nullptr;
    if (Pointee->isVoidType() && !isSFINAEContext()) {
      // The C++ standard bans deleting a pointer to a non-object type, which
      // effectively bans deletion of "void*". However, most compilers support
      // this, so we treat it as a warning unless we're in a SFINAE context.
      // But we still prohibit this since C++26.
      Diag(Loc: StartLoc, DiagID: LangOpts.CPlusPlus26 ? diag::err_delete_incomplete
                                        : diag::ext_delete_void_ptr_operand)
          << (LangOpts.CPlusPlus26 ? Pointee : Type)
          << Ex.get()->getSourceRange();
    } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
               Pointee->isSizelessType()) {
      return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_delete_operand)
                       << Type << Ex.get()->getSourceRange());
    } else if (!Pointee->isDependentType()) {
      // FIXME: This can result in errors if the definition was imported from a
      // module but is hidden.
      if (Pointee->isEnumeralType() ||
          !RequireCompleteType(Loc: StartLoc, T: Pointee,
                               DiagID: LangOpts.CPlusPlus26
                                   ? diag::err_delete_incomplete
                                   : diag::warn_delete_incomplete,
                               Args: Ex.get())) {
        if (const RecordType *RT = PointeeElem->getAs<RecordType>())
          PointeeRD = cast<CXXRecordDecl>(Val: RT->getDecl());
      }
    }

    // `delete` of an array type: warn, suggest `delete[]`, and proceed as if
    // the array form had been written.
    if (Pointee->isArrayType() && !ArrayForm) {
      Diag(Loc: StartLoc, DiagID: diag::warn_delete_array_type)
          << Type << Ex.get()->getSourceRange()
          << FixItHint::CreateInsertion(InsertionLoc: getLocForEndOfToken(Loc: StartLoc), Code: "[]");
      ArrayForm = true;
    }

    DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
        Op: ArrayForm ? OO_Array_Delete : OO_Delete);

    // For class types, look up a member operator delete first (unless
    // `::delete` was written) and handle the destructor.
    if (PointeeRD) {
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          AlignedAllocationMode::No, SizedDeallocationMode::No};
      if (!UseGlobal &&
          FindDeallocationFunction(StartLoc, RD: PointeeRD, Name: DeleteName,
                                   Operator&: OperatorDelete, IDP))
        return ExprError();

      // If we're allocating an array of records, check whether the
      // usual operator delete[] has a size_t parameter.
      if (ArrayForm) {
        // If the user specifically asked to use the global allocator,
        // we'll need to do the lookup into the class.
        if (UseGlobal)
          UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
              S&: *this, loc: StartLoc, PassType: IDP.PassTypeIdentity, allocType: PointeeElem);

        // Otherwise, the usual operator delete[] should be the
        // function we just found.
        else if (isa_and_nonnull<CXXMethodDecl>(Val: OperatorDelete)) {
          UsualDeallocFnInfo UDFI(
              *this, DeclAccessPair::make(D: OperatorDelete, AS: AS_public), Pointee,
              StartLoc);
          UsualArrayDeleteWantsSize = isSizedDeallocation(Mode: UDFI.IDP.PassSize);
        }
      }

      // Reference the destructor we will (conceptually) invoke, unless it is
      // irrelevant or the selected operator delete never calls it.
      if (!PointeeRD->hasIrrelevantDestructor()) {
        if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
          if (Dtor->isCalledByDelete(OpDel: OperatorDelete)) {
            MarkFunctionReferenced(Loc: StartLoc, Func: Dtor);
            if (DiagnoseUseOfDecl(D: Dtor, Locs: StartLoc))
              return ExprError();
          }
        }
      }

      CheckVirtualDtorCall(dtor: PointeeRD->getDestructor(), Loc: StartLoc,
                           /*IsDelete=*/true, /*CallCanBeVirtual=*/true,
                           /*WarnOnNonAbstractTypes=*/!ArrayForm,
                           DtorLoc: SourceLocation());
    }

    // No member operator delete was selected (non-class type, or `::delete`);
    // fall back to the usual global deallocation function.
    if (!OperatorDelete) {
      if (getLangOpts().OpenCLCPlusPlus) {
        Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default delete";
        return ExprError();
      }

      bool IsComplete = isCompleteType(Loc: StartLoc, T: Pointee);
      bool CanProvideSize =
          IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
                         Pointee.isDestructedType());
      bool Overaligned = hasNewExtendedAlignment(S&: *this, AllocType: Pointee);

      // Look for a global declaration.
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          alignedAllocationModeFromBool(IsAligned: Overaligned),
          sizedDeallocationModeFromBool(IsSized: CanProvideSize)};
      OperatorDelete = FindUsualDeallocationFunction(StartLoc, IDP, Name: DeleteName);
      if (!OperatorDelete)
        return ExprError();
    }

    if (OperatorDelete->isInvalidDecl())
      return ExprError();

    MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);

    // Check access and ambiguity of destructor if we're going to call it.
    // Note that this is required even for a virtual delete.
    bool IsVirtualDelete = false;
    if (PointeeRD) {
      if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
        if (Dtor->isCalledByDelete(OpDel: OperatorDelete))
          CheckDestructorAccess(Loc: Ex.get()->getExprLoc(), Dtor,
                                PDiag: PDiag(DiagID: diag::err_access_dtor) << PointeeElem);
        IsVirtualDelete = Dtor->isVirtual();
      }
    }

    DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc);

    // Type-aware operator delete takes a type-identity tag as its first
    // parameter; the pointer then comes second.
    unsigned AddressParamIdx = 0;
    if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
      QualType TypeIdentity = OperatorDelete->getParamDecl(i: 0)->getType();
      if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
                              DiagID: diag::err_incomplete_type))
        return ExprError();
      AddressParamIdx = 1;
    }

    // Convert the operand to the type of the first parameter of operator
    // delete. This is only necessary if we selected a destroying operator
    // delete that we are going to call (non-virtually); converting to void*
    // is trivial and left to AST consumers to handle.
    QualType ParamType =
        OperatorDelete->getParamDecl(i: AddressParamIdx)->getType();
    if (!IsVirtualDelete && !ParamType->getPointeeType()->isVoidType()) {
      Qualifiers Qs = Pointee.getQualifiers();
      if (Qs.hasCVRQualifiers()) {
        // Qualifiers are irrelevant to this conversion; we're only looking
        // for access and ambiguity.
        Qs.removeCVRQualifiers();
        QualType Unqual = Context.getPointerType(
            T: Context.getQualifiedType(T: Pointee.getUnqualifiedType(), Qs));
        Ex = ImpCastExprToType(E: Ex.get(), Type: Unqual, CK: CK_NoOp);
      }
      Ex = PerformImplicitConversion(From: Ex.get(), ToType: ParamType,
                                     Action: AssignmentAction::Passing);
      if (Ex.isInvalid())
        return ExprError();
    }
  }

  CXXDeleteExpr *Result = new (Context) CXXDeleteExpr(
      Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten,
      UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc);
  // Feed the finished expression to the mismatched new[]/delete analysis.
  AnalyzeDeleteExprMismatch(DE: Result);
  return Result;
}
4193
/// Resolve the global operator new/delete that __builtin_operator_new /
/// __builtin_operator_delete should call, via ordinary overload resolution
/// over the global allocation functions. On success sets \p Operator and
/// returns false; otherwise emits a diagnostic and returns true.
static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
                                            bool IsDelete,
                                            FunctionDecl *&Operator) {

  DeclarationName NewName = S.Context.DeclarationNames.getCXXOperatorName(
      Op: IsDelete ? OO_Delete : OO_New);

  // Only the global scope is searched; the caller has already ensured the
  // implicit declarations exist via DeclareGlobalNewDelete().
  LookupResult R(S, NewName, TheCall->getBeginLoc(), Sema::LookupOrdinaryName);
  S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
  assert(!R.empty() && "implicitly declared allocation functions not found");
  assert(!R.isAmbiguous() && "global allocation functions are ambiguous");

  // We do our own custom access checks below.
  R.suppressDiagnostics();

  SmallVector<Expr *, 8> Args(TheCall->arguments());
  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
       FnOvl != FnOvlEnd; ++FnOvl) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*FnOvl)->getUnderlyingDecl();

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: FnOvl.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: FnOvl.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  SourceRange Range = TheCall->getSourceRange();

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    assert(R.getNamingClass() == nullptr &&
           "class members should not be considered");

    // The builtin forms may only bind to the usual, replaceable global
    // allocation functions; anything else is an error.
    if (!FnDecl->isReplaceableGlobalAllocationFunction()) {
      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_builtin_operator_new_delete_not_usual)
          << (IsDelete ? 1 : 0) << Range;
      S.Diag(Loc: FnDecl->getLocation(), DiagID: diag::note_non_usual_function_declared_here)
          << R.getLookupName() << FnDecl->getSourceRange();
      return true;
    }

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_no_viable_function_in_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AllCandidates, Args);
    return true;

  case OR_Ambiguous:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AmbiguousCandidates, Args);
    return true;

  case OR_Deleted:
    S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                   CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
4277
/// Finish semantic analysis of a __builtin_operator_new /
/// __builtin_operator_delete call: resolve the global allocation function,
/// convert the arguments to its parameter types, and retype the call.
ExprResult Sema::BuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
                                                    bool IsDelete) {
  CallExpr *TheCall = cast<CallExpr>(Val: TheCallResult.get());
  // These builtins exist only in C++.
  if (!getLangOpts().CPlusPlus) {
    Diag(Loc: TheCall->getExprLoc(), DiagID: diag::err_builtin_requires_language)
        << (IsDelete ? "__builtin_operator_delete" : "__builtin_operator_new")
        << "C++";
    return ExprError();
  }
  // CodeGen assumes it can find the global new and delete to call,
  // so ensure that they are declared.
  DeclareGlobalNewDelete();

  FunctionDecl *OperatorNewOrDelete = nullptr;
  if (resolveBuiltinNewDeleteOverload(S&: *this, TheCall, IsDelete,
                                      Operator&: OperatorNewOrDelete))
    return ExprError();
  assert(OperatorNewOrDelete && "should be found");

  DiagnoseUseOfDecl(D: OperatorNewOrDelete, Locs: TheCall->getExprLoc());
  MarkFunctionReferenced(Loc: TheCall->getExprLoc(), Func: OperatorNewOrDelete);

  // Rewrite the call in place: result type and each argument are converted
  // as if the selected operator had been called directly.
  TheCall->setType(OperatorNewOrDelete->getReturnType());
  for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
    QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Type: ParamTy, Consumed: false);
    ExprResult Arg = PerformCopyInitialization(
        Entity, EqualLoc: TheCall->getArg(Arg: i)->getBeginLoc(), Init: TheCall->getArg(Arg: i));
    if (Arg.isInvalid())
      return ExprError();
    TheCall->setArg(Arg: i, ArgExpr: Arg.get());
  }
  // The parser built the callee as a cast of the builtin; retype it to the
  // resolved function's type so later phases see a consistent call.
  auto Callee = dyn_cast<ImplicitCastExpr>(Val: TheCall->getCallee());
  assert(Callee && Callee->getCastKind() == CK_BuiltinFnToFnPtr &&
         "Callee expected to be implicit cast to a builtin function pointer");
  Callee->setType(OperatorNewOrDelete->getType());

  return TheCallResult;
}
4318
/// Warn about deleting (or explicitly destroying) a polymorphic class
/// through a non-virtual destructor, which is undefined behavior when the
/// dynamic type differs from the static type ([expr.delete]p3).
void Sema::CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                                bool IsDelete, bool CallCanBeVirtual,
                                bool WarnOnNonAbstractTypes,
                                SourceLocation DtorLoc) {
  // Nothing to do if there is no destructor, it is already virtual, the call
  // cannot dispatch virtually, or we are in an unevaluated context.
  if (!dtor || dtor->isVirtual() || !CallCanBeVirtual || isUnevaluatedContext())
    return;

  // C++ [expr.delete]p3:
  //   In the first alternative (delete object), if the static type of the
  //   object to be deleted is different from its dynamic type, the static
  //   type shall be a base class of the dynamic type of the object to be
  //   deleted and the static type shall have a virtual destructor or the
  //   behavior is undefined.
  //
  const CXXRecordDecl *PointeeRD = dtor->getParent();
  // Note: a final class cannot be derived from, no issue there
  if (!PointeeRD->isPolymorphic() || PointeeRD->hasAttr<FinalAttr>())
    return;

  // If the superclass is in a system header, there's nothing that can be done.
  // The `delete` (where we emit the warning) can be in a system header,
  // what matters for this warning is where the deleted type is defined.
  if (getSourceManager().isInSystemHeader(Loc: PointeeRD->getLocation()))
    return;

  QualType ClassType = dtor->getFunctionObjectParameterType();
  if (PointeeRD->isAbstract()) {
    // If the class is abstract, we warn by default, because we're
    // sure the code has undefined behavior.
    Diag(Loc, DiagID: diag::warn_delete_abstract_non_virtual_dtor) << (IsDelete ? 0 : 1)
                                                         << ClassType;
  } else if (WarnOnNonAbstractTypes) {
    // Otherwise, if this is not an array delete, it's a bit suspect,
    // but not necessarily wrong.
    Diag(Loc, DiagID: diag::warn_delete_non_virtual_dtor) << (IsDelete ? 0 : 1)
                                                << ClassType;
  }
  // For an explicit destructor call, suggest qualifying it so the call is
  // resolved statically (e.g. `p->T::~T()`).
  if (!IsDelete) {
    std::string TypeStr;
    ClassType.getAsStringInternal(Str&: TypeStr, Policy: getPrintingPolicy());
    Diag(Loc: DtorLoc, DiagID: diag::note_delete_non_virtual)
        << FixItHint::CreateInsertion(InsertionLoc: DtorLoc, Code: TypeStr + "::");
  }
}
4363
4364Sema::ConditionResult Sema::ActOnConditionVariable(Decl *ConditionVar,
4365 SourceLocation StmtLoc,
4366 ConditionKind CK) {
4367 ExprResult E =
4368 CheckConditionVariable(ConditionVar: cast<VarDecl>(Val: ConditionVar), StmtLoc, CK);
4369 if (E.isInvalid())
4370 return ConditionError();
4371 E = ActOnFinishFullExpr(Expr: E.get(), /*DiscardedValue*/ false);
4372 return ConditionResult(*this, ConditionVar, E,
4373 CK == ConditionKind::ConstexprIf);
4374}
4375
4376ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
4377 SourceLocation StmtLoc,
4378 ConditionKind CK) {
4379 if (ConditionVar->isInvalidDecl())
4380 return ExprError();
4381
4382 QualType T = ConditionVar->getType();
4383
4384 // C++ [stmt.select]p2:
4385 // The declarator shall not specify a function or an array.
4386 if (T->isFunctionType())
4387 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4388 DiagID: diag::err_invalid_use_of_function_type)
4389 << ConditionVar->getSourceRange());
4390 else if (T->isArrayType())
4391 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4392 DiagID: diag::err_invalid_use_of_array_type)
4393 << ConditionVar->getSourceRange());
4394
4395 ExprResult Condition = BuildDeclRefExpr(
4396 D: ConditionVar, Ty: ConditionVar->getType().getNonReferenceType(), VK: VK_LValue,
4397 Loc: ConditionVar->getLocation());
4398
4399 switch (CK) {
4400 case ConditionKind::Boolean:
4401 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get());
4402
4403 case ConditionKind::ConstexprIf:
4404 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get(), IsConstexpr: true);
4405
4406 case ConditionKind::Switch:
4407 return CheckSwitchCondition(SwitchLoc: StmtLoc, Cond: Condition.get());
4408 }
4409
4410 llvm_unreachable("unexpected condition kind");
4411}
4412
4413ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
4414 // C++11 6.4p4:
4415 // The value of a condition that is an initialized declaration in a statement
4416 // other than a switch statement is the value of the declared variable
4417 // implicitly converted to type bool. If that conversion is ill-formed, the
4418 // program is ill-formed.
4419 // The value of a condition that is an expression is the value of the
4420 // expression, implicitly converted to bool.
4421 //
4422 // C++23 8.5.2p2
4423 // If the if statement is of the form if constexpr, the value of the condition
4424 // is contextually converted to bool and the converted expression shall be
4425 // a constant expression.
4426 //
4427
4428 ExprResult E = PerformContextuallyConvertToBool(From: CondExpr);
4429 if (!IsConstexpr || E.isInvalid() || E.get()->isValueDependent())
4430 return E;
4431
4432 E = ActOnFinishFullExpr(Expr: E.get(), CC: E.get()->getExprLoc(),
4433 /*DiscardedValue*/ false,
4434 /*IsConstexpr*/ true);
4435 if (E.isInvalid())
4436 return E;
4437
4438 // FIXME: Return this value to the caller so they don't need to recompute it.
4439 llvm::APSInt Cond;
4440 E = VerifyIntegerConstantExpression(
4441 E: E.get(), Result: &Cond,
4442 DiagID: diag::err_constexpr_if_condition_expression_is_not_constant);
4443 return E;
4444}
4445
4446bool
4447Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
4448 // Look inside the implicit cast, if it exists.
4449 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Val: From))
4450 From = Cast->getSubExpr();
4451
4452 // A string literal (2.13.4) that is not a wide string literal can
4453 // be converted to an rvalue of type "pointer to char"; a wide
4454 // string literal can be converted to an rvalue of type "pointer
4455 // to wchar_t" (C++ 4.2p2).
4456 if (StringLiteral *StrLit = dyn_cast<StringLiteral>(Val: From->IgnoreParens()))
4457 if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
4458 if (const BuiltinType *ToPointeeType
4459 = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
4460 // This conversion is considered only when there is an
4461 // explicit appropriate pointer target type (C++ 4.2p2).
4462 if (!ToPtrType->getPointeeType().hasQualifiers()) {
4463 switch (StrLit->getKind()) {
4464 case StringLiteralKind::UTF8:
4465 case StringLiteralKind::UTF16:
4466 case StringLiteralKind::UTF32:
4467 // We don't allow UTF literals to be implicitly converted
4468 break;
4469 case StringLiteralKind::Ordinary:
4470 case StringLiteralKind::Binary:
4471 return (ToPointeeType->getKind() == BuiltinType::Char_U ||
4472 ToPointeeType->getKind() == BuiltinType::Char_S);
4473 case StringLiteralKind::Wide:
4474 return Context.typesAreCompatible(T1: Context.getWideCharType(),
4475 T2: QualType(ToPointeeType, 0));
4476 case StringLiteralKind::Unevaluated:
4477 assert(false && "Unevaluated string literal in expression");
4478 break;
4479 }
4480 }
4481 }
4482
4483 return false;
4484}
4485
/// Build the expression for the user-defined-conversion step of an implicit
/// conversion sequence: either a converting-constructor call
/// (CK_ConstructorConversion) or a conversion-member-function call
/// (CK_UserDefinedConversion). The result is bound to a temporary as needed.
static ExprResult BuildCXXCastArgument(Sema &S,
                                       SourceLocation CastLoc,
                                       QualType Ty,
                                       CastKind Kind,
                                       CXXMethodDecl *Method,
                                       DeclAccessPair FoundDecl,
                                       bool HadMultipleCandidates,
                                       Expr *From) {
  switch (Kind) {
  default: llvm_unreachable("Unhandled cast kind!");
  case CK_ConstructorConversion: {
    CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Val: Method);
    SmallVector<Expr*, 8> ConstructorArgs;

    // Constructing an abstract type is never valid.
    if (S.RequireNonAbstractType(Loc: CastLoc, T: Ty,
                                 DiagID: diag::err_allocation_of_abstract_type))
      return ExprError();

    // Convert the operand into the constructor's parameter list.
    if (S.CompleteConstructorCall(Constructor, DeclInitType: Ty, ArgsPtr: From, Loc: CastLoc,
                                  ConvertedArgs&: ConstructorArgs))
      return ExprError();

    S.CheckConstructorAccess(Loc: CastLoc, D: Constructor, FoundDecl,
                             Entity: InitializedEntity::InitializeTemporary(Type: Ty));
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    ExprResult Result = S.BuildCXXConstructExpr(
        ConstructLoc: CastLoc, DeclInitType: Ty, FoundDecl, Constructor: cast<CXXConstructorDecl>(Val: Method),
        Exprs: ConstructorArgs, HadMultipleCandidates,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    if (Result.isInvalid())
      return ExprError();

    return S.MaybeBindToTemporary(E: Result.getAs<Expr>());
  }

  case CK_UserDefinedConversion: {
    assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");

    S.CheckMemberOperatorAccess(Loc: CastLoc, ObjectExpr: From, /*arg*/ ArgExpr: nullptr, FoundDecl);
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    // Create an implicit call expr that calls it.
    CXXConversionDecl *Conv = cast<CXXConversionDecl>(Val: Method);
    ExprResult Result = S.BuildCXXMemberCallExpr(Exp: From, FoundDecl, Method: Conv,
                                                 HadMultipleCandidates);
    if (Result.isInvalid())
      return ExprError();
    // Record usage of conversion in an implicit cast.
    Result = ImplicitCastExpr::Create(Context: S.Context, T: Result.get()->getType(),
                                      Kind: CK_UserDefinedConversion, Operand: Result.get(),
                                      BasePath: nullptr, Cat: Result.get()->getValueKind(),
                                      FPO: S.CurFPFeatureOverrides());

    return S.MaybeBindToTemporary(E: Result.get());
  }
  }
}
4547
/// Apply a previously-computed implicit conversion sequence to \p From,
/// producing an expression of type \p ToType (or ExprError with a
/// diagnostic for ambiguous/bad sequences).
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const ImplicitConversionSequence &ICS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  // C++ [over.match.oper]p7: [...] operands of class type are converted [...]
  if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp &&
      !From->getType()->isRecordType())
    return From;

  switch (ICS.getKind()) {
  case ImplicitConversionSequence::StandardConversion: {
    // A plain standard conversion sequence: delegate to the SCS overload.
    ExprResult Res = PerformImplicitConversion(From, ToType, SCS: ICS.Standard,
                                               Action, CCK);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
    break;
  }

  case ImplicitConversionSequence::UserDefinedConversion: {

    FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
    CastKind CastKind;
    QualType BeforeToType;
    assert(FD && "no conversion function for user-defined conversion seq");
    if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(Val: FD)) {
      CastKind = CK_UserDefinedConversion;

      // If the user-defined conversion is specified by a conversion function,
      // the initial standard conversion sequence converts the source type to
      // the implicit object parameter of the conversion function.
      BeforeToType = Context.getTagDeclType(Decl: Conv->getParent());
    } else {
      const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(Val: FD);
      CastKind = CK_ConstructorConversion;
      // Do no conversion if dealing with ... for the first conversion.
      if (!ICS.UserDefined.EllipsisConversion) {
        // If the user-defined conversion is specified by a constructor, the
        // initial standard conversion sequence converts the source type to
        // the type required by the argument of the constructor
        BeforeToType = Ctor->getParamDecl(i: 0)->getType().getNonReferenceType();
      }
    }
    // Watch out for ellipsis conversion.
    if (!ICS.UserDefined.EllipsisConversion) {
      // First standard conversion: source -> parameter/object type.
      ExprResult Res = PerformImplicitConversion(
          From, ToType: BeforeToType, SCS: ICS.UserDefined.Before,
          Action: AssignmentAction::Converting, CCK);
      if (Res.isInvalid())
        return ExprError();
      From = Res.get();
    }

    // The user-defined conversion itself (ctor call or conversion fn call).
    ExprResult CastArg = BuildCXXCastArgument(
        S&: *this, CastLoc: From->getBeginLoc(), Ty: ToType.getNonReferenceType(), Kind: CastKind,
        Method: cast<CXXMethodDecl>(Val: FD), FoundDecl: ICS.UserDefined.FoundConversionFunction,
        HadMultipleCandidates: ICS.UserDefined.HadMultipleCandidates, From);

    if (CastArg.isInvalid())
      return ExprError();

    From = CastArg.get();

    // C++ [over.match.oper]p7:
    //   [...] the second standard conversion sequence of a user-defined
    //   conversion sequence is not applied.
    if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp)
      return From;

    // Second standard conversion: conversion result -> destination type.
    return PerformImplicitConversion(From, ToType, SCS: ICS.UserDefined.After,
                                     Action: AssignmentAction::Converting, CCK);
  }

  case ImplicitConversionSequence::AmbiguousConversion:
    ICS.DiagnoseAmbiguousConversion(S&: *this, CaretLoc: From->getExprLoc(),
                                    PDiag: PDiag(DiagID: diag::err_typecheck_ambiguous_condition)
                                        << From->getSourceRange());
    return ExprError();

  case ImplicitConversionSequence::EllipsisConversion:
  case ImplicitConversionSequence::StaticObjectArgumentConversion:
    llvm_unreachable("bad conversion");

  case ImplicitConversionSequence::BadConversion:
    // Re-run the assignment checks purely to produce a diagnostic; a
    // "Compatible" verdict here is downgraded to Incompatible because the
    // conversion sequence already proved the conversion is bad.
    AssignConvertType ConvTy =
        CheckAssignmentConstraints(Loc: From->getExprLoc(), LHSType: ToType, RHSType: From->getType());
    bool Diagnosed = DiagnoseAssignmentResult(
        ConvTy: ConvTy == AssignConvertType::Compatible
            ? AssignConvertType::Incompatible
            : ConvTy,
        Loc: From->getExprLoc(), DstType: ToType, SrcType: From->getType(), SrcExpr: From, Action);
    assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
    return ExprError();
  }

  // Everything went well.
  return From;
}
4647
4648// adjustVectorType - Compute the intermediate cast type casting elements of the
4649// from type to the elements of the to type without resizing the vector.
4650static QualType adjustVectorType(ASTContext &Context, QualType FromTy,
4651 QualType ToType, QualType *ElTy = nullptr) {
4652 QualType ElType = ToType;
4653 if (auto *ToVec = ToType->getAs<VectorType>())
4654 ElType = ToVec->getElementType();
4655
4656 if (ElTy)
4657 *ElTy = ElType;
4658 if (!FromTy->isVectorType())
4659 return ElType;
4660 auto *FromVec = FromTy->castAs<VectorType>();
4661 return Context.getExtVectorType(VectorType: ElType, NumElts: FromVec->getNumElements());
4662}
4663
/// Apply the standard conversion sequence \p SCS to the expression \p From,
/// rewriting it with the implicit casts that realize each of the (up to)
/// three conversion steps, plus any HLSL dimension adjustment, and return
/// the converted expression (or ExprError() on failure).
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const StandardConversionSequence& SCS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  // C-style and functional casts relax some checks below (they are passed as
  // the "ignore base access" flag to the pointer/base conversion checks).
  bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
                 CCK == CheckedConversionKind::FunctionalCast);

  // Overall FIXME: we are recomputing too many types here and doing far too
  // much extra work. What this means is that we need to keep track of more
  // information that is computed when we try the implicit conversion initially,
  // so that we don't need to recompute anything here.
  QualType FromType = From->getType();

  if (SCS.CopyConstructor) {
    // FIXME: When can ToType be a reference type?
    assert(!ToType->isReferenceType());
    if (SCS.Second == ICK_Derived_To_Base) {
      SmallVector<Expr*, 8> ConstructorArgs;
      if (CompleteConstructorCall(
              Constructor: cast<CXXConstructorDecl>(Val: SCS.CopyConstructor), DeclInitType: ToType, ArgsPtr: From,
              /*FIXME:ConstructLoc*/ Loc: SourceLocation(), ConvertedArgs&: ConstructorArgs))
        return ExprError();
      return BuildCXXConstructExpr(
          /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
          FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: ConstructorArgs,
          /*HadMultipleCandidates*/ false,
          /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
          ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    }
    return BuildCXXConstructExpr(
        /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
        FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: From,
        /*HadMultipleCandidates*/ false,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
  }

  // Resolve overloaded function references.
  if (Context.hasSameType(T1: FromType, T2: Context.OverloadTy)) {
    DeclAccessPair Found;
    FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(AddressOfExpr: From, TargetType: ToType,
                                                          Complain: true, Found);
    if (!Fn)
      return ExprError();

    if (DiagnoseUseOfDecl(D: Fn, Locs: From->getBeginLoc()))
      return ExprError();

    ExprResult Res = FixOverloadedFunctionReference(E: From, FoundDecl: Found, Fn);
    if (Res.isInvalid())
      return ExprError();

    // We might get back another placeholder expression if we resolved to a
    // builtin.
    Res = CheckPlaceholderExpr(E: Res.get());
    if (Res.isInvalid())
      return ExprError();

    From = Res.get();
    FromType = From->getType();
  }

  // If we're converting to an atomic type, first convert to the corresponding
  // non-atomic type. The _Atomic wrapper is re-applied at the end, after all
  // three conversion steps have run (see the ToAtomicType check below).
  QualType ToAtomicType;
  if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) {
    ToAtomicType = ToType;
    ToType = ToAtomic->getValueType();
  }

  // Remember the pre-conversion type for the diagnostics emitted at the end
  // (unaligned-drop and nullable-to-nonnull).
  QualType InitialFromType = FromType;
  // Perform the first implicit conversion.
  switch (SCS.First) {
  case ICK_Identity:
    // Even an identity first conversion unwraps an atomic operand to its
    // underlying value type.
    if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
      FromType = FromAtomic->getValueType().getUnqualifiedType();
      From = ImplicitCastExpr::Create(Context, T: FromType, Kind: CK_AtomicToNonAtomic,
                                      Operand: From, /*BasePath=*/nullptr, Cat: VK_PRValue,
                                      FPO: FPOptionsOverride());
    }
    break;

  case ICK_Lvalue_To_Rvalue: {
    assert(From->getObjectKind() != OK_ObjCProperty);
    ExprResult FromRes = DefaultLvalueConversion(E: From);
    if (FromRes.isInvalid())
      return ExprError();

    From = FromRes.get();
    FromType = From->getType();
    break;
  }

  case ICK_Array_To_Pointer:
    FromType = Context.getArrayDecayedType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_ArrayToPointerDecay, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_HLSL_Array_RValue:
    // HLSL arrays convert in either direction: to an ArrayParameterType when
    // the destination is one, or back to the underlying constant array type
    // when the source is an ArrayParameterType.
    if (ToType->isArrayParameterType()) {
      FromType = Context.getArrayParameterType(Ty: FromType);
    } else if (FromType->isArrayParameterType()) {
      const ArrayParameterType *APT = cast<ArrayParameterType>(Val&: FromType);
      FromType = APT->getConstantArrayType(Ctx: Context);
    }
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_HLSLArrayRValue, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Function_To_Pointer:
    FromType = Context.getPointerType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_FunctionToPointerDecay,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;

  default:
    llvm_unreachable("Improper first standard conversion");
  }

  // Perform the second implicit conversion
  switch (SCS.Second) {
  case ICK_Identity:
    // C++ [except.spec]p5:
    //   [For] assignment to and initialization of pointers to functions,
    //   pointers to member functions, and references to functions: the
    //   target entity shall allow at least the exceptions allowed by the
    //   source value in the assignment or initialization.
    switch (Action) {
    case AssignmentAction::Assigning:
    case AssignmentAction::Initializing:
      // Note, function argument passing and returning are initialization.
    case AssignmentAction::Passing:
    case AssignmentAction::Returning:
    case AssignmentAction::Sending:
    case AssignmentAction::Passing_CFAudited:
      if (CheckExceptionSpecCompatibility(From, ToType))
        return ExprError();
      break;

    case AssignmentAction::Casting:
    case AssignmentAction::Converting:
      // Casts and implicit conversions are not initialization, so are not
      // checked for exception specification mismatches.
      break;
    }
    // Nothing else to do.
    break;

  case ICK_Integral_Promotion:
  case ICK_Integral_Conversion: {
    // For vector operands, step through a vector of the source's width with
    // the destination's element type (adjustVectorType); resizing, if any,
    // happens later in the SCS.Dimension step.
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType())
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType, ElTy: &ElTy);
    if (ElTy->isBooleanType()) {
      assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
             SCS.Second == ICK_Integral_Promotion &&
             "only enums with fixed underlying type can promote to bool");
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToBoolean, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    } else {
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralCast, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    }
    break;
  }

  case ICK_Floating_Promotion:
  case ICK_Floating_Conversion: {
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType())
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType);
    From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Promotion:
  case ICK_Complex_Conversion: {
    // Pick the cast kind from the real/integral mix of the two element types.
    QualType FromEl = From->getType()->castAs<ComplexType>()->getElementType();
    QualType ToEl = ToType->castAs<ComplexType>()->getElementType();
    CastKind CK;
    if (FromEl->isRealFloatingType()) {
      if (ToEl->isRealFloatingType())
        CK = CK_FloatingComplexCast;
      else
        CK = CK_FloatingComplexToIntegralComplex;
    } else if (ToEl->isRealFloatingType()) {
      CK = CK_IntegralComplexToFloatingComplex;
    } else {
      CK = CK_IntegralComplexCast;
    }
    From = ImpCastExprToType(E: From, Type: ToType, CK, VK: VK_PRValue, /*BasePath=*/nullptr,
                             CCK)
               .get();
    break;
  }

  case ICK_Floating_Integral: {
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType())
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType, ElTy: &ElTy);
    if (ElTy->isRealFloatingType())
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToFloating, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    else
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingToIntegral, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    break;
  }

  case ICK_Fixed_Point_Conversion:
    assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
           "Attempting implicit fixed point conversion without a fixed "
           "point operand");
    if (FromType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FloatingToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToFloating,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (FromType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_IntegralToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToIntegral,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isBooleanType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToBoolean,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointCast,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Compatible_Conversion:
    // Compatible types need no representation change; preserve the operand's
    // value kind through the no-op cast.
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: From->getValueKind(),
                             /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Writeback_Conversion:
  case ICK_Pointer_Conversion: {
    if (SCS.IncompatibleObjC && Action != AssignmentAction::Casting) {
      // Diagnose incompatible Objective-C conversions
      if (Action == AssignmentAction::Initializing ||
          Action == AssignmentAction::Assigning)
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << ToType << From->getType() << Action << From->getSourceRange()
            << 0;
      else
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << From->getType() << ToType << Action << From->getSourceRange()
            << 0;

      if (From->getType()->isObjCObjectPointerType() &&
          ToType->isObjCObjectPointerType())
        ObjC().EmitRelatedResultTypeNote(E: From);
    } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
               !ObjC().CheckObjCARCUnavailableWeakConversion(castType: ToType,
                                                             ExprType: From->getType())) {
      if (Action == AssignmentAction::Initializing)
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_weak_unavailable_assign);
      else
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_convesion_of_weak_unavailable)
            << (Action == AssignmentAction::Casting) << From->getType()
            << ToType << From->getSourceRange();
    }

    // Defer address space conversion to the third conversion.
    QualType FromPteeType = From->getType()->getPointeeType();
    QualType ToPteeType = ToType->getPointeeType();
    QualType NewToType = ToType;
    if (!FromPteeType.isNull() && !ToPteeType.isNull() &&
        FromPteeType.getAddressSpace() != ToPteeType.getAddressSpace()) {
      NewToType = Context.removeAddrSpaceQualType(T: ToPteeType);
      NewToType = Context.getAddrSpaceQualType(T: NewToType,
                                               AddressSpace: FromPteeType.getAddressSpace());
      if (ToType->isObjCObjectPointerType())
        NewToType = Context.getObjCObjectPointerType(OIT: NewToType);
      else if (ToType->isBlockPointerType())
        NewToType = Context.getBlockPointerType(T: NewToType);
      else
        NewToType = Context.getPointerType(T: NewToType);
    }

    CastKind Kind;
    CXXCastPath BasePath;
    if (CheckPointerConversion(From, ToType: NewToType, Kind, BasePath, IgnoreBaseAccess: CStyle))
      return ExprError();

    // Make sure we extend blocks if necessary.
    // FIXME: doing this here is really ugly.
    if (Kind == CK_BlockPointerToObjCPointerCast) {
      ExprResult E = From;
      (void)ObjC().PrepareCastToObjCObjectPointer(E);
      From = E.get();
    }
    if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
      ObjC().CheckObjCConversion(castRange: SourceRange(), castType: NewToType, op&: From, CCK);
    From = ImpCastExprToType(E: From, Type: NewToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK)
               .get();
    break;
  }

  case ICK_Pointer_Member: {
    CastKind Kind;
    CXXCastPath BasePath;
    switch (CheckMemberPointerConversion(
        FromType: From->getType(), ToPtrType: ToType->castAs<MemberPointerType>(), Kind, BasePath,
        CheckLoc: From->getExprLoc(), OpRange: From->getSourceRange(), IgnoreBaseAccess: CStyle,
        Direction: MemberPointerConversionDirection::Downcast)) {
    case MemberPointerConversionResult::Success:
      assert((Kind != CK_NullToMemberPointer ||
              From->isNullPointerConstant(Context,
                                          Expr::NPC_ValueDependentIsNull)) &&
             "Expr must be null pointer constant!");
      break;
    case MemberPointerConversionResult::Inaccessible:
      // Inaccessible bases are diagnosed by the check; the conversion itself
      // still proceeds.
      break;
    case MemberPointerConversionResult::DifferentPointee:
      llvm_unreachable("unexpected result");
    case MemberPointerConversionResult::NotDerived:
      llvm_unreachable("Should not have been called if derivation isn't OK.");
    case MemberPointerConversionResult::Ambiguous:
    case MemberPointerConversionResult::Virtual:
      return ExprError();
    }
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From =
        ImpCastExprToType(E: From, Type: ToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Boolean_Conversion: {
    // Perform half-to-boolean conversion via float.
    if (From->getType()->isHalfType()) {
      From = ImpCastExprToType(E: From, Type: Context.FloatTy, CK: CK_FloatingCast).get();
      FromType = Context.FloatTy;
    }
    QualType ElTy = FromType;
    QualType StepTy = ToType;
    if (FromType->isVectorType())
      ElTy = FromType->castAs<VectorType>()->getElementType();
    if (getLangOpts().HLSL &&
        (FromType->isVectorType() || ToType->isVectorType()))
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType);

    From = ImpCastExprToType(E: From, Type: StepTy, CK: ScalarTypeToBooleanCastKind(ScalarTy: ElTy),
                             VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Derived_To_Base: {
    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(
            Derived: From->getType(), Base: ToType.getNonReferenceType(), Loc: From->getBeginLoc(),
            Range: From->getSourceRange(), BasePath: &BasePath, IgnoreAccess: CStyle))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType.getNonReferenceType(),
                             CK: CK_DerivedToBase, VK: From->getValueKind(),
                             BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_SVE_Vector_Conversion:
  case ICK_RVV_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Vector_Splat: {
    // Vector splat from any arithmetic type to a vector.
    Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
    From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Real:
    // Case 1. x -> _Complex y
    if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
      QualType ElType = ToComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: From->getType())) {
        // do nothing
      } else if (From->getType()->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get();
      } else {
        assert(From->getType()->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get();
      }
      // y -> _Complex y
      From = ImpCastExprToType(E: From, Type: ToType,
                   CK: isFloatingComplex ? CK_FloatingRealToComplex
                                     : CK_IntegralRealToComplex).get();

      // Case 2. _Complex x -> y
    } else {
      auto *FromComplex = From->getType()->castAs<ComplexType>();
      QualType ElType = FromComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // _Complex x -> x
      From = ImpCastExprToType(E: From, Type: ElType,
                               CK: isFloatingComplex ? CK_FloatingComplexToReal
                                                 : CK_IntegralComplexToReal,
                               VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                 .get();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: ToType)) {
        // do nothing
      } else if (ToType->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingCast
                                                   : CK_IntegralToFloating,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      } else {
        assert(ToType->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingToIntegral
                                                   : CK_IntegralCast,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      }
    }
    break;

  case ICK_Block_Pointer_Conversion: {
    // An address-space difference between the block pointee types turns the
    // bitcast into an address space conversion.
    LangAS AddrSpaceL =
        ToType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    LangAS AddrSpaceR =
        FromType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    assert(Qualifiers::isAddressSpaceSupersetOf(AddrSpaceL, AddrSpaceR,
                                                getASTContext()) &&
           "Invalid cast");
    CastKind Kind =
        AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
    From = ImpCastExprToType(E: From, Type: ToType.getUnqualifiedType(), CK: Kind,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_TransparentUnionConversion: {
    ExprResult FromRes = From;
    AssignConvertType ConvTy =
        CheckTransparentUnionArgumentConstraints(ArgType: ToType, RHS&: FromRes);
    if (FromRes.isInvalid())
      return ExprError();
    From = FromRes.get();
    assert((ConvTy == AssignConvertType::Compatible) &&
           "Improper transparent union conversion");
    (void)ConvTy;
    break;
  }

  case ICK_Zero_Event_Conversion:
  case ICK_Zero_Queue_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType,
                             CK: CK_ZeroToOCLOpaqueType,
                             VK: From->getValueKind()).get();
    break;

  case ICK_Lvalue_To_Rvalue:
  case ICK_Array_To_Pointer:
  case ICK_Function_To_Pointer:
  case ICK_Function_Conversion:
  case ICK_Qualification:
  case ICK_Num_Conversion_Kinds:
  case ICK_C_Only_Conversion:
  case ICK_Incompatible_Pointer_Conversion:
  case ICK_HLSL_Array_RValue:
  case ICK_HLSL_Vector_Truncation:
  case ICK_HLSL_Vector_Splat:
    llvm_unreachable("Improper second standard conversion");
  }

  if (SCS.Dimension != ICK_Identity) {
    // If SCS.Element is not ICK_Identity the To and From types must be HLSL
    // vectors or matrices.

    // TODO: Support HLSL matrices.
    assert((!From->getType()->isMatrixType() && !ToType->isMatrixType()) &&
           "Dimension conversion for matrix types is not implemented yet.");
    assert((ToType->isVectorType() || ToType->isBuiltinType()) &&
           "Dimension conversion output must be vector or scalar type.");
    switch (SCS.Dimension) {
    case ICK_HLSL_Vector_Splat: {
      // Vector splat from any arithmetic type to a vector.
      Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
      From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
      break;
    }
    case ICK_HLSL_Vector_Truncation: {
      // Note: HLSL built-in vectors are ExtVectors. Since this truncates a
      // vector to a smaller vector or to a scalar, this can only operate on
      // arguments where the source type is an ExtVector and the destination
      // type is destination type is either an ExtVectorType or a builtin scalar
      // type.
      auto *FromVec = From->getType()->castAs<VectorType>();
      QualType TruncTy = FromVec->getElementType();
      if (auto *ToVec = ToType->getAs<VectorType>())
        TruncTy = Context.getExtVectorType(VectorType: TruncTy, NumElts: ToVec->getNumElements());
      From = ImpCastExprToType(E: From, Type: TruncTy, CK: CK_HLSLVectorTruncation,
                               VK: From->getValueKind())
                 .get();

      break;
    }
    case ICK_Identity:
    default:
      llvm_unreachable("Improper element standard conversion");
    }
  }

  // Perform the third (qualification) conversion.
  switch (SCS.Third) {
  case ICK_Identity:
    // Nothing to do.
    break;

  case ICK_Function_Conversion:
    // If both sides are functions (or pointers/references to them), there could
    // be incompatible exception declarations.
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Qualification: {
    ExprValueKind VK = From->getValueKind();
    CastKind CK = CK_NoOp;

    // An address-space change on the pointee (through a reference or a
    // pointer) upgrades the no-op cast to an address space conversion.
    if (ToType->isReferenceType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    if (ToType->isPointerType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType()->getPointeeType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    // Implicitly dropping the __unaligned qualifier is only warned about for
    // implicit conversions, not for explicit casts.
    if (!isCast(CCK) &&
        !ToType->getPointeeType().getQualifiers().hasUnaligned() &&
        From->getType()->getPointeeType().getQualifiers().hasUnaligned()) {
      Diag(Loc: From->getBeginLoc(), DiagID: diag::warn_imp_cast_drops_unaligned)
          << InitialFromType << ToType;
    }

    From = ImpCastExprToType(E: From, Type: ToType.getNonLValueExprType(Context), CK, VK,
                             /*BasePath=*/nullptr, CCK)
               .get();

    if (SCS.DeprecatedStringLiteralToCharPtr &&
        !getLangOpts().WritableStrings) {
      Diag(Loc: From->getBeginLoc(),
           DiagID: getLangOpts().CPlusPlus11
               ? diag::ext_deprecated_string_literal_conversion
               : diag::warn_deprecated_string_literal_conversion)
          << ToType.getNonReferenceType();
    }

    break;
  }

  default:
    llvm_unreachable("Improper third standard conversion");
  }

  // If this conversion sequence involved a scalar -> atomic conversion, perform
  // that conversion now.
  if (!ToAtomicType.isNull()) {
    assert(Context.hasSameType(
        ToAtomicType->castAs<AtomicType>()->getValueType(), From->getType()));
    From = ImpCastExprToType(E: From, Type: ToAtomicType, CK: CK_NonAtomicToAtomic,
                             VK: VK_PRValue, BasePath: nullptr, CCK)
               .get();
  }

  // Materialize a temporary if we're implicitly converting to a reference
  // type. This is not required by the C++ rules but is necessary to maintain
  // AST invariants.
  if (ToType->isReferenceType() && From->isPRValue()) {
    ExprResult Res = TemporaryMaterializationConversion(E: From);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
  }

  // If this conversion sequence succeeded and involved implicitly converting a
  // _Nullable type to a _Nonnull one, complain.
  if (!isCast(CCK))
    diagnoseNullableToNonnullConversion(DstType: ToType, SrcType: InitialFromType,
                                        Loc: From->getBeginLoc());

  return From;
}
5304
/// Type-check the operands of a pointer-to-member operator, .* or ->*
/// (C++ [expr.mptr.oper]), converting both operands in place.
///
/// \param LHS the object expression (.*) or pointer-to-object (->*);
///        replaced with its converted form.
/// \param RHS the pointer-to-member expression; replaced with its
///        converted form.
/// \param VK [out] the value category of the result.
/// \param Loc the location of the operator, used for diagnostics.
/// \param isIndirect true for ->*, false for .*.
/// \returns the result type (BoundMemberTy for member functions), or a
///          null QualType if a diagnostic was emitted.
QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
                                            ExprValueKind &VK,
                                            SourceLocation Loc,
                                            bool isIndirect) {
  assert(!LHS.get()->hasPlaceholderType() && !RHS.get()->hasPlaceholderType() &&
         "placeholders should have been weeded out by now");

  // The LHS undergoes lvalue conversions if this is ->*, and undergoes the
  // temporary materialization conversion otherwise.
  if (isIndirect)
    LHS = DefaultLvalueConversion(E: LHS.get());
  else if (LHS.get()->isPRValue())
    LHS = TemporaryMaterializationConversion(E: LHS.get());
  if (LHS.isInvalid())
    return QualType();

  // The RHS always undergoes lvalue conversions.
  RHS = DefaultLvalueConversion(E: RHS.get());
  if (RHS.isInvalid()) return QualType();

  const char *OpSpelling = isIndirect ? "->*" : ".*";
  // C++ 5.5p2
  //   The binary operator .* [p3: ->*] binds its second operand, which shall
  //   be of type "pointer to member of T" (where T is a completely-defined
  //   class type) [...]
  QualType RHSType = RHS.get()->getType();
  const MemberPointerType *MemPtr = RHSType->getAs<MemberPointerType>();
  if (!MemPtr) {
    Diag(Loc, DiagID: diag::err_bad_memptr_rhs)
        << OpSpelling << RHSType << RHS.get()->getSourceRange();
    return QualType();
  }

  CXXRecordDecl *RHSClass = MemPtr->getMostRecentCXXRecordDecl();

  // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the
  // member pointer points must be completely-defined. However, there is no
  // reason for this semantic distinction, and the rule is not enforced by
  // other compilers. Therefore, we do not check this property, as it is
  // likely to be considered a defect.

  // C++ 5.5p2
  //   [...] to its first operand, which shall be of class T or of a class of
  //   which T is an unambiguous and accessible base class. [p3: a pointer to
  //   such a class]
  QualType LHSType = LHS.get()->getType();
  if (isIndirect) {
    // For ->*, peel the pointer off the LHS; a non-pointer LHS gets a fix-it
    // suggesting .* instead.
    if (const PointerType *Ptr = LHSType->getAs<PointerType>())
      LHSType = Ptr->getPointeeType();
    else {
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs)
          << OpSpelling << 1 << LHSType
          << FixItHint::CreateReplacement(RemoveRange: SourceRange(Loc), Code: ".*");
      return QualType();
    }
  }
  CXXRecordDecl *LHSClass = LHSType->getAsCXXRecordDecl();

  if (!declaresSameEntity(D1: LHSClass, D2: RHSClass)) {
    // If we want to check the hierarchy, we need a complete type.
    if (RequireCompleteType(Loc, T: LHSType, DiagID: diag::err_bad_memptr_lhs,
                            Args: OpSpelling, Args: (int)isIndirect)) {
      return QualType();
    }

    if (!IsDerivedFrom(Loc, Derived: LHSClass, Base: RHSClass)) {
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs) << OpSpelling
        << (int)isIndirect << LHS.get()->getType();
      return QualType();
    }

    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(
            Derived: LHSType, Base: QualType(RHSClass->getTypeForDecl(), 0), Loc,
            Range: SourceRange(LHS.get()->getBeginLoc(), RHS.get()->getEndLoc()),
            BasePath: &BasePath))
      return QualType();

    // Cast LHS to type of use.
    QualType UseType = Context.getQualifiedType(T: RHSClass->getTypeForDecl(),
                                                Qs: LHSType.getQualifiers());
    if (isIndirect)
      UseType = Context.getPointerType(T: UseType);
    ExprValueKind VK = isIndirect ? VK_PRValue : LHS.get()->getValueKind();
    LHS = ImpCastExprToType(E: LHS.get(), Type: UseType, CK: CK_DerivedToBase, VK,
                            BasePath: &BasePath);
  }

  if (isa<CXXScalarValueInitExpr>(Val: RHS.get()->IgnoreParens())) {
    // Diagnose use of pointer-to-member type which when used as
    // the functional cast in a pointer-to-member expression.
    Diag(Loc, DiagID: diag::err_pointer_to_member_type) << isIndirect;
    return QualType();
  }

  // C++ 5.5p2
  //   The result is an object or a function of the type specified by the
  //   second operand.
  // The cv qualifiers are the union of those in the pointer and the left side,
  // in accordance with 5.5p5 and 5.2.5.
  QualType Result = MemPtr->getPointeeType();
  Result = Context.getCVRQualifiedType(T: Result, CVR: LHSType.getCVRQualifiers());

  // C++0x [expr.mptr.oper]p6:
  //   In a .* expression whose object expression is an rvalue, the program is
  //   ill-formed if the second operand is a pointer to member function with
  //   ref-qualifier &. In a ->* expression or in a .* expression whose object
  //   expression is an lvalue, the program is ill-formed if the second operand
  //   is a pointer to member function with ref-qualifier &&.
  if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
    switch (Proto->getRefQualifier()) {
    case RQ_None:
      // Do nothing
      break;

    case RQ_LValue:
      if (!isIndirect && !LHS.get()->Classify(Ctx&: Context).isLValue()) {
        // C++2a allows functions with ref-qualifier & if their cv-qualifier-seq
        // is (exactly) 'const'.
        if (Proto->isConst() && !Proto->isVolatile())
          Diag(Loc, DiagID: getLangOpts().CPlusPlus20
                        ? diag::warn_cxx17_compat_pointer_to_const_ref_member_on_rvalue
                        : diag::ext_pointer_to_const_ref_member_on_rvalue);
        else
          Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
              << RHSType << 1 << LHS.get()->getSourceRange();
      }
      break;

    case RQ_RValue:
      if (isIndirect || !LHS.get()->Classify(Ctx&: Context).isRValue())
        Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
            << RHSType << 0 << LHS.get()->getSourceRange();
      break;
    }
  }

  // C++ [expr.mptr.oper]p6:
  //   The result of a .* expression whose second operand is a pointer
  //   to a data member is of the same value category as its
  //   first operand. The result of a .* expression whose second
  //   operand is a pointer to a member function is a prvalue. The
  //   result of an ->* expression is an lvalue if its second operand
  //   is a pointer to data member and a prvalue otherwise.
  if (Result->isFunctionType()) {
    VK = VK_PRValue;
    return Context.BoundMemberTy;
  } else if (isIndirect) {
    VK = VK_LValue;
  } else {
    VK = LHS.get()->getValueKind();
  }

  return Result;
}
5460
/// Try to convert a type to another according to C++11 5.16p3.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, the two operands are attempted to be
/// converted to each other. This function does the conversion in one direction.
/// It returns true if the program is ill-formed and has already been diagnosed
/// as such.
///
/// \param Self the semantic-analysis object performing the check.
/// \param From the operand expression being converted.
/// \param To the operand expression whose type is the conversion target.
/// \param QuestionLoc the location of the '?', used in diagnostics.
/// \param HaveConversion [out] set to true if a usable conversion was found.
/// \param ToType [out] the (possibly reference-adjusted) type that \p From
///        should be converted to.
static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
                                SourceLocation QuestionLoc,
                                bool &HaveConversion,
                                QualType &ToType) {
  HaveConversion = false;
  ToType = To->getType();

  InitializationKind Kind =
      InitializationKind::CreateCopy(InitLoc: To->getBeginLoc(), EqualLoc: SourceLocation());
  // C++11 5.16p3
  //   The process for determining whether an operand expression E1 of type T1
  //   can be converted to match an operand expression E2 of type T2 is defined
  //   as follows:
  //   -- If E2 is an lvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to type "lvalue reference to T2", subject to the
  //      constraint that in the conversion the reference must bind directly to
  //      an lvalue.
  //   -- If E2 is an xvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to the type "rvalue reference to R2", subject to
  //      the constraint that the reference must bind directly.
  if (To->isGLValue()) {
    QualType T = Self.Context.getReferenceQualifiedType(e: To);
    InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);

    InitializationSequence InitSeq(Self, Entity, Kind, From);
    if (InitSeq.isDirectReferenceBinding()) {
      ToType = T;
      HaveConversion = true;
      return false;
    }

    if (InitSeq.isAmbiguous())
      return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);

    // The reference did not bind directly and was not ambiguous: fall through
    // and try the remaining (non-reference) interpretations below.
  }

  //   -- If E2 is an rvalue, or if the conversion above cannot be done:
  //      -- if E1 and E2 have class type, and the underlying class types are
  //         the same or one is a base class of the other:
  QualType FTy = From->getType();
  QualType TTy = To->getType();
  const RecordType *FRec = FTy->getAs<RecordType>();
  const RecordType *TRec = TTy->getAs<RecordType>();
  bool FDerivedFromT = FRec && TRec && FRec != TRec &&
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: FTy, Base: TTy);
  if (FRec && TRec && (FRec == TRec || FDerivedFromT ||
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: TTy, Base: FTy))) {
    //         E1 can be converted to match E2 if the class of T2 is the
    //         same type as, or a base class of, the class of T1, and
    //         [cv2 > cv1].
    if (FRec == TRec || FDerivedFromT) {
      if (TTy.isAtLeastAsQualifiedAs(other: FTy, Ctx: Self.getASTContext())) {
        InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
        InitializationSequence InitSeq(Self, Entity, Kind, From);
        if (InitSeq) {
          HaveConversion = true;
          return false;
        }

        if (InitSeq.isAmbiguous())
          return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);
      }
    }

    // The classes are related, but no (unambiguous) conversion exists in this
    // direction. That is not an error by itself; HaveConversion stays false.
    return false;
  }

  //     -- Otherwise: E1 can be converted to match E2 if E1 can be
  //        implicitly converted to the type that expression E2 would have
  //        if E2 were converted to an rvalue (or the type it has, if E2 is
  //        an rvalue).
  //
  // This actually refers very narrowly to the lvalue-to-rvalue conversion, not
  // to the array-to-pointer or function-to-pointer conversions.
  TTy = TTy.getNonLValueExprType(Context: Self.Context);

  InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
  InitializationSequence InitSeq(Self, Entity, Kind, From);
  HaveConversion = !InitSeq.Failed();
  ToType = TTy;
  if (InitSeq.isAmbiguous())
    return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);

  return false;
}
5552
/// Try to find a common type for two according to C++0x 5.16p5.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, overload resolution is used to find a
/// conversion to a common type.
///
/// \returns false on success, in which case \p LHS and \p RHS have been
/// replaced in place with the operands converted to the common type; true if
/// the program is ill-formed (a diagnostic has been emitted).
static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation QuestionLoc) {
  Expr *Args[2] = { LHS.get(), RHS.get() };
  OverloadCandidateSet CandidateSet(QuestionLoc,
                                    OverloadCandidateSet::CSK_Operator);
  Self.AddBuiltinOperatorCandidates(Op: OO_Conditional, OpLoc: QuestionLoc, Args,
                                    CandidateSet);

  OverloadCandidateSet::iterator Best;
  switch (CandidateSet.BestViableFunction(S&: Self, Loc: QuestionLoc, Best)) {
  case OR_Success: {
    // We found a match. Perform the conversions on the arguments and move on.
    ExprResult LHSRes = Self.PerformImplicitConversion(
        From: LHS.get(), ToType: Best->BuiltinParamTypes[0], ICS: Best->Conversions[0],
        Action: AssignmentAction::Converting);
    if (LHSRes.isInvalid())
      break;
    LHS = LHSRes;

    ExprResult RHSRes = Self.PerformImplicitConversion(
        From: RHS.get(), ToType: Best->BuiltinParamTypes[1], ICS: Best->Conversions[1],
        Action: AssignmentAction::Converting);
    if (RHSRes.isInvalid())
      break;
    RHS = RHSRes;
    // NOTE(review): built-in candidates normally carry no FunctionDecl, but if
    // resolution did select one, it must be marked referenced (ODR use).
    if (Best->Function)
      Self.MarkFunctionReferenced(Loc: QuestionLoc, Func: Best->Function);
    return false;
  }

  case OR_No_Viable_Function:

    // Emit a better diagnostic if one of the expressions is a null pointer
    // constant and the other is a pointer type. In this case, the user most
    // likely forgot to take the address of the other expression.
    if (Self.DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
      return true;

    Self.Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
        << LHS.get()->getType() << RHS.get()->getType()
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return true;

  case OR_Ambiguous:
    Self.Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous_ovl)
        << LHS.get()->getType() << RHS.get()->getType()
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    // FIXME: Print the possible common types by printing the return types of
    // the viable candidates.
    break;

  case OR_Deleted:
    llvm_unreachable("Conditional operator has only built-in overloads");
  }
  // A conversion failed (already diagnosed) or the result was ambiguous.
  return true;
}
5614
5615/// Perform an "extended" implicit conversion as returned by
5616/// TryClassUnification.
5617static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
5618 InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);
5619 InitializationKind Kind =
5620 InitializationKind::CreateCopy(InitLoc: E.get()->getBeginLoc(), EqualLoc: SourceLocation());
5621 Expr *Arg = E.get();
5622 InitializationSequence InitSeq(Self, Entity, Kind, Arg);
5623 ExprResult Result = InitSeq.Perform(S&: Self, Entity, Kind, Args: Arg);
5624 if (Result.isInvalid())
5625 return true;
5626
5627 E = Result;
5628 return false;
5629}
5630
5631// Check the condition operand of ?: to see if it is valid for the GCC
5632// extension.
5633static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
5634 QualType CondTy) {
5635 if (!CondTy->isVectorType() && !CondTy->isExtVectorType())
5636 return false;
5637 const QualType EltTy =
5638 cast<VectorType>(Val: CondTy.getCanonicalType())->getElementType();
5639 assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
5640 return EltTy->isIntegralType(Ctx);
5641}
5642
5643static bool isValidSizelessVectorForConditionalCondition(ASTContext &Ctx,
5644 QualType CondTy) {
5645 if (!CondTy->isSveVLSBuiltinType())
5646 return false;
5647 const QualType EltTy =
5648 cast<BuiltinType>(Val: CondTy.getCanonicalType())->getSveEltType(Ctx);
5649 assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
5650 return EltTy->isIntegralType(Ctx);
5651}
5652
// Check the operands of a vector conditional (GCC extension): the condition
// is a fixed-width vector, and the result must be a vector with the same
// element count and element size as the condition. Converts LHS/RHS in place
// as needed and returns the result type, or a null QualType after emitting a
// diagnostic.
QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS,
                                           SourceLocation QuestionLoc) {
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());

  QualType CondType = Cond.get()->getType();
  const auto *CondVT = CondType->castAs<VectorType>();
  QualType CondElementTy = CondVT->getElementType();
  unsigned CondElementCount = CondVT->getNumElements();
  QualType LHSType = LHS.get()->getType();
  const auto *LHSVT = LHSType->getAs<VectorType>();
  QualType RHSType = RHS.get()->getType();
  const auto *RHSVT = RHSType->getAs<VectorType>();

  QualType ResultType;


  if (LHSVT && RHSVT) {
    // The condition and the result must agree on whether they use the
    // ext_vector_type or the gcc vector_size flavor of vectors.
    if (isa<ExtVectorType>(Val: CondVT) != isa<ExtVectorType>(Val: LHSVT)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_cond_result_mismatch)
          << /*isExtVector*/ isa<ExtVectorType>(Val: CondVT);
      return {};
    }

    // If both are vector types, they must be the same type.
    if (!Context.hasSameType(T1: LHSType, T2: RHSType)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_mismatched)
          << LHSType << RHSType;
      return {};
    }
    ResultType = Context.getCommonSugaredType(X: LHSType, Y: RHSType);
  } else if (LHSVT || RHSVT) {
    // Exactly one side is a vector: let the common vector-operand checking
    // splat/convert the scalar side.
    ResultType = CheckVectorOperands(
        LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false, /*AllowBothBool*/ true,
        /*AllowBoolConversions*/ AllowBoolConversion: false,
        /*AllowBoolOperation*/ true,
        /*ReportInvalid*/ true);
    if (ResultType.isNull())
      return {};
  } else {
    // Both are scalar.
    LHSType = LHSType.getUnqualifiedType();
    RHSType = RHSType.getUnqualifiedType();
    QualType ResultElementTy =
        Context.hasSameType(T1: LHSType, T2: RHSType)
            ? Context.getCommonSugaredType(X: LHSType, Y: RHSType)
            : UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                         ACK: ArithConvKind::Conditional);

    if (ResultElementTy->isEnumeralType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_operand_type)
          << ResultElementTy;
      return {};
    }
    // Build a vector of the common element type with the condition's shape,
    // preserving the condition's vector flavor (ext_vector vs. generic).
    if (CondType->isExtVectorType())
      ResultType =
          Context.getExtVectorType(VectorType: ResultElementTy, NumElts: CondVT->getNumElements());
    else
      ResultType = Context.getVectorType(
          VectorType: ResultElementTy, NumElts: CondVT->getNumElements(), VecKind: VectorKind::Generic);

    // Splat both scalar operands to the result vector type.
    LHS = ImpCastExprToType(E: LHS.get(), Type: ResultType, CK: CK_VectorSplat);
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResultType, CK: CK_VectorSplat);
  }

  assert(!ResultType.isNull() && ResultType->isVectorType() &&
         (!CondType->isExtVectorType() || ResultType->isExtVectorType()) &&
         "Result should have been a vector type");
  auto *ResultVectorTy = ResultType->castAs<VectorType>();
  QualType ResultElementTy = ResultVectorTy->getElementType();
  unsigned ResultElementCount = ResultVectorTy->getNumElements();

  // The GCC extension requires the result to match the condition in both
  // element count and element size.
  if (ResultElementCount != CondElementCount) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_size) << CondType
                                                           << ResultType;
    return {};
  }

  if (Context.getTypeSize(T: ResultElementTy) !=
      Context.getTypeSize(T: CondElementTy)) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_element_size) << CondType
                                                                   << ResultType;
    return {};
  }

  return ResultType;
}
5741
// Sizeless (SVE scalable-vector) analogue of CheckVectorConditionalTypes: the
// condition is an SVE VLS builtin vector, and the result must match it in
// element count and element size. Converts LHS/RHS in place as needed and
// returns the result type, or a null QualType after emitting a diagnostic.
QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond,
                                                   ExprResult &LHS,
                                                   ExprResult &RHS,
                                                   SourceLocation QuestionLoc) {
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());

  QualType CondType = Cond.get()->getType();
  const auto *CondBT = CondType->castAs<BuiltinType>();
  QualType CondElementTy = CondBT->getSveEltType(Ctx: Context);
  llvm::ElementCount CondElementCount =
      Context.getBuiltinVectorTypeInfo(VecTy: CondBT).EC;

  QualType LHSType = LHS.get()->getType();
  const auto *LHSBT =
      LHSType->isSveVLSBuiltinType() ? LHSType->getAs<BuiltinType>() : nullptr;
  QualType RHSType = RHS.get()->getType();
  const auto *RHSBT =
      RHSType->isSveVLSBuiltinType() ? RHSType->getAs<BuiltinType>() : nullptr;

  QualType ResultType;

  if (LHSBT && RHSBT) {
    // If both are sizeless vector types, they must be the same type.
    if (!Context.hasSameType(T1: LHSType, T2: RHSType)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_mismatched)
          << LHSType << RHSType;
      return QualType();
    }
    ResultType = LHSType;
  } else if (LHSBT || RHSBT) {
    // Exactly one side is a sizeless vector: let the common sizeless-vector
    // operand checking splat/convert the scalar side.
    ResultType = CheckSizelessVectorOperands(LHS, RHS, Loc: QuestionLoc,
                                             /*IsCompAssign*/ false,
                                             OperationKind: ArithConvKind::Conditional);
    if (ResultType.isNull())
      return QualType();
  } else {
    // Both are scalar so splat
    QualType ResultElementTy;
    LHSType = LHSType.getCanonicalType().getUnqualifiedType();
    RHSType = RHSType.getCanonicalType().getUnqualifiedType();

    if (Context.hasSameType(T1: LHSType, T2: RHSType))
      ResultElementTy = LHSType;
    else
      ResultElementTy = UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                                   ACK: ArithConvKind::Conditional);

    if (ResultElementTy->isEnumeralType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_operand_type)
          << ResultElementTy;
      return QualType();
    }

    // Build a scalable vector of the common element type with the condition's
    // (known-minimum) element count.
    ResultType = Context.getScalableVectorType(
        EltTy: ResultElementTy, NumElts: CondElementCount.getKnownMinValue());

    LHS = ImpCastExprToType(E: LHS.get(), Type: ResultType, CK: CK_VectorSplat);
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResultType, CK: CK_VectorSplat);
  }

  assert(!ResultType.isNull() && ResultType->isSveVLSBuiltinType() &&
         "Result should have been a vector type");
  auto *ResultBuiltinTy = ResultType->castAs<BuiltinType>();
  QualType ResultElementTy = ResultBuiltinTy->getSveEltType(Ctx: Context);
  llvm::ElementCount ResultElementCount =
      Context.getBuiltinVectorTypeInfo(VecTy: ResultBuiltinTy).EC;

  // As in the fixed-width case, the result must match the condition in both
  // element count and element size.
  if (ResultElementCount != CondElementCount) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_size)
        << CondType << ResultType;
    return QualType();
  }

  if (Context.getTypeSize(T: ResultElementTy) !=
      Context.getTypeSize(T: CondElementTy)) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_element_size)
        << CondType << ResultType;
    return QualType();
  }

  return ResultType;
}
5825
// Check the operands of ?: under C++ semantics ([expr.cond]), including the
// GCC-compatible vector extension and the SVE sizeless-vector extension.
// Determines the result type and sets VK/OK to the value and object kind of
// the result, converting Cond/LHS/RHS in place as required. Returns a null
// QualType after emitting a diagnostic on error.
QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS, ExprValueKind &VK,
                                           ExprObjectKind &OK,
                                           SourceLocation QuestionLoc) {
  // FIXME: Handle C99's complex types, block pointers and Obj-C++ interface
  // pointers.

  // Assume r-value.
  VK = VK_PRValue;
  OK = OK_Ordinary;
  bool IsVectorConditional =
      isValidVectorForConditionalCondition(Ctx&: Context, CondTy: Cond.get()->getType());

  bool IsSizelessVectorConditional =
      isValidSizelessVectorForConditionalCondition(Ctx&: Context,
                                                   CondTy: Cond.get()->getType());

  // C++11 [expr.cond]p1
  //   The first expression is contextually converted to bool.
  // For the vector extensions, the condition stays a vector instead.
  if (!Cond.get()->isTypeDependent()) {
    ExprResult CondRes = IsVectorConditional || IsSizelessVectorConditional
                             ? DefaultFunctionArrayLvalueConversion(E: Cond.get())
                             : CheckCXXBooleanCondition(CondExpr: Cond.get());
    if (CondRes.isInvalid())
      return QualType();
    Cond = CondRes;
  } else {
    // To implement C++, the first expression typically doesn't alter the result
    // type of the conditional, however the GCC compatible vector extension
    // changes the result type to be that of the conditional. Since we cannot
    // know if this is a vector extension here, delay the conversion of the
    // LHS/RHS below until later.
    return Context.DependentTy;
  }


  // Either of the arguments dependent?
  if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent())
    return Context.DependentTy;

  // C++11 [expr.cond]p2
  //   If either the second or the third operand has type (cv) void, ...
  QualType LTy = LHS.get()->getType();
  QualType RTy = RHS.get()->getType();
  bool LVoid = LTy->isVoidType();
  bool RVoid = RTy->isVoidType();
  if (LVoid || RVoid) {
    //   ... one of the following shall hold:
    //   -- The second or the third operand (but not both) is a (possibly
    //      parenthesized) throw-expression; the result is of the type
    //      and value category of the other.
    bool LThrow = isa<CXXThrowExpr>(Val: LHS.get()->IgnoreParenImpCasts());
    bool RThrow = isa<CXXThrowExpr>(Val: RHS.get()->IgnoreParenImpCasts());

    // Void expressions aren't legal in the vector-conditional expressions.
    if (IsVectorConditional) {
      SourceRange DiagLoc =
          LVoid ? LHS.get()->getSourceRange() : RHS.get()->getSourceRange();
      bool IsThrow = LVoid ? LThrow : RThrow;
      Diag(Loc: DiagLoc.getBegin(), DiagID: diag::err_conditional_vector_has_void)
          << DiagLoc << IsThrow;
      return QualType();
    }

    if (LThrow != RThrow) {
      Expr *NonThrow = LThrow ? RHS.get() : LHS.get();
      VK = NonThrow->getValueKind();
      // DR (no number yet): the result is a bit-field if the
      // non-throw-expression operand is a bit-field.
      OK = NonThrow->getObjectKind();
      return NonThrow->getType();
    }

    //   -- Both the second and third operands have type void; the result is of
    //      type void and is a prvalue.
    if (LVoid && RVoid)
      return Context.getCommonSugaredType(X: LTy, Y: RTy);

    // Neither holds, error.
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_void_nonvoid)
        << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1)
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // Neither is void.
  if (IsVectorConditional)
    return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);

  if (IsSizelessVectorConditional)
    return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);

  // WebAssembly tables are not allowed as conditional LHS or RHS.
  if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_wasm_table_conditional_expression)
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // C++11 [expr.cond]p3
  //   Otherwise, if the second and third operand have different types, and
  //   either has (cv) class type [...] an attempt is made to convert each of
  //   those operands to the type of the other.
  if (!Context.hasSameType(T1: LTy, T2: RTy) &&
      (LTy->isRecordType() || RTy->isRecordType())) {
    // These return true if a single direction is already ambiguous.
    QualType L2RType, R2LType;
    bool HaveL2R, HaveR2L;
    if (TryClassUnification(Self&: *this, From: LHS.get(), To: RHS.get(), QuestionLoc, HaveConversion&: HaveL2R, ToType&: L2RType))
      return QualType();
    if (TryClassUnification(Self&: *this, From: RHS.get(), To: LHS.get(), QuestionLoc, HaveConversion&: HaveR2L, ToType&: R2LType))
      return QualType();

    //   If both can be converted, [...] the program is ill-formed.
    if (HaveL2R && HaveR2L) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous)
          << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    //   If exactly one conversion is possible, that conversion is applied to
    //   the chosen operand and the converted operands are used in place of the
    //   original operands for the remainder of this section.
    if (HaveL2R) {
      if (ConvertForConditional(Self&: *this, E&: LHS, T: L2RType) || LHS.isInvalid())
        return QualType();
      LTy = LHS.get()->getType();
    } else if (HaveR2L) {
      if (ConvertForConditional(Self&: *this, E&: RHS, T: R2LType) || RHS.isInvalid())
        return QualType();
      RTy = RHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p3
  //   if both are glvalues of the same value category and the same type except
  //   for cv-qualification, an attempt is made to convert each of those
  //   operands to the type of the other.
  // FIXME:
  //   Resolving a defect in P0012R1: we extend this to cover all cases where
  //   one of the operands is reference-compatible with the other, in order
  //   to support conditionals between functions differing in noexcept. This
  //   will similarly cover difference in array bounds after P0388R4.
  // FIXME: If LTy and RTy have a composite pointer type, should we convert to
  // that instead?
  ExprValueKind LVK = LHS.get()->getValueKind();
  ExprValueKind RVK = RHS.get()->getValueKind();
  if (!Context.hasSameType(T1: LTy, T2: RTy) && LVK == RVK && LVK != VK_PRValue) {
    // DerivedToBase was already handled by the class-specific case above.
    // FIXME: Should we allow ObjC conversions here?
    const ReferenceConversions AllowedConversions =
        ReferenceConversions::Qualification |
        ReferenceConversions::NestedQualification |
        ReferenceConversions::Function;

    ReferenceConversions RefConv;
    if (CompareReferenceRelationship(Loc: QuestionLoc, T1: LTy, T2: RTy, Conv: &RefConv) ==
            Ref_Compatible &&
        !(RefConv & ~AllowedConversions) &&
        // [...] subject to the constraint that the reference must bind
        // directly [...]
        !RHS.get()->refersToBitField() && !RHS.get()->refersToVectorElement()) {
      RHS = ImpCastExprToType(E: RHS.get(), Type: LTy, CK: CK_NoOp, VK: RVK);
      RTy = RHS.get()->getType();
    } else if (CompareReferenceRelationship(Loc: QuestionLoc, T1: RTy, T2: LTy, Conv: &RefConv) ==
                   Ref_Compatible &&
               !(RefConv & ~AllowedConversions) &&
               !LHS.get()->refersToBitField() &&
               !LHS.get()->refersToVectorElement()) {
      LHS = ImpCastExprToType(E: LHS.get(), Type: RTy, CK: CK_NoOp, VK: LVK);
      LTy = LHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p4
  //   If the second and third operands are glvalues of the same value
  //   category and have the same type, the result is of that type and
  //   value category and it is a bit-field if the second or the third
  //   operand is a bit-field, or if both are bit-fields.
  // We only extend this to bitfields, not to the crazy other kinds of
  // l-values.
  bool Same = Context.hasSameType(T1: LTy, T2: RTy);
  if (Same && LVK == RVK && LVK != VK_PRValue &&
      LHS.get()->isOrdinaryOrBitFieldObject() &&
      RHS.get()->isOrdinaryOrBitFieldObject()) {
    VK = LHS.get()->getValueKind();
    if (LHS.get()->getObjectKind() == OK_BitField ||
        RHS.get()->getObjectKind() == OK_BitField)
      OK = OK_BitField;
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // C++11 [expr.cond]p5
  //   Otherwise, the result is a prvalue. If the second and third operands
  //   do not have the same type, and either has (cv) class type, ...
  if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
    //   ... overload resolution is used to determine the conversions (if any)
    //   to be applied to the operands. If the overload resolution fails, the
    //   program is ill-formed.
    if (FindConditionalOverload(Self&: *this, LHS, RHS, QuestionLoc))
      return QualType();
  }

  // C++11 [expr.cond]p6
  //   Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
  //   conversions are performed on the second and third operands.
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  LTy = LHS.get()->getType();
  RTy = RHS.get()->getType();

  //   After those conversions, one of the following shall hold:
  //   -- The second and third operands have the same type; the result
  //      is of that type. If the operands have class type, the result
  //      is a prvalue temporary of the result type, which is
  //      copy-initialized from either the second operand or the third
  //      operand depending on the value of the first operand.
  if (Context.hasSameType(T1: LTy, T2: RTy)) {
    if (LTy->isRecordType()) {
      // The operands have class type. Make a temporary copy.
      ExprResult LHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: LTy), EqualLoc: SourceLocation(), Init: LHS);
      if (LHSCopy.isInvalid())
        return QualType();

      ExprResult RHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: RTy), EqualLoc: SourceLocation(), Init: RHS);
      if (RHSCopy.isInvalid())
        return QualType();

      LHS = LHSCopy;
      RHS = RHSCopy;
    }
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // Extension: conditional operator involving vector types.
  if (LTy->isVectorType() || RTy->isVectorType())
    return CheckVectorOperands(LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false,
                               /*AllowBothBool*/ true,
                               /*AllowBoolConversions*/ AllowBoolConversion: false,
                               /*AllowBoolOperation*/ false,
                               /*ReportInvalid*/ true);

  //   -- The second and third operands have arithmetic or enumeration type;
  //      the usual arithmetic conversions are performed to bring them to a
  //      common type, and the result is of that type.
  if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
    QualType ResTy = UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                                ACK: ArithConvKind::Conditional);
    if (LHS.isInvalid() || RHS.isInvalid())
      return QualType();
    if (ResTy.isNull()) {
      Diag(Loc: QuestionLoc,
           DiagID: diag::err_typecheck_cond_incompatible_operands) << LTy << RTy
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    LHS = ImpCastExprToType(E: LHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: LHS, destType: ResTy));
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: RHS, destType: ResTy));

    return ResTy;
  }

  //   -- The second and third operands have pointer type, or one has pointer
  //      type and the other is a null pointer constant, or both are null
  //      pointer constants, at least one of which is non-integral; pointer
  //      conversions and qualification conversions are performed to bring them
  //      to their composite pointer type. The result is of the composite
  //      pointer type.
  //   -- The second and third operands have pointer to member type, or one has
  //      pointer to member type and the other is a null pointer constant;
  //      pointer to member conversions and qualification conversions are
  //      performed to bring them to a common type, whose cv-qualification
  //      shall match the cv-qualification of either the second or the third
  //      operand. The result is of the common type.
  QualType Composite = FindCompositePointerType(Loc: QuestionLoc, E1&: LHS, E2&: RHS);
  if (!Composite.isNull())
    return Composite;

  // Similarly, attempt to find composite type of two objective-c pointers.
  Composite = ObjC().FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  if (!Composite.isNull())
    return Composite;

  // Check if we are using a null with a non-pointer type.
  if (DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
    return QualType();

  // Nothing matched: the operands are simply incompatible.
  Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
      << LHS.get()->getType() << RHS.get()->getType()
      << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
  return QualType();
}
6125
6126QualType Sema::FindCompositePointerType(SourceLocation Loc,
6127 Expr *&E1, Expr *&E2,
6128 bool ConvertArgs) {
6129 assert(getLangOpts().CPlusPlus && "This function assumes C++");
6130
6131 // C++1z [expr]p14:
6132 // The composite pointer type of two operands p1 and p2 having types T1
6133 // and T2
6134 QualType T1 = E1->getType(), T2 = E2->getType();
6135
6136 // where at least one is a pointer or pointer to member type or
6137 // std::nullptr_t is:
6138 bool T1IsPointerLike = T1->isAnyPointerType() || T1->isMemberPointerType() ||
6139 T1->isNullPtrType();
6140 bool T2IsPointerLike = T2->isAnyPointerType() || T2->isMemberPointerType() ||
6141 T2->isNullPtrType();
6142 if (!T1IsPointerLike && !T2IsPointerLike)
6143 return QualType();
6144
6145 // - if both p1 and p2 are null pointer constants, std::nullptr_t;
6146 // This can't actually happen, following the standard, but we also use this
6147 // to implement the end of [expr.conv], which hits this case.
6148 //
6149 // - if either p1 or p2 is a null pointer constant, T2 or T1, respectively;
6150 if (T1IsPointerLike &&
6151 E2->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
6152 if (ConvertArgs)
6153 E2 = ImpCastExprToType(E: E2, Type: T1, CK: T1->isMemberPointerType()
6154 ? CK_NullToMemberPointer
6155 : CK_NullToPointer).get();
6156 return T1;
6157 }
6158 if (T2IsPointerLike &&
6159 E1->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
6160 if (ConvertArgs)
6161 E1 = ImpCastExprToType(E: E1, Type: T2, CK: T2->isMemberPointerType()
6162 ? CK_NullToMemberPointer
6163 : CK_NullToPointer).get();
6164 return T2;
6165 }
6166
6167 // Now both have to be pointers or member pointers.
6168 if (!T1IsPointerLike || !T2IsPointerLike)
6169 return QualType();
6170 assert(!T1->isNullPtrType() && !T2->isNullPtrType() &&
6171 "nullptr_t should be a null pointer constant");
6172
6173 struct Step {
6174 enum Kind { Pointer, ObjCPointer, MemberPointer, Array } K;
6175 // Qualifiers to apply under the step kind.
6176 Qualifiers Quals;
6177 /// The class for a pointer-to-member; a constant array type with a bound
6178 /// (if any) for an array.
6179 /// FIXME: Store Qualifier for pointer-to-member.
6180 const Type *ClassOrBound;
6181
6182 Step(Kind K, const Type *ClassOrBound = nullptr)
6183 : K(K), ClassOrBound(ClassOrBound) {}
6184 QualType rebuild(ASTContext &Ctx, QualType T) const {
6185 T = Ctx.getQualifiedType(T, Qs: Quals);
6186 switch (K) {
6187 case Pointer:
6188 return Ctx.getPointerType(T);
6189 case MemberPointer:
6190 return Ctx.getMemberPointerType(T, /*Qualifier=*/nullptr,
6191 Cls: ClassOrBound->getAsCXXRecordDecl());
6192 case ObjCPointer:
6193 return Ctx.getObjCObjectPointerType(OIT: T);
6194 case Array:
6195 if (auto *CAT = cast_or_null<ConstantArrayType>(Val: ClassOrBound))
6196 return Ctx.getConstantArrayType(EltTy: T, ArySize: CAT->getSize(), SizeExpr: nullptr,
6197 ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
6198 else
6199 return Ctx.getIncompleteArrayType(EltTy: T, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
6200 }
6201 llvm_unreachable("unknown step kind");
6202 }
6203 };
6204
6205 SmallVector<Step, 8> Steps;
6206
6207 // - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
6208 // is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
6209 // the cv-combined type of T1 and T2 or the cv-combined type of T2 and T1,
6210 // respectively;
6211 // - if T1 is "pointer to member of C1 of type cv1 U1" and T2 is "pointer
6212 // to member of C2 of type cv2 U2" for some non-function type U, where
6213 // C1 is reference-related to C2 or C2 is reference-related to C1, the
6214 // cv-combined type of T2 and T1 or the cv-combined type of T1 and T2,
6215 // respectively;
6216 // - if T1 and T2 are similar types (4.5), the cv-combined type of T1 and
6217 // T2;
6218 //
6219 // Dismantle T1 and T2 to simultaneously determine whether they are similar
6220 // and to prepare to form the cv-combined type if so.
6221 QualType Composite1 = T1;
6222 QualType Composite2 = T2;
6223 unsigned NeedConstBefore = 0;
6224 while (true) {
6225 assert(!Composite1.isNull() && !Composite2.isNull());
6226
6227 Qualifiers Q1, Q2;
6228 Composite1 = Context.getUnqualifiedArrayType(T: Composite1, Quals&: Q1);
6229 Composite2 = Context.getUnqualifiedArrayType(T: Composite2, Quals&: Q2);
6230
6231 // Top-level qualifiers are ignored. Merge at all lower levels.
6232 if (!Steps.empty()) {
6233 // Find the qualifier union: (approximately) the unique minimal set of
6234 // qualifiers that is compatible with both types.
6235 Qualifiers Quals = Qualifiers::fromCVRUMask(CVRU: Q1.getCVRUQualifiers() |
6236 Q2.getCVRUQualifiers());
6237
6238 // Under one level of pointer or pointer-to-member, we can change to an
6239 // unambiguous compatible address space.
6240 if (Q1.getAddressSpace() == Q2.getAddressSpace()) {
6241 Quals.setAddressSpace(Q1.getAddressSpace());
6242 } else if (Steps.size() == 1) {
6243 bool MaybeQ1 = Q1.isAddressSpaceSupersetOf(other: Q2, Ctx: getASTContext());
6244 bool MaybeQ2 = Q2.isAddressSpaceSupersetOf(other: Q1, Ctx: getASTContext());
6245 if (MaybeQ1 == MaybeQ2) {
6246 // Exception for ptr size address spaces. Should be able to choose
6247 // either address space during comparison.
6248 if (isPtrSizeAddressSpace(AS: Q1.getAddressSpace()) ||
6249 isPtrSizeAddressSpace(AS: Q2.getAddressSpace()))
6250 MaybeQ1 = true;
6251 else
6252 return QualType(); // No unique best address space.
6253 }
6254 Quals.setAddressSpace(MaybeQ1 ? Q1.getAddressSpace()
6255 : Q2.getAddressSpace());
6256 } else {
6257 return QualType();
6258 }
6259
6260 // FIXME: In C, we merge __strong and none to __strong at the top level.
6261 if (Q1.getObjCGCAttr() == Q2.getObjCGCAttr())
6262 Quals.setObjCGCAttr(Q1.getObjCGCAttr());
6263 else if (T1->isVoidPointerType() || T2->isVoidPointerType())
6264 assert(Steps.size() == 1);
6265 else
6266 return QualType();
6267
6268 // Mismatched lifetime qualifiers never compatibly include each other.
6269 if (Q1.getObjCLifetime() == Q2.getObjCLifetime())
6270 Quals.setObjCLifetime(Q1.getObjCLifetime());
6271 else if (T1->isVoidPointerType() || T2->isVoidPointerType())
6272 assert(Steps.size() == 1);
6273 else
6274 return QualType();
6275
6276 if (Q1.getPointerAuth().isEquivalent(Other: Q2.getPointerAuth()))
6277 Quals.setPointerAuth(Q1.getPointerAuth());
6278 else
6279 return QualType();
6280
6281 Steps.back().Quals = Quals;
6282 if (Q1 != Quals || Q2 != Quals)
6283 NeedConstBefore = Steps.size() - 1;
6284 }
6285
6286 // FIXME: Can we unify the following with UnwrapSimilarTypes?
6287
6288 const ArrayType *Arr1, *Arr2;
6289 if ((Arr1 = Context.getAsArrayType(T: Composite1)) &&
6290 (Arr2 = Context.getAsArrayType(T: Composite2))) {
6291 auto *CAT1 = dyn_cast<ConstantArrayType>(Val: Arr1);
6292 auto *CAT2 = dyn_cast<ConstantArrayType>(Val: Arr2);
6293 if (CAT1 && CAT2 && CAT1->getSize() == CAT2->getSize()) {
6294 Composite1 = Arr1->getElementType();
6295 Composite2 = Arr2->getElementType();
6296 Steps.emplace_back(Args: Step::Array, Args&: CAT1);
6297 continue;
6298 }
6299 bool IAT1 = isa<IncompleteArrayType>(Val: Arr1);
6300 bool IAT2 = isa<IncompleteArrayType>(Val: Arr2);
6301 if ((IAT1 && IAT2) ||
6302 (getLangOpts().CPlusPlus20 && (IAT1 != IAT2) &&
6303 ((bool)CAT1 != (bool)CAT2) &&
6304 (Steps.empty() || Steps.back().K != Step::Array))) {
6305 // In C++20 onwards, we can unify an array of N T with an array of
6306 // a different or unknown bound. But we can't form an array whose
6307 // element type is an array of unknown bound by doing so.
6308 Composite1 = Arr1->getElementType();
6309 Composite2 = Arr2->getElementType();
6310 Steps.emplace_back(Args: Step::Array);
6311 if (CAT1 || CAT2)
6312 NeedConstBefore = Steps.size();
6313 continue;
6314 }
6315 }
6316
6317 const PointerType *Ptr1, *Ptr2;
6318 if ((Ptr1 = Composite1->getAs<PointerType>()) &&
6319 (Ptr2 = Composite2->getAs<PointerType>())) {
6320 Composite1 = Ptr1->getPointeeType();
6321 Composite2 = Ptr2->getPointeeType();
6322 Steps.emplace_back(Args: Step::Pointer);
6323 continue;
6324 }
6325
6326 const ObjCObjectPointerType *ObjPtr1, *ObjPtr2;
6327 if ((ObjPtr1 = Composite1->getAs<ObjCObjectPointerType>()) &&
6328 (ObjPtr2 = Composite2->getAs<ObjCObjectPointerType>())) {
6329 Composite1 = ObjPtr1->getPointeeType();
6330 Composite2 = ObjPtr2->getPointeeType();
6331 Steps.emplace_back(Args: Step::ObjCPointer);
6332 continue;
6333 }
6334
6335 const MemberPointerType *MemPtr1, *MemPtr2;
6336 if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
6337 (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
6338 Composite1 = MemPtr1->getPointeeType();
6339 Composite2 = MemPtr2->getPointeeType();
6340
6341 // At the top level, we can perform a base-to-derived pointer-to-member
6342 // conversion:
6343 //
6344 // - [...] where C1 is reference-related to C2 or C2 is
6345 // reference-related to C1
6346 //
6347 // (Note that the only kinds of reference-relatedness in scope here are
6348 // "same type or derived from".) At any other level, the class must
6349 // exactly match.
6350 CXXRecordDecl *Cls = nullptr,
6351 *Cls1 = MemPtr1->getMostRecentCXXRecordDecl(),
6352 *Cls2 = MemPtr2->getMostRecentCXXRecordDecl();
6353 if (declaresSameEntity(D1: Cls1, D2: Cls2))
6354 Cls = Cls1;
6355 else if (Steps.empty())
6356 Cls = IsDerivedFrom(Loc, Derived: Cls1, Base: Cls2) ? Cls1
6357 : IsDerivedFrom(Loc, Derived: Cls2, Base: Cls1) ? Cls2
6358 : nullptr;
6359 if (!Cls)
6360 return QualType();
6361
6362 Steps.emplace_back(Args: Step::MemberPointer,
6363 Args: Context.getTypeDeclType(Decl: Cls).getTypePtr());
6364 continue;
6365 }
6366
6367 // Special case: at the top level, we can decompose an Objective-C pointer
6368 // and a 'cv void *'. Unify the qualifiers.
6369 if (Steps.empty() && ((Composite1->isVoidPointerType() &&
6370 Composite2->isObjCObjectPointerType()) ||
6371 (Composite1->isObjCObjectPointerType() &&
6372 Composite2->isVoidPointerType()))) {
6373 Composite1 = Composite1->getPointeeType();
6374 Composite2 = Composite2->getPointeeType();
6375 Steps.emplace_back(Args: Step::Pointer);
6376 continue;
6377 }
6378
6379 // FIXME: block pointer types?
6380
6381 // Cannot unwrap any more types.
6382 break;
6383 }
6384
6385 // - if T1 or T2 is "pointer to noexcept function" and the other type is
6386 // "pointer to function", where the function types are otherwise the same,
6387 // "pointer to function";
6388 // - if T1 or T2 is "pointer to member of C1 of type function", the other
6389 // type is "pointer to member of C2 of type noexcept function", and C1
6390 // is reference-related to C2 or C2 is reference-related to C1, where
6391 // the function types are otherwise the same, "pointer to member of C2 of
6392 // type function" or "pointer to member of C1 of type function",
6393 // respectively;
6394 //
6395 // We also support 'noreturn' here, so as a Clang extension we generalize the
6396 // above to:
6397 //
6398 // - [Clang] If T1 and T2 are both of type "pointer to function" or
6399 // "pointer to member function" and the pointee types can be unified
6400 // by a function pointer conversion, that conversion is applied
6401 // before checking the following rules.
6402 //
6403 // We've already unwrapped down to the function types, and we want to merge
6404 // rather than just convert, so do this ourselves rather than calling
6405 // IsFunctionConversion.
6406 //
6407 // FIXME: In order to match the standard wording as closely as possible, we
6408 // currently only do this under a single level of pointers. Ideally, we would
6409 // allow this in general, and set NeedConstBefore to the relevant depth on
6410 // the side(s) where we changed anything. If we permit that, we should also
6411 // consider this conversion when determining type similarity and model it as
6412 // a qualification conversion.
6413 if (Steps.size() == 1) {
6414 if (auto *FPT1 = Composite1->getAs<FunctionProtoType>()) {
6415 if (auto *FPT2 = Composite2->getAs<FunctionProtoType>()) {
6416 FunctionProtoType::ExtProtoInfo EPI1 = FPT1->getExtProtoInfo();
6417 FunctionProtoType::ExtProtoInfo EPI2 = FPT2->getExtProtoInfo();
6418
6419 // The result is noreturn if both operands are.
6420 bool Noreturn =
6421 EPI1.ExtInfo.getNoReturn() && EPI2.ExtInfo.getNoReturn();
6422 EPI1.ExtInfo = EPI1.ExtInfo.withNoReturn(noReturn: Noreturn);
6423 EPI2.ExtInfo = EPI2.ExtInfo.withNoReturn(noReturn: Noreturn);
6424
6425 bool CFIUncheckedCallee =
6426 EPI1.CFIUncheckedCallee || EPI2.CFIUncheckedCallee;
6427 EPI1.CFIUncheckedCallee = CFIUncheckedCallee;
6428 EPI2.CFIUncheckedCallee = CFIUncheckedCallee;
6429
6430 // The result is nothrow if both operands are.
6431 SmallVector<QualType, 8> ExceptionTypeStorage;
6432 EPI1.ExceptionSpec = EPI2.ExceptionSpec = Context.mergeExceptionSpecs(
6433 ESI1: EPI1.ExceptionSpec, ESI2: EPI2.ExceptionSpec, ExceptionTypeStorage,
6434 AcceptDependent: getLangOpts().CPlusPlus17);
6435
6436 Composite1 = Context.getFunctionType(ResultTy: FPT1->getReturnType(),
6437 Args: FPT1->getParamTypes(), EPI: EPI1);
6438 Composite2 = Context.getFunctionType(ResultTy: FPT2->getReturnType(),
6439 Args: FPT2->getParamTypes(), EPI: EPI2);
6440 }
6441 }
6442 }
6443
6444 // There are some more conversions we can perform under exactly one pointer.
6445 if (Steps.size() == 1 && Steps.front().K == Step::Pointer &&
6446 !Context.hasSameType(T1: Composite1, T2: Composite2)) {
6447 // - if T1 or T2 is "pointer to cv1 void" and the other type is
6448 // "pointer to cv2 T", where T is an object type or void,
6449 // "pointer to cv12 void", where cv12 is the union of cv1 and cv2;
6450 if (Composite1->isVoidType() && Composite2->isObjectType())
6451 Composite2 = Composite1;
6452 else if (Composite2->isVoidType() && Composite1->isObjectType())
6453 Composite1 = Composite2;
6454 // - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
6455 // is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
6456 // the cv-combined type of T1 and T2 or the cv-combined type of T2 and
6457 // T1, respectively;
6458 //
6459 // The "similar type" handling covers all of this except for the "T1 is a
6460 // base class of T2" case in the definition of reference-related.
6461 else if (IsDerivedFrom(Loc, Derived: Composite1, Base: Composite2))
6462 Composite1 = Composite2;
6463 else if (IsDerivedFrom(Loc, Derived: Composite2, Base: Composite1))
6464 Composite2 = Composite1;
6465 }
6466
6467 // At this point, either the inner types are the same or we have failed to
6468 // find a composite pointer type.
6469 if (!Context.hasSameType(T1: Composite1, T2: Composite2))
6470 return QualType();
6471
6472 // Per C++ [conv.qual]p3, add 'const' to every level before the last
6473 // differing qualifier.
6474 for (unsigned I = 0; I != NeedConstBefore; ++I)
6475 Steps[I].Quals.addConst();
6476
6477 // Rebuild the composite type.
6478 QualType Composite = Context.getCommonSugaredType(X: Composite1, Y: Composite2);
6479 for (auto &S : llvm::reverse(C&: Steps))
6480 Composite = S.rebuild(Ctx&: Context, T: Composite);
6481
6482 if (ConvertArgs) {
6483 // Convert the expressions to the composite pointer type.
6484 InitializedEntity Entity =
6485 InitializedEntity::InitializeTemporary(Type: Composite);
6486 InitializationKind Kind =
6487 InitializationKind::CreateCopy(InitLoc: Loc, EqualLoc: SourceLocation());
6488
6489 InitializationSequence E1ToC(*this, Entity, Kind, E1);
6490 if (!E1ToC)
6491 return QualType();
6492
6493 InitializationSequence E2ToC(*this, Entity, Kind, E2);
6494 if (!E2ToC)
6495 return QualType();
6496
6497 // FIXME: Let the caller know if these fail to avoid duplicate diagnostics.
6498 ExprResult E1Result = E1ToC.Perform(S&: *this, Entity, Kind, Args: E1);
6499 if (E1Result.isInvalid())
6500 return QualType();
6501 E1 = E1Result.get();
6502
6503 ExprResult E2Result = E2ToC.Perform(S&: *this, Entity, Kind, Args: E2);
6504 if (E2Result.isInvalid())
6505 return QualType();
6506 E2 = E2Result.get();
6507 }
6508
6509 return Composite;
6510}
6511
/// If the given expression is a class-typed prvalue that needs its
/// destructor run at the end of the full-expression, wrap it in a
/// CXXBindTemporaryExpr.  Under ARC, also insert the consume/reclaim
/// cast required for retainable return values.  Glvalues, dependent
/// records, and temporaries with trivial destructors pass through
/// unchanged.
ExprResult Sema::MaybeBindToTemporary(Expr *E) {
  if (!E)
    return ExprError();

  assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");

  // If the result is a glvalue, we shouldn't bind it.
  if (E->isGLValue())
    return E;

  // In ARC, calls that return a retainable type can return retained,
  // in which case we have to insert a consuming cast.
  if (getLangOpts().ObjCAutoRefCount &&
      E->getType()->isObjCRetainableType()) {

    bool ReturnsRetained;

    // For actual calls, we compute this by examining the type of the
    // called value.
    if (CallExpr *Call = dyn_cast<CallExpr>(Val: E)) {
      Expr *Callee = Call->getCallee()->IgnoreParens();
      QualType T = Callee->getType();

      if (T == Context.BoundMemberTy) {
        // Handle pointer-to-members.
        if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val: Callee))
          T = BinOp->getRHS()->getType();
        else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Val: Callee))
          T = Mem->getMemberDecl()->getType();
      }

      // Strip off pointer/block-pointer/member-pointer sugar to reach the
      // underlying function type, whose ext-info carries the attribute.
      if (const PointerType *Ptr = T->getAs<PointerType>())
        T = Ptr->getPointeeType();
      else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
        T = Ptr->getPointeeType();
      else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
        T = MemPtr->getPointeeType();

      auto *FTy = T->castAs<FunctionType>();
      ReturnsRetained = FTy->getExtInfo().getProducesResult();

      // ActOnStmtExpr arranges things so that StmtExprs of retainable
      // type always produce a +1 object.
    } else if (isa<StmtExpr>(Val: E)) {
      ReturnsRetained = true;

      // We hit this case with the lambda conversion-to-block optimization;
      // we don't want any extra casts here.
    } else if (isa<CastExpr>(Val: E) &&
               isa<BlockExpr>(Val: cast<CastExpr>(Val: E)->getSubExpr())) {
      return E;

      // For message sends and property references, we try to find an
      // actual method. FIXME: we should infer retention by selector in
      // cases where we don't have an actual method.
    } else {
      ObjCMethodDecl *D = nullptr;
      if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(Val: E)) {
        D = Send->getMethodDecl();
      } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(Val: E)) {
        D = BoxedExpr->getBoxingMethod();
      } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element array
        // constant.
        if (ArrayLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = ArrayLit->getArrayWithObjectsMethod();
      } else if (ObjCDictionaryLiteral *DictLit
                                        = dyn_cast<ObjCDictionaryLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element dictionary
        // constant.
        if (DictLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = DictLit->getDictWithObjectsMethod();
      }

      ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());

      // Don't do reclaims on performSelector calls; despite their
      // return type, the invoked method doesn't necessarily actually
      // return an object.
      if (!ReturnsRetained &&
          D && D->getMethodFamily() == OMF_performSelector)
        return E;
    }

    // Don't reclaim an object of Class type.
    if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType())
      return E;

    Cleanup.setExprNeedsCleanups(true);

    CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
                                   : CK_ARCReclaimReturnedObject);
    return ImplicitCastExpr::Create(Context, T: E->getType(), Kind: ck, Operand: E, BasePath: nullptr,
                                    Cat: VK_PRValue, FPO: FPOptionsOverride());
  }

  // Non-trivially-destructed C structs need a cleanup even outside C++;
  // this runs before the C++-only bail-out below.
  if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    Cleanup.setExprNeedsCleanups(true);

  if (!getLangOpts().CPlusPlus)
    return E;

  // Search for the base element type (cf. ASTContext::getBaseElementType) with
  // a fast path for the common case that the type is directly a RecordType.
  const Type *T = Context.getCanonicalType(T: E->getType().getTypePtr());
  const RecordType *RT = nullptr;
  while (!RT) {
    switch (T->getTypeClass()) {
    case Type::Record:
      RT = cast<RecordType>(Val: T);
      break;
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::DependentSizedArray:
      T = cast<ArrayType>(Val: T)->getElementType().getTypePtr();
      break;
    default:
      return E;
    }
  }

  // That should be enough to guarantee that this type is complete, if we're
  // not processing a decltype expression.
  CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RT->getDecl());
  if (RD->isInvalidDecl() || RD->isDependentContext())
    return E;

  // Inside a decltype operand, skip the destructor lookup entirely: a
  // temporary is not introduced for a prvalue function call there
  // (C++11 [expr.call]p11); destructors for the remaining binds are
  // checked later in ActOnDecltypeExpression.
  bool IsDecltype = ExprEvalContexts.back().ExprContext ==
                    ExpressionEvaluationContextRecord::EK_Decltype;
  CXXDestructorDecl *Destructor = IsDecltype ? nullptr : LookupDestructor(Class: RD);

  if (Destructor) {
    MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                            << E->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
      return ExprError();

    // If destructor is trivial, we can avoid the extra copy.
    if (Destructor->isTrivial())
      return E;

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  CXXTemporary *Temp = CXXTemporary::Create(C: Context, Destructor);
  CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(C: Context, Temp, SubExpr: E);

  // Record the bind so ActOnDecltypeExpression can revisit it once the
  // decltype operand is fully parsed.
  if (IsDecltype)
    ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Elt: Bind);

  return Bind;
}
6674
6675ExprResult
6676Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
6677 if (SubExpr.isInvalid())
6678 return ExprError();
6679
6680 return MaybeCreateExprWithCleanups(SubExpr: SubExpr.get());
6681}
6682
6683Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
6684 assert(SubExpr && "subexpression can't be null!");
6685
6686 CleanupVarDeclMarking();
6687
6688 unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects;
6689 assert(ExprCleanupObjects.size() >= FirstCleanup);
6690 assert(Cleanup.exprNeedsCleanups() ||
6691 ExprCleanupObjects.size() == FirstCleanup);
6692 if (!Cleanup.exprNeedsCleanups())
6693 return SubExpr;
6694
6695 auto Cleanups = llvm::ArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
6696 ExprCleanupObjects.size() - FirstCleanup);
6697
6698 auto *E = ExprWithCleanups::Create(
6699 C: Context, subexpr: SubExpr, CleanupsHaveSideEffects: Cleanup.cleanupsHaveSideEffects(), objects: Cleanups);
6700 DiscardCleanupsInEvaluationContext();
6701
6702 return E;
6703}
6704
6705Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
6706 assert(SubStmt && "sub-statement can't be null!");
6707
6708 CleanupVarDeclMarking();
6709
6710 if (!Cleanup.exprNeedsCleanups())
6711 return SubStmt;
6712
6713 // FIXME: In order to attach the temporaries, wrap the statement into
6714 // a StmtExpr; currently this is only used for asm statements.
6715 // This is hacky, either create a new CXXStmtWithTemporaries statement or
6716 // a new AsmStmtWithTemporaries.
6717 CompoundStmt *CompStmt =
6718 CompoundStmt::Create(C: Context, Stmts: SubStmt, FPFeatures: FPOptionsOverride(),
6719 LB: SourceLocation(), RB: SourceLocation());
6720 Expr *E = new (Context)
6721 StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation(),
6722 /*FIXME TemplateDepth=*/0);
6723 return MaybeCreateExprWithCleanups(SubExpr: E);
6724}
6725
/// Finish analysis of the operand of a decltype-specifier: strip the
/// outermost temporary binding (no temporary is introduced for a prvalue
/// call that is the decltype operand), then perform the call-return-type
/// and destructor checks that were delayed while parsing the operand.
ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
  assert(ExprEvalContexts.back().ExprContext ==
             ExpressionEvaluationContextRecord::EK_Decltype &&
         "not in a decltype expression");

  ExprResult Result = CheckPlaceholderExpr(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // C++11 [expr.call]p11:
  //   If a function call is a prvalue of object type,
  //   -- if the function call is either
  //      -- the operand of a decltype-specifier, or
  //      -- the right operand of a comma operator that is the operand of a
  //         decltype-specifier,
  //   a temporary object is not introduced for the prvalue.

  // Recursively rebuild ParenExprs and comma expressions to strip out the
  // outermost CXXBindTemporaryExpr, if any.
  if (ParenExpr *PE = dyn_cast<ParenExpr>(Val: E)) {
    ExprResult SubExpr = ActOnDecltypeExpression(E: PE->getSubExpr());
    if (SubExpr.isInvalid())
      return ExprError();
    // Only rebuild if the recursive call actually changed the operand.
    if (SubExpr.get() == PE->getSubExpr())
      return E;
    return ActOnParenExpr(L: PE->getLParen(), R: PE->getRParen(), E: SubExpr.get());
  }
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
    if (BO->getOpcode() == BO_Comma) {
      ExprResult RHS = ActOnDecltypeExpression(E: BO->getRHS());
      if (RHS.isInvalid())
        return ExprError();
      if (RHS.get() == BO->getRHS())
        return E;
      return BinaryOperator::Create(C: Context, lhs: BO->getLHS(), rhs: RHS.get(), opc: BO_Comma,
                                    ResTy: BO->getType(), VK: BO->getValueKind(),
                                    OK: BO->getObjectKind(), opLoc: BO->getOperatorLoc(),
                                    FPFeatures: BO->getFPFeatures());
    }
  }

  // Identify the top-level bound temporary (if it wraps a call): that call
  // is the decltype operand itself and is exempt from the checks below.
  CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(Val: E);
  CallExpr *TopCall = TopBind ? dyn_cast<CallExpr>(Val: TopBind->getSubExpr())
                              : nullptr;
  if (TopCall)
    E = TopCall;
  else
    TopBind = nullptr;

  // Disable the special decltype handling now.
  ExprEvalContexts.back().ExprContext =
      ExpressionEvaluationContextRecord::EK_Other;

  Result = CheckUnevaluatedOperand(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // In MS mode, don't perform any extra checking of call return types within a
  // decltype expression.
  if (getLangOpts().MSVCCompat)
    return E;

  // Perform the semantic checks we delayed until this point.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size();
       I != N; ++I) {
    CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I];
    // The top-level call is the decltype operand; skip its return-type check.
    if (Call == TopCall)
      continue;

    if (CheckCallReturnType(ReturnType: Call->getCallReturnType(Ctx: Context),
                            Loc: Call->getBeginLoc(), CE: Call, FD: Call->getDirectCallee()))
      return ExprError();
  }

  // Now all relevant types are complete, check the destructors are accessible
  // and non-deleted, and annotate them on the temporaries.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeBinds.size();
       I != N; ++I) {
    CXXBindTemporaryExpr *Bind =
        ExprEvalContexts.back().DelayedDecltypeBinds[I];
    // The top-level bind is being stripped; no destructor runs for it.
    if (Bind == TopBind)
      continue;

    CXXTemporary *Temp = Bind->getTemporary();

    CXXRecordDecl *RD =
        Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
    CXXDestructorDecl *Destructor = LookupDestructor(Class: RD);
    Temp->setDestructor(Destructor);

    MarkFunctionReferenced(Loc: Bind->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: Bind->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                            << Bind->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: Bind->getExprLoc()))
      return ExprError();

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  // Possibly strip off the top CXXBindTemporaryExpr.
  return E;
}
6832
6833/// Note a set of 'operator->' functions that were used for a member access.
6834static void noteOperatorArrows(Sema &S,
6835 ArrayRef<FunctionDecl *> OperatorArrows) {
6836 unsigned SkipStart = OperatorArrows.size(), SkipCount = 0;
6837 // FIXME: Make this configurable?
6838 unsigned Limit = 9;
6839 if (OperatorArrows.size() > Limit) {
6840 // Produce Limit-1 normal notes and one 'skipping' note.
6841 SkipStart = (Limit - 1) / 2 + (Limit - 1) % 2;
6842 SkipCount = OperatorArrows.size() - (Limit - 1);
6843 }
6844
6845 for (unsigned I = 0; I < OperatorArrows.size(); /**/) {
6846 if (I == SkipStart) {
6847 S.Diag(Loc: OperatorArrows[I]->getLocation(),
6848 DiagID: diag::note_operator_arrows_suppressed)
6849 << SkipCount;
6850 I += SkipCount;
6851 } else {
6852 S.Diag(Loc: OperatorArrows[I]->getLocation(), DiagID: diag::note_operator_arrow_here)
6853 << OperatorArrows[I]->getCallResultType();
6854 ++I;
6855 }
6856 }
6857}
6858
/// Begin analysis of a C++ member access ('.' or '->') once the base
/// expression has been parsed: resolve overloaded 'operator->' chains,
/// compute the object type the member will be looked up in, and report
/// (via \p MayBePseudoDestructor) whether a pseudo-destructor-name may
/// follow.
ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
                                              SourceLocation OpLoc,
                                              tok::TokenKind OpKind,
                                              ParsedType &ObjectType,
                                              bool &MayBePseudoDestructor) {
  // Since this might be a postfix expression, get rid of ParenListExprs.
  ExprResult Result = MaybeConvertParenListExprToParenExpr(S, ME: Base);
  if (Result.isInvalid()) return ExprError();
  Base = Result.get();

  Result = CheckPlaceholderExpr(E: Base);
  if (Result.isInvalid()) return ExprError();
  Base = Result.get();

  QualType BaseType = Base->getType();
  MayBePseudoDestructor = false;
  if (BaseType->isDependentType()) {
    // If we have a pointer to a dependent type and are using the -> operator,
    // the object type is the type that the pointer points to. We might still
    // have enough information about that type to do something useful.
    if (OpKind == tok::arrow)
      if (const PointerType *Ptr = BaseType->getAs<PointerType>())
        BaseType = Ptr->getPointeeType();

    ObjectType = ParsedType::make(P: BaseType);
    MayBePseudoDestructor = true;
    return Base;
  }

  // C++ [over.match.oper]p8:
  //   [...] When operator->returns, the operator-> is applied to the value
  //   returned, with the original second operand.
  if (OpKind == tok::arrow) {
    QualType StartingType = BaseType;
    bool NoArrowOperatorFound = false;
    bool FirstIteration = true;
    FunctionDecl *CurFD = dyn_cast<FunctionDecl>(Val: CurContext);
    // The set of types we've considered so far.
    llvm::SmallPtrSet<CanQualType,8> CTypes;
    SmallVector<FunctionDecl*, 8> OperatorArrows;
    CTypes.insert(Ptr: Context.getCanonicalType(T: BaseType));

    // Repeatedly apply overloaded 'operator->' while the base is of class
    // type, guarding against overly deep and circular chains.
    while (BaseType->isRecordType()) {
      if (OperatorArrows.size() >= getLangOpts().ArrowDepth) {
        Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_depth_exceeded)
          << StartingType << getLangOpts().ArrowDepth << Base->getSourceRange();
        noteOperatorArrows(S&: *this, OperatorArrows);
        Diag(Loc: OpLoc, DiagID: diag::note_operator_arrow_depth)
          << getLangOpts().ArrowDepth;
        return ExprError();
      }

      Result = BuildOverloadedArrowExpr(
          S, Base, OpLoc,
          // When in a template specialization and on the first loop iteration,
          // potentially give the default diagnostic (with the fixit in a
          // separate note) instead of having the error reported back to here
          // and giving a diagnostic with a fixit attached to the error itself.
          NoArrowOperatorFound: (FirstIteration && CurFD && CurFD->isFunctionTemplateSpecialization())
              ? nullptr
              : &NoArrowOperatorFound);
      if (Result.isInvalid()) {
        if (NoArrowOperatorFound) {
          if (FirstIteration) {
            // The very first '->' had no viable operator->; suggest '.' and
            // recover by treating the access as a dot access.
            Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
              << BaseType << 1 << Base->getSourceRange()
              << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
            OpKind = tok::period;
            break;
          }
          Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_arrow)
            << BaseType << Base->getSourceRange();
          CallExpr *CE = dyn_cast<CallExpr>(Val: Base);
          if (Decl *CD = (CE ? CE->getCalleeDecl() : nullptr)) {
            Diag(Loc: CD->getBeginLoc(),
                 DiagID: diag::note_member_reference_arrow_from_operator_arrow);
          }
        }
        return ExprError();
      }
      Base = Result.get();
      if (CXXOperatorCallExpr *OpCall = dyn_cast<CXXOperatorCallExpr>(Val: Base))
        OperatorArrows.push_back(Elt: OpCall->getDirectCallee());
      BaseType = Base->getType();
      // Detect cycles: revisiting a canonical type means operator-> loops.
      CanQualType CBaseType = Context.getCanonicalType(T: BaseType);
      if (!CTypes.insert(Ptr: CBaseType).second) {
        Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_circular) << StartingType;
        noteOperatorArrows(S&: *this, OperatorArrows);
        return ExprError();
      }
      FirstIteration = false;
    }

    if (OpKind == tok::arrow) {
      if (BaseType->isPointerType())
        BaseType = BaseType->getPointeeType();
      else if (auto *AT = Context.getAsArrayType(T: BaseType))
        BaseType = AT->getElementType();
    }
  }

  // Objective-C properties allow "." access on Objective-C pointer types,
  // so adjust the base type to the object type itself.
  if (BaseType->isObjCObjectPointerType())
    BaseType = BaseType->getPointeeType();

  // C++ [basic.lookup.classref]p2:
  //   [...] If the type of the object expression is of pointer to scalar
  //   type, the unqualified-id is looked up in the context of the complete
  //   postfix-expression.
  //
  // This also indicates that we could be parsing a pseudo-destructor-name.
  // Note that Objective-C class and object types can be pseudo-destructor
  // expressions or normal member (ivar or property) access expressions, and
  // it's legal for the type to be incomplete if this is a pseudo-destructor
  // call. We'll do more incomplete-type checks later in the lookup process,
  // so just skip this check for ObjC types.
  if (!BaseType->isRecordType()) {
    ObjectType = ParsedType::make(P: BaseType);
    MayBePseudoDestructor = true;
    return Base;
  }

  // The object type must be complete (or dependent), or
  // C++11 [expr.prim.general]p3:
  //   Unlike the object expression in other contexts, *this is not required to
  //   be of complete type for purposes of class member access (5.2.5) outside
  //   the member function body.
  if (!BaseType->isDependentType() &&
      !isThisOutsideMemberFunctionBody(BaseType) &&
      RequireCompleteType(Loc: OpLoc, T: BaseType,
                          DiagID: diag::err_incomplete_member_access)) {
    return CreateRecoveryExpr(Begin: Base->getBeginLoc(), End: Base->getEndLoc(), SubExprs: {Base});
  }

  // C++ [basic.lookup.classref]p2:
  //   If the id-expression in a class member access (5.2.5) is an
  //   unqualified-id, and the type of the object expression is of a class
  //   type C (or of pointer to a class type C), the unqualified-id is looked
  //   up in the scope of class C. [...]
  ObjectType = ParsedType::make(P: BaseType);
  return Base;
}
7002
/// Normalize the base expression of a (possible) pseudo-destructor access.
///
/// Resolves any placeholder type in \p Base and computes \p ObjectType, the
/// type of the object being accessed. For the '->' form, performs the lvalue
/// conversions and strips one level of pointer so that \p ObjectType names
/// the pointee. Using '->' on a non-pointer is diagnosed with a fix-it and
/// recovered as '.', except in SFINAE contexts where it is an error.
///
/// \returns true on error, false otherwise. On success, \p Base, \p OpKind,
/// and \p ObjectType have been updated in place.
static bool CheckArrow(Sema &S, QualType &ObjectType, Expr *&Base,
                       tok::TokenKind &OpKind, SourceLocation OpLoc) {
  // Placeholder types must be resolved before the base's type can be
  // meaningfully inspected.
  if (Base->hasPlaceholderType()) {
    ExprResult result = S.CheckPlaceholderExpr(E: Base);
    if (result.isInvalid()) return true;
    Base = result.get();
  }
  ObjectType = Base->getType();

  // C++ [expr.pseudo]p2:
  //   The left-hand side of the dot operator shall be of scalar type. The
  //   left-hand side of the arrow operator shall be of pointer to scalar type.
  //   This scalar type is the object type.
  // Note that this is rather different from the normal handling for the
  // arrow operator.
  if (OpKind == tok::arrow) {
    // The operator requires a prvalue, so perform lvalue conversions.
    // Only do this if we might plausibly end with a pointer, as otherwise
    // this was likely to be intended to be a '.'.
    if (ObjectType->isPointerType() || ObjectType->isArrayType() ||
        ObjectType->isFunctionType()) {
      ExprResult BaseResult = S.DefaultFunctionArrayLvalueConversion(E: Base);
      if (BaseResult.isInvalid())
        return true;
      Base = BaseResult.get();
      ObjectType = Base->getType();
    }

    if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
      // Arrow on a genuine pointer: the object type is the pointee.
      ObjectType = Ptr->getPointeeType();
    } else if (!Base->isTypeDependent()) {
      // The user wrote "p->" when they probably meant "p."; fix it.
      // ('true' is the IsArrow selector of this diagnostic; cf. the
      // /*IsArrow=*/0 use for the inverse suggestion elsewhere.)
      S.Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
        << ObjectType << true
        << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
      if (S.isSFINAEContext())
        return true;

      OpKind = tok::period;
    }
  }

  return false;
}
7047
7048/// Check if it's ok to try and recover dot pseudo destructor calls on
7049/// pointer objects.
7050static bool
7051canRecoverDotPseudoDestructorCallsOnPointerObjects(Sema &SemaRef,
7052 QualType DestructedType) {
7053 // If this is a record type, check if its destructor is callable.
7054 if (auto *RD = DestructedType->getAsCXXRecordDecl()) {
7055 if (RD->hasDefinition())
7056 if (CXXDestructorDecl *D = SemaRef.LookupDestructor(Class: RD))
7057 return SemaRef.CanUseDecl(D, /*TreatUnavailableAsInvalid=*/false);
7058 return false;
7059 }
7060
7061 // Otherwise, check if it's a type for which it's valid to use a pseudo-dtor.
7062 return DestructedType->isDependentType() || DestructedType->isScalarType() ||
7063 DestructedType->isVectorType();
7064}
7065
/// Build a pseudo-destructor expression such as 'p->~T()' or 'x.~T()'.
///
/// Validates the base object type, checks that the destructed type and the
/// optional scope type (the type-name before '::~') agree with the object
/// type, and recovers from the common mistakes (e.g. '.' used on a pointer)
/// before constructing the CXXPseudoDestructorExpr AST node.
ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           const CXXScopeSpec &SS,
                                           TypeSourceInfo *ScopeTypeInfo,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           PseudoDestructorTypeStorage Destructed) {
  TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();

  // Normalize the base: resolve placeholders and, for '->', strip the
  // pointer so that ObjectType names the object being "destroyed".
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  // The object must be of scalar (or, as an extension, vector/matrix)
  // type; class types take the real-destructor call path instead.
  if (!ObjectType->isDependentType() && !ObjectType->isScalarType() &&
      !ObjectType->isVectorType() && !ObjectType->isMatrixType()) {
    // MSVC allows pseudo-destructor calls on void; accept with a warning
    // in compatibility mode.
    if (getLangOpts().MSVCCompat && ObjectType->isVoidType())
      Diag(Loc: OpLoc, DiagID: diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
    else {
      Diag(Loc: OpLoc, DiagID: diag::err_pseudo_dtor_base_not_scalar)
        << ObjectType << Base->getSourceRange();
      return ExprError();
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] The cv-unqualified versions of the object type and of the type
  //   designated by the pseudo-destructor-name shall be the same type.
  if (DestructedTypeInfo) {
    QualType DestructedType = DestructedTypeInfo->getType();
    SourceLocation DestructedTypeStart =
        DestructedTypeInfo->getTypeLoc().getBeginLoc();
    if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
      if (!Context.hasSameUnqualifiedType(T1: DestructedType, T2: ObjectType)) {
        // Detect dot pseudo destructor calls on pointer objects, e.g.:
        //   Foo *foo;
        //   foo.~Foo();
        if (OpKind == tok::period && ObjectType->isPointerType() &&
            Context.hasSameUnqualifiedType(T1: DestructedType,
                                           T2: ObjectType->getPointeeType())) {
          auto Diagnostic =
              Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
              << ObjectType << /*IsArrow=*/0 << Base->getSourceRange();

          // Issue a fixit only when the destructor is valid.
          if (canRecoverDotPseudoDestructorCallsOnPointerObjects(
                  SemaRef&: *this, DestructedType))
            Diagnostic << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: "->");

          // Recover by setting the object type to the destructed type and the
          // operator to '->'.
          ObjectType = DestructedType;
          OpKind = tok::arrow;
        } else {
          // Genuine mismatch between the object type and the type after '~'.
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_pseudo_dtor_type_mismatch)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();

          // Recover by setting the destructed type to the object type.
          DestructedType = ObjectType;
          DestructedTypeInfo =
              Context.getTrivialTypeSourceInfo(T: ObjectType, Loc: DestructedTypeStart);
          Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
        }
      } else if (DestructedType.getObjCLifetime() !=
                 ObjectType.getObjCLifetime()) {
        // Same type but different ARC ownership qualifiers.
        if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
          // Okay: just pretend that the user provided the correctly-qualified
          // type.
        } else {
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_arc_pseudo_dtor_inconstant_quals)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();
        }

        // Recover by setting the destructed type to the object type.
        DestructedType = ObjectType;
        DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: ObjectType,
                                                              Loc: DestructedTypeStart);
        Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
      }
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] Furthermore, the two type-names in a pseudo-destructor-name of the
  //   form
  //
  //     ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   shall designate the same scalar type.
  if (ScopeTypeInfo) {
    QualType ScopeType = ScopeTypeInfo->getType();
    if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
        !Context.hasSameUnqualifiedType(T1: ScopeType, T2: ObjectType)) {

      Diag(Loc: ScopeTypeInfo->getTypeLoc().getSourceRange().getBegin(),
           DiagID: diag::err_pseudo_dtor_type_mismatch)
          << ObjectType << ScopeType << Base->getSourceRange()
          << ScopeTypeInfo->getTypeLoc().getSourceRange();

      // Recover by dropping the scope type entirely.
      ScopeType = QualType();
      ScopeTypeInfo = nullptr;
    }
  }

  Expr *Result
    = new (Context) CXXPseudoDestructorExpr(Context, Base,
                                            OpKind == tok::arrow, OpLoc,
                                            SS.getWithLocInContext(Context),
                                            ScopeTypeInfo,
                                            CCLoc,
                                            TildeLoc,
                                            Destructed);

  return Result;
}
7184
/// Handle a parsed pseudo-destructor expression of the general form
/// 'base.[scope::]~Type()' or 'base->[scope::]~Type()', where both type
/// names arrive as unresolved identifiers or template-ids. Resolves the
/// two names to types (deferring dependent names to instantiation time)
/// and delegates to BuildPseudoDestructorExpr.
ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           CXXScopeSpec &SS,
                                           UnqualifiedId &FirstTypeName,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           UnqualifiedId &SecondTypeName) {
  assert((FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid first type name in pseudo-destructor");
  assert((SecondTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid second type name in pseudo-destructor");

  // Normalize the base ('->' vs '.') and compute the object type.
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  // Compute the object type that we should use for name lookup purposes. Only
  // record types and dependent types matter.
  ParsedType ObjectTypePtrForLookup;
  if (!SS.isSet()) {
    if (ObjectType->isRecordType())
      ObjectTypePtrForLookup = ParsedType::make(P: ObjectType);
    else if (ObjectType->isDependentType())
      ObjectTypePtrForLookup = ParsedType::make(P: Context.DependentTy);
  }

  // Convert the name of the type being destructed (following the ~) into a
  // type (with source-location information).
  QualType DestructedType;
  TypeSourceInfo *DestructedTypeInfo = nullptr;
  PseudoDestructorTypeStorage Destructed;
  if (SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
    ParsedType T = getTypeName(II: *SecondTypeName.Identifier,
                               NameLoc: SecondTypeName.StartLocation,
                               S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                               /*IsCtorOrDtorName*/true);
    if (!T &&
        ((SS.isSet() && !computeDeclContext(SS, EnteringContext: false)) ||
         (!SS.isSet() && ObjectType->isDependentType()))) {
      // The name of the type being destroyed is a dependent name, and we
      // couldn't find anything useful in scope. Just store the identifier and
      // its location, and we'll perform (qualified) name lookup again at
      // template instantiation time.
      Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier,
                                               SecondTypeName.StartLocation);
    } else if (!T) {
      // Lookup succeeded but found a non-type (or nothing) for a
      // non-dependent name: diagnose.
      Diag(Loc: SecondTypeName.StartLocation,
           DiagID: diag::err_pseudo_dtor_destructor_non_type)
        << SecondTypeName.Identifier << ObjectType;
      if (isSFINAEContext())
        return ExprError();

      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T, TInfo: &DestructedTypeInfo);
  } else {
    // Resolve the template-id to a type.
    TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
    ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                       TemplateId->NumArgs);
    TypeResult T = ActOnTemplateIdType(S,
                                       SS,
                                       TemplateKWLoc: TemplateId->TemplateKWLoc,
                                       Template: TemplateId->Template,
                                       TemplateII: TemplateId->Name,
                                       TemplateIILoc: TemplateId->TemplateNameLoc,
                                       LAngleLoc: TemplateId->LAngleLoc,
                                       TemplateArgs: TemplateArgsPtr,
                                       RAngleLoc: TemplateId->RAngleLoc,
                                       /*IsCtorOrDtorName*/true);
    if (T.isInvalid() || !T.get()) {
      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T.get(), TInfo: &DestructedTypeInfo);
  }

  // If we've performed some kind of recovery, (re-)build the type source
  // information.
  if (!DestructedType.isNull()) {
    if (!DestructedTypeInfo)
      DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: DestructedType,
                                                            Loc: SecondTypeName.StartLocation);
    Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
  }

  // Convert the name of the scope type (the type prior to '::') into a type.
  TypeSourceInfo *ScopeTypeInfo = nullptr;
  QualType ScopeType;
  if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
      FirstTypeName.Identifier) {
    if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
      ParsedType T = getTypeName(II: *FirstTypeName.Identifier,
                                 NameLoc: FirstTypeName.StartLocation,
                                 S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                                 /*IsCtorOrDtorName*/true);
      if (!T) {
        Diag(Loc: FirstTypeName.StartLocation,
             DiagID: diag::err_pseudo_dtor_destructor_non_type)
          << FirstTypeName.Identifier << ObjectType;

        if (isSFINAEContext())
          return ExprError();

        // Just drop this type. It's unnecessary anyway.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T, TInfo: &ScopeTypeInfo);
    } else {
      // Resolve the template-id to a type.
      TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
      ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                         TemplateId->NumArgs);
      TypeResult T = ActOnTemplateIdType(S,
                                         SS,
                                         TemplateKWLoc: TemplateId->TemplateKWLoc,
                                         Template: TemplateId->Template,
                                         TemplateII: TemplateId->Name,
                                         TemplateIILoc: TemplateId->TemplateNameLoc,
                                         LAngleLoc: TemplateId->LAngleLoc,
                                         TemplateArgs: TemplateArgsPtr,
                                         RAngleLoc: TemplateId->RAngleLoc,
                                         /*IsCtorOrDtorName*/true);
      if (T.isInvalid() || !T.get()) {
        // Recover by dropping this type.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T.get(), TInfo: &ScopeTypeInfo);
    }
  }

  // Synthesize trivial type-source info if the parser did not supply any.
  if (!ScopeType.isNull() && !ScopeTypeInfo)
    ScopeTypeInfo = Context.getTrivialTypeSourceInfo(T: ScopeType,
                                                     Loc: FirstTypeName.StartLocation);


  return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
                                   ScopeTypeInfo, CCLoc, TildeLoc,
                                   Destructed);
}
7329
7330ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
7331 SourceLocation OpLoc,
7332 tok::TokenKind OpKind,
7333 SourceLocation TildeLoc,
7334 const DeclSpec& DS) {
7335 QualType ObjectType;
7336 QualType T;
7337 TypeLocBuilder TLB;
7338 if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc) ||
7339 DS.getTypeSpecType() == DeclSpec::TST_error)
7340 return ExprError();
7341
7342 switch (DS.getTypeSpecType()) {
7343 case DeclSpec::TST_decltype_auto: {
7344 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
7345 return true;
7346 }
7347 case DeclSpec::TST_decltype: {
7348 T = BuildDecltypeType(E: DS.getRepAsExpr(), /*AsUnevaluated=*/false);
7349 DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
7350 DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
7351 DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
7352 break;
7353 }
7354 case DeclSpec::TST_typename_pack_indexing: {
7355 T = ActOnPackIndexingType(Pattern: DS.getRepAsType().get(), IndexExpr: DS.getPackIndexingExpr(),
7356 Loc: DS.getBeginLoc(), EllipsisLoc: DS.getEllipsisLoc());
7357 TLB.pushTrivial(Context&: getASTContext(),
7358 T: cast<PackIndexingType>(Val: T.getTypePtr())->getPattern(),
7359 Loc: DS.getBeginLoc());
7360 PackIndexingTypeLoc PITL = TLB.push<PackIndexingTypeLoc>(T);
7361 PITL.setEllipsisLoc(DS.getEllipsisLoc());
7362 break;
7363 }
7364 default:
7365 llvm_unreachable("Unsupported type in pseudo destructor");
7366 }
7367 TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
7368 PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
7369
7370 return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS: CXXScopeSpec(),
7371 ScopeTypeInfo: nullptr, CCLoc: SourceLocation(), TildeLoc,
7372 Destructed);
7373}
7374
/// Build a 'noexcept(expr)' expression: validate the (unevaluated) operand,
/// warn about side effects in it, compute whether it can throw, and produce
/// a CXXNoexceptExpr of type bool.
ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                      SourceLocation RParen) {
  // If the operand is an unresolved lookup expression, the expression is ill-
  // formed per [over.over]p1, because overloaded function names cannot be used
  // without arguments except in explicit contexts.
  ExprResult R = CheckPlaceholderExpr(E: Operand);
  if (R.isInvalid())
    return R;

  // The operand is unevaluated; apply the checks specific to unevaluated
  // operands (e.g. volatile-assignment deprecation suppression).
  R = CheckUnevaluatedOperand(E: R.get());
  if (R.isInvalid())
    return ExprError();

  Operand = R.get();

  if (!inTemplateInstantiation() && !Operand->isInstantiationDependent() &&
      Operand->HasSideEffects(Ctx: Context, IncludePossibleEffects: false)) {
    // The expression operand for noexcept is in an unevaluated expression
    // context, so side effects could result in unintended consequences.
    Diag(Loc: Operand->getExprLoc(), DiagID: diag::warn_side_effects_unevaluated_context);
  }

  // Fold the can-throw analysis into the node; noexcept(e) is always bool.
  CanThrowResult CanThrow = canThrow(E: Operand);
  return new (Context)
      CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen);
}
7401
/// Parser entry point for 'noexcept(expr)'. The unnamed SourceLocation
/// parameter is unused here (presumably the '(' location — the built AST
/// node only records the keyword and ')' locations).
ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
                                   Expr *Operand, SourceLocation RParen) {
  return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
}
7406
7407static void MaybeDecrementCount(
7408 Expr *E, llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
7409 DeclRefExpr *LHS = nullptr;
7410 bool IsCompoundAssign = false;
7411 bool isIncrementDecrementUnaryOp = false;
7412 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
7413 if (BO->getLHS()->getType()->isDependentType() ||
7414 BO->getRHS()->getType()->isDependentType()) {
7415 if (BO->getOpcode() != BO_Assign)
7416 return;
7417 } else if (!BO->isAssignmentOp())
7418 return;
7419 else
7420 IsCompoundAssign = BO->isCompoundAssignmentOp();
7421 LHS = dyn_cast<DeclRefExpr>(Val: BO->getLHS());
7422 } else if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(Val: E)) {
7423 if (COCE->getOperator() != OO_Equal)
7424 return;
7425 LHS = dyn_cast<DeclRefExpr>(Val: COCE->getArg(Arg: 0));
7426 } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) {
7427 if (!UO->isIncrementDecrementOp())
7428 return;
7429 isIncrementDecrementUnaryOp = true;
7430 LHS = dyn_cast<DeclRefExpr>(Val: UO->getSubExpr());
7431 }
7432 if (!LHS)
7433 return;
7434 VarDecl *VD = dyn_cast<VarDecl>(Val: LHS->getDecl());
7435 if (!VD)
7436 return;
7437 // Don't decrement RefsMinusAssignments if volatile variable with compound
7438 // assignment (+=, ...) or increment/decrement unary operator to avoid
7439 // potential unused-but-set-variable warning.
7440 if ((IsCompoundAssign || isIncrementDecrementUnaryOp) &&
7441 VD->getType().isVolatileQualified())
7442 return;
7443 auto iter = RefsMinusAssignments.find(Val: VD);
7444 if (iter == RefsMinusAssignments.end())
7445 return;
7446 iter->getSecond()--;
7447}
7448
/// Perform the conversions required for an expression used in a
/// context that ignores the result (a discarded-value expression).
///
/// Returns the (possibly converted) expression; on conversion failure the
/// original expression is returned rather than an error, so callers keep a
/// usable node.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
  // A write to a tracked variable in discarded position counts against its
  // reference count for the unused-but-set bookkeeping.
  MaybeDecrementCount(E, RefsMinusAssignments);

  if (E->hasPlaceholderType()) {
    ExprResult result = CheckPlaceholderExpr(E);
    if (result.isInvalid()) return E;
    E = result.get();
  }

  if (getLangOpts().CPlusPlus) {
    // The C++11 standard defines the notion of a discarded-value expression;
    // normally, we don't need to do anything to handle it, but if it is a
    // volatile lvalue with a special form, we perform an lvalue-to-rvalue
    // conversion.
    if (getLangOpts().CPlusPlus11 && E->isReadIfDiscardedInCPlusPlus11()) {
      ExprResult Res = DefaultLvalueConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    } else {
      // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
      // it occurs as a discarded-value expression.
      CheckUnusedVolatileAssignment(E);
    }

    // C++1z:
    //   If the expression is a prvalue after this optional conversion, the
    //   temporary materialization conversion is applied.
    //
    // We do not materialize temporaries by default in order to avoid creating
    // unnecessary temporary objects. If we skip this step, IR generation is
    // able to synthesize the storage for itself in the aggregate case, and
    // adding the extra node to the AST is just clutter.
    if (isInLifetimeExtendingContext() && getLangOpts().CPlusPlus17 &&
        E->isPRValue() && !E->getType()->isVoidType()) {
      ExprResult Res = TemporaryMaterializationConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    }
    return E;
  }

  // C99 6.3.2.1:
  //   [Except in specific positions,] an lvalue that does not have
  //   array type is converted to the value stored in the
  //   designated object (and is no longer an lvalue).
  if (E->isPRValue()) {
    // In C, function designators (i.e. expressions of function type)
    // are r-values, but we still want to do function-to-pointer decay
    // on them. This is both technically correct and convenient for
    // some clients.
    if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
      return DefaultFunctionArrayConversion(E);

    return E;
  }

  // GCC seems to also exclude expressions of incomplete enum type.
  if (const EnumType *T = E->getType()->getAs<EnumType>()) {
    if (!T->getDecl()->isComplete()) {
      // FIXME: stupid workaround for a codegen bug!
      E = ImpCastExprToType(E, Type: Context.VoidTy, CK: CK_ToVoid).get();
      return E;
    }
  }

  ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
  if (Res.isInvalid())
    return E;
  E = Res.get();

  // The lvalue-to-rvalue read above requires a complete type (unless the
  // result is void); diagnose, but still return the converted expression.
  if (!E->getType()->isVoidType())
    RequireCompleteType(Loc: E->getExprLoc(), T: E->getType(),
                        DiagID: diag::err_incomplete_type);
  return E;
}
7528
/// Apply the checks required for an expression appearing as an unevaluated
/// operand (e.g. the operand of noexcept). Currently this only updates the
/// volatile-assignment bookkeeping; the expression is returned unchanged.
ExprResult Sema::CheckUnevaluatedOperand(Expr *E) {
  // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
  // it occurs as an unevaluated operand.
  CheckUnusedVolatileAssignment(E);

  return E;
}
7536
7537// If we can unambiguously determine whether Var can never be used
7538// in a constant expression, return true.
7539// - if the variable and its initializer are non-dependent, then
7540// we can unambiguously check if the variable is a constant expression.
7541// - if the initializer is not value dependent - we can determine whether
7542// it can be used to initialize a constant expression. If Init can not
7543// be used to initialize a constant expression we conclude that Var can
7544// never be a constant expression.
// - FIXME: if the initializer is dependent, we can still do some analysis and
7546// identify certain cases unambiguously as non-const by using a Visitor:
7547// - such as those that involve odr-use of a ParmVarDecl, involve a new
7548// delete, lambda-expr, dynamic-cast, reinterpret-cast etc...
static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
                                                         ASTContext &Context) {
  // Function parameters never have a compile-time value.
  if (isa<ParmVarDecl>(Val: Var)) return true;
  const VarDecl *DefVD = nullptr;

  // If there is no initializer - this can not be a constant expression.
  const Expr *Init = Var->getAnyInitializer(D&: DefVD);
  if (!Init)
    return true;
  assert(DefVD);
  // A weak definition may be replaced at link time; we cannot conclude
  // anything definite, so answer "might be constant".
  if (DefVD->isWeak())
    return false;

  if (Var->getType()->isDependentType() || Init->isValueDependent()) {
    // FIXME: Teach the constant evaluator to deal with the non-dependent parts
    // of value-dependent expressions, and use it here to determine whether the
    // initializer is a potential constant expression.
    return false;
  }

  // Non-dependent case: the constant-expression check is exact.
  return !Var->isUsableInConstantExpressions(C: Context);
}
7571
/// Check if the current lambda has any potential captures
/// that must be captured by any of its enclosing lambdas that are ready to
/// capture. If there is a lambda that can capture a nested
/// potential-capture, go ahead and do so. Also, check to see if any
/// variables are uncaptureable or do not involve an odr-use so do not
/// need to be captured.
///
/// \param FE the full-expression that has just finished.
/// \param CurrentLSI the scope info of the innermost lambda.
/// Clears CurrentLSI's potential captures before returning.
static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
    Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) {

  assert(!S.isUnevaluatedContext());
  assert(S.CurContext->isDependentContext());
#ifndef NDEBUG
  DeclContext *DC = S.CurContext;
  while (isa_and_nonnull<CapturedDecl>(DC))
    DC = DC->getParent();
  assert(
      (CurrentLSI->CallOperator == DC || !CurrentLSI->AfterParameterList) &&
      "The current call operator must be synchronized with Sema's CurContext");
#endif // NDEBUG

  const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent();

  // All the potentially captureable variables in the current nested
  // lambda (within a generic outer lambda), must be captured by an
  // outer lambda that is enclosed within a non-dependent context.
  CurrentLSI->visitPotentialCaptures(Callback: [&](ValueDecl *Var, Expr *VarExpr) {
    // If the variable is clearly identified as non-odr-used and the full
    // expression is not instantiation dependent, only then do we not
    // need to check enclosing lambda's for speculative captures.
    // For e.g.:
    // Even though 'x' is not odr-used, it should be captured.
    // int test() {
    //   const int x = 10;
    //   auto L = [=](auto a) {
    //     (void) +x + a;
    //   };
    // }
    if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(CapturingVarExpr: VarExpr) &&
        !IsFullExprInstantiationDependent)
      return;

    // Only (possibly decomposed) variables can be captured.
    VarDecl *UnderlyingVar = Var->getPotentiallyDecomposedVarDecl();
    if (!UnderlyingVar)
      return;

    // If we have a capture-capable lambda for the variable, go ahead and
    // capture the variable in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, VarToCapture: Var, S))
      S.MarkCaptureUsedInEnclosingContext(Capture: Var, Loc: VarExpr->getExprLoc(), CapturingScopeIndex: *Index);
    const bool IsVarNeverAConstantExpression =
        VariableCanNeverBeAConstantExpression(Var: UnderlyingVar, Context&: S.Context);
    if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
      // This full expression is not instantiation dependent or the variable
      // can not be used in a constant expression - which means
      // this variable must be odr-used here, so diagnose a
      // capture violation early, if the variable is un-captureable.
      // This is purely for diagnosing errors early.  Otherwise, this
      // error would get diagnosed when the lambda becomes capture ready.
      QualType CaptureType, DeclRefType;
      SourceLocation ExprLoc = VarExpr->getExprLoc();
      // First do a silent dry run; only if it fails, repeat with
      // BuildAndDiagnose to actually emit the error.
      if (S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                               /*EllipsisLoc*/ SourceLocation(),
                               /*BuildAndDiagnose*/ false, CaptureType,
                               DeclRefType, FunctionScopeIndexToStopAt: nullptr)) {
        // We will never be able to capture this variable, and we need
        // to be able to in any and all instantiations, so diagnose it.
        S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                             /*EllipsisLoc*/ SourceLocation(),
                             /*BuildAndDiagnose*/ true, CaptureType,
                             DeclRefType, FunctionScopeIndexToStopAt: nullptr);
      }
    }
  });

  // Check if 'this' needs to be captured.
  if (CurrentLSI->hasPotentialThisCapture()) {
    // If we have a capture-capable lambda for 'this', go ahead and capture
    // 'this' in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, /*0 is 'this'*/ VarToCapture: nullptr, S)) {
      const unsigned FunctionScopeIndexOfCapturableLambda = *Index;
      S.CheckCXXThisCapture(Loc: CurrentLSI->PotentialThisCaptureLocation,
                            /*Explicit*/ false, /*BuildAndDiagnose*/ true,
                            FunctionScopeIndexToStopAt: &FunctionScopeIndexOfCapturableLambda);
    }
  }

  // Reset all the potential captures at the end of each full-expression.
  CurrentLSI->clearPotentialCaptures();
}
7666
/// Finish semantic analysis of a full-expression: diagnose unexpanded
/// parameter packs, apply discarded-value conversions and unused-result
/// diagnostics when \p DiscardedValue is set, run completed-expression
/// checks, resolve pending lambda captures, and wrap the result in an
/// ExprWithCleanups if cleanups are required.
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
                                     bool DiscardedValue, bool IsConstexpr,
                                     bool IsTemplateArgument) {
  ExprResult FullExpr = FE;

  if (!FullExpr.get())
    return ExprError();

  // Template arguments may legitimately contain unexpanded packs.
  if (!IsTemplateArgument && DiagnoseUnexpandedParameterPack(E: FullExpr.get()))
    return ExprError();

  if (DiscardedValue) {
    // Top-level expressions default to 'id' when we're in a debugger.
    if (getLangOpts().DebuggerCastResultToId &&
        FullExpr.get()->getType() == Context.UnknownAnyTy) {
      FullExpr = forceUnknownAnyToType(E: FullExpr.get(), ToType: Context.getObjCIdType());
      if (FullExpr.isInvalid())
        return ExprError();
    }

    FullExpr = CheckPlaceholderExpr(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    FullExpr = IgnoredValueConversions(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    DiagnoseUnusedExprResult(S: FullExpr.get(), DiagID: diag::warn_unused_expr);
  }

  if (FullExpr.isInvalid())
    return ExprError();

  CheckCompletedExpr(E: FullExpr.get(), CheckLoc: CC, IsConstexpr);

  // At the end of this full expression (which could be a deeply nested
  // lambda), if there is a potential capture within the nested lambda,
  // have the outer capture-able lambda try and capture it.
  // Consider the following code:
  // void f(int, int);
  // void f(const int&, double);
  // void foo() {
  //   const int x = 10, y = 20;
  //   auto L = [=](auto a) {
  //       auto M = [=](auto b) {
  //          f(x, b); <-- requires x to be captured by L and M
  //          f(y, a); <-- requires y to be captured by L, but not all Ms
  //       };
  //   };
  // }

  // FIXME: Also consider what happens for something like this that involves
  // the gnu-extension statement-expressions or even lambda-init-captures:
  //   void f() {
  //     const int n = 0;
  //     auto L =  [&](auto a) {
  //       +n + ({ 0; a; });
  //     };
  //   }
  //
  // Here, we see +n, and then the full-expression 0; ends, so we don't
  // capture n (and instead remove it from our list of potential captures),
  // and then the full-expression +n + ({ 0; }); ends, but it's too late
  // for us to see that we need to capture n after all.

  LambdaScopeInfo *const CurrentLSI =
      getCurLambda(/*IgnoreCapturedRegions=*/IgnoreNonLambdaCapturingScope: true);
  // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer
  // even if CurContext is not a lambda call operator.  Refer to that Bug Report
  // for an example of the code that might cause this asynchrony.
  // By ensuring we are in the context of a lambda's call operator
  // we can fix the bug (we only need to check whether we need to capture
  // if we are within a lambda's body); but per the comments in that
  // PR, a proper fix would entail :
  // "Alternative suggestion:
  //  - Add to Sema an integer holding the smallest (outermost) scope
  //    index that we are *lexically* within, and save/restore/set to
  //    FunctionScopes.size() in InstantiatingTemplate's
  //    constructor/destructor.
  //  - Teach the handful of places that iterate over FunctionScopes to
  //    stop at the outermost enclosing lexical scope."
  DeclContext *DC = CurContext;
  // Skip captured regions (e.g. OpenMP) to find the enclosing declaration.
  while (isa_and_nonnull<CapturedDecl>(Val: DC))
    DC = DC->getParent();
  const bool IsInLambdaDeclContext = isLambdaCallOperator(DC);
  if (IsInLambdaDeclContext && CurrentLSI &&
      CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid())
    CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI,
                                                              S&: *this);
  return MaybeCreateExprWithCleanups(SubExpr: FullExpr);
}
7759
/// Finish a full statement, wrapping it in a cleanups node if any
/// temporaries require destruction. A null statement yields an error.
StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
  if (!FullStmt) return StmtError();

  return MaybeCreateStmtWithCleanups(SubStmt: FullStmt);
}
7765
7766IfExistsResult
7767Sema::CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
7768 const DeclarationNameInfo &TargetNameInfo) {
7769 DeclarationName TargetName = TargetNameInfo.getName();
7770 if (!TargetName)
7771 return IfExistsResult::DoesNotExist;
7772
7773 // If the name itself is dependent, then the result is dependent.
7774 if (TargetName.isDependentName())
7775 return IfExistsResult::Dependent;
7776
7777 // Do the redeclaration lookup in the current scope.
7778 LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
7779 RedeclarationKind::NotForRedeclaration);
7780 LookupParsedName(R, S, SS: &SS, /*ObjectType=*/QualType());
7781 R.suppressDiagnostics();
7782
7783 switch (R.getResultKind()) {
7784 case LookupResultKind::Found:
7785 case LookupResultKind::FoundOverloaded:
7786 case LookupResultKind::FoundUnresolvedValue:
7787 case LookupResultKind::Ambiguous:
7788 return IfExistsResult::Exists;
7789
7790 case LookupResultKind::NotFound:
7791 return IfExistsResult::DoesNotExist;
7792
7793 case LookupResultKind::NotFoundInCurrentInstantiation:
7794 return IfExistsResult::Dependent;
7795 }
7796
7797 llvm_unreachable("Invalid LookupResult Kind!");
7798}
7799
7800IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
7801 SourceLocation KeywordLoc,
7802 bool IsIfExists,
7803 CXXScopeSpec &SS,
7804 UnqualifiedId &Name) {
7805 DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
7806
7807 // Check for an unexpanded parameter pack.
7808 auto UPPC = IsIfExists ? UPPC_IfExists : UPPC_IfNotExists;
7809 if (DiagnoseUnexpandedParameterPack(SS, UPPC) ||
7810 DiagnoseUnexpandedParameterPack(NameInfo: TargetNameInfo, UPPC))
7811 return IfExistsResult::Error;
7812
7813 return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
7814}
7815
7816concepts::Requirement *Sema::ActOnSimpleRequirement(Expr *E) {
7817 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: true,
7818 /*NoexceptLoc=*/SourceLocation(),
7819 /*ReturnTypeRequirement=*/{});
7820}
7821
7822concepts::Requirement *Sema::ActOnTypeRequirement(
7823 SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
7824 const IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId) {
7825 assert(((!TypeName && TemplateId) || (TypeName && !TemplateId)) &&
7826 "Exactly one of TypeName and TemplateId must be specified.");
7827 TypeSourceInfo *TSI = nullptr;
7828 if (TypeName) {
7829 QualType T =
7830 CheckTypenameType(Keyword: ElaboratedTypeKeyword::Typename, KeywordLoc: TypenameKWLoc,
7831 QualifierLoc: SS.getWithLocInContext(Context), II: *TypeName, IILoc: NameLoc,
7832 TSI: &TSI, /*DeducedTSTContext=*/false);
7833 if (T.isNull())
7834 return nullptr;
7835 } else {
7836 ASTTemplateArgsPtr ArgsPtr(TemplateId->getTemplateArgs(),
7837 TemplateId->NumArgs);
7838 TypeResult T = ActOnTypenameType(S: CurScope, TypenameLoc: TypenameKWLoc, SS,
7839 TemplateLoc: TemplateId->TemplateKWLoc,
7840 TemplateName: TemplateId->Template, TemplateII: TemplateId->Name,
7841 TemplateIILoc: TemplateId->TemplateNameLoc,
7842 LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: ArgsPtr,
7843 RAngleLoc: TemplateId->RAngleLoc);
7844 if (T.isInvalid())
7845 return nullptr;
7846 if (GetTypeFromParser(Ty: T.get(), TInfo: &TSI).isNull())
7847 return nullptr;
7848 }
7849 return BuildTypeRequirement(Type: TSI);
7850}
7851
7852concepts::Requirement *
7853Sema::ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc) {
7854 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7855 /*ReturnTypeRequirement=*/{});
7856}
7857
7858concepts::Requirement *
7859Sema::ActOnCompoundRequirement(
7860 Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
7861 TemplateIdAnnotation *TypeConstraint, unsigned Depth) {
7862 // C++2a [expr.prim.req.compound] p1.3.3
7863 // [..] the expression is deduced against an invented function template
7864 // F [...] F is a void function template with a single type template
7865 // parameter T declared with the constrained-parameter. Form a new
7866 // cv-qualifier-seq cv by taking the union of const and volatile specifiers
7867 // around the constrained-parameter. F has a single parameter whose
7868 // type-specifier is cv T followed by the abstract-declarator. [...]
7869 //
7870 // The cv part is done in the calling function - we get the concept with
7871 // arguments and the abstract declarator with the correct CV qualification and
7872 // have to synthesize T and the single parameter of F.
7873 auto &II = Context.Idents.get(Name: "expr-type");
7874 auto *TParam = TemplateTypeParmDecl::Create(C: Context, DC: CurContext,
7875 KeyLoc: SourceLocation(),
7876 NameLoc: SourceLocation(), D: Depth,
7877 /*Index=*/P: 0, Id: &II,
7878 /*Typename=*/true,
7879 /*ParameterPack=*/false,
7880 /*HasTypeConstraint=*/true);
7881
7882 if (BuildTypeConstraint(SS, TypeConstraint, ConstrainedParameter: TParam,
7883 /*EllipsisLoc=*/SourceLocation(),
7884 /*AllowUnexpandedPack=*/true))
7885 // Just produce a requirement with no type requirements.
7886 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc, ReturnTypeRequirement: {});
7887
7888 auto *TPL = TemplateParameterList::Create(C: Context, TemplateLoc: SourceLocation(),
7889 LAngleLoc: SourceLocation(),
7890 Params: ArrayRef<NamedDecl *>(TParam),
7891 RAngleLoc: SourceLocation(),
7892 /*RequiresClause=*/nullptr);
7893 return BuildExprRequirement(
7894 E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7895 ReturnTypeRequirement: concepts::ExprRequirement::ReturnTypeRequirement(TPL));
7896}
7897
/// Build an expression requirement (simple or compound), determining its
/// satisfaction status up front when the requirement is non-dependent.
concepts::ExprRequirement *
Sema::BuildExprRequirement(
    Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
    concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
  // Classify satisfaction; the order of these checks matters — any form of
  // dependence defers everything to instantiation time.
  auto Status = concepts::ExprRequirement::SS_Satisfied;
  ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
  if (E->isInstantiationDependent() || E->getType()->isPlaceholderType() ||
      ReturnTypeRequirement.isDependent())
    Status = concepts::ExprRequirement::SS_Dependent;
  else if (NoexceptLoc.isValid() && canThrow(E) == CanThrowResult::CT_Can)
    // '{ E } noexcept' requires that E is a non-throwing expression.
    Status = concepts::ExprRequirement::SS_NoexceptNotMet;
  else if (ReturnTypeRequirement.isSubstitutionFailure())
    Status = concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure;
  else if (ReturnTypeRequirement.isTypeConstraint()) {
    // C++2a [expr.prim.req]p1.3.3
    //     The immediately-declared constraint ([temp]) of decltype((E)) shall
    //     be satisfied.
    TemplateParameterList *TPL =
        ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
    // decltype((E)): the reference-qualified type of the expression.
    QualType MatchedType =
        Context.getReferenceQualifiedType(e: E).getCanonicalType();
    llvm::SmallVector<TemplateArgument, 1> Args;
    Args.push_back(Elt: TemplateArgument(MatchedType));

    auto *Param = cast<TemplateTypeParmDecl>(Val: TPL->getParam(Idx: 0));

    // Substitute decltype((E)) for the invented parameter, retaining any
    // outer template levels, and evaluate the immediately-declared
    // constraint.
    MultiLevelTemplateArgumentList MLTAL(Param, Args, /*Final=*/false);
    MLTAL.addOuterRetainedLevels(Num: TPL->getDepth());
    const TypeConstraint *TC = Param->getTypeConstraint();
    assert(TC && "Type Constraint cannot be null here");
    auto *IDC = TC->getImmediatelyDeclaredConstraint();
    assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
    ExprResult Constraint = SubstExpr(E: IDC, TemplateArgs: MLTAL);
    if (Constraint.isInvalid()) {
      // Substitution failed: record a pretty-printed form of the constraint
      // in place of the expression for later diagnostics.
      return new (Context) concepts::ExprRequirement(
          createSubstDiagAt(Location: IDC->getExprLoc(),
                            Printer: [&](llvm::raw_ostream &OS) {
                              IDC->printPretty(OS, /*Helper=*/nullptr,
                                               Policy: getPrintingPolicy());
                            }),
          IsSimple, NoexceptLoc, ReturnTypeRequirement);
    }
    SubstitutedConstraintExpr =
        cast<ConceptSpecializationExpr>(Val: Constraint.get());
    if (!SubstitutedConstraintExpr->isSatisfied())
      Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
  }
  return new (Context) concepts::ExprRequirement(E, IsSimple, NoexceptLoc,
                                                 ReturnTypeRequirement, Status,
                                                 SubstitutedConstraintExpr);
}
7949
7950concepts::ExprRequirement *
7951Sema::BuildExprRequirement(
7952 concepts::Requirement::SubstitutionDiagnostic *ExprSubstitutionDiagnostic,
7953 bool IsSimple, SourceLocation NoexceptLoc,
7954 concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
7955 return new (Context) concepts::ExprRequirement(ExprSubstitutionDiagnostic,
7956 IsSimple, NoexceptLoc,
7957 ReturnTypeRequirement);
7958}
7959
7960concepts::TypeRequirement *
7961Sema::BuildTypeRequirement(TypeSourceInfo *Type) {
7962 return new (Context) concepts::TypeRequirement(Type);
7963}
7964
7965concepts::TypeRequirement *
7966Sema::BuildTypeRequirement(
7967 concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
7968 return new (Context) concepts::TypeRequirement(SubstDiag);
7969}
7970
7971concepts::Requirement *Sema::ActOnNestedRequirement(Expr *Constraint) {
7972 return BuildNestedRequirement(E: Constraint);
7973}
7974
7975concepts::NestedRequirement *
7976Sema::BuildNestedRequirement(Expr *Constraint) {
7977 ConstraintSatisfaction Satisfaction;
7978 if (!Constraint->isInstantiationDependent() &&
7979 CheckConstraintSatisfaction(Template: nullptr, AssociatedConstraints: AssociatedConstraint(Constraint),
7980 /*TemplateArgs=*/TemplateArgLists: {},
7981 TemplateIDRange: Constraint->getSourceRange(), Satisfaction))
7982 return nullptr;
7983 return new (Context) concepts::NestedRequirement(Context, Constraint,
7984 Satisfaction);
7985}
7986
7987concepts::NestedRequirement *
7988Sema::BuildNestedRequirement(StringRef InvalidConstraintEntity,
7989 const ASTConstraintSatisfaction &Satisfaction) {
7990 return new (Context) concepts::NestedRequirement(
7991 InvalidConstraintEntity,
7992 ASTConstraintSatisfaction::Rebuild(C: Context, Satisfaction));
7993}
7994
7995RequiresExprBodyDecl *
7996Sema::ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
7997 ArrayRef<ParmVarDecl *> LocalParameters,
7998 Scope *BodyScope) {
7999 assert(BodyScope);
8000
8001 RequiresExprBodyDecl *Body = RequiresExprBodyDecl::Create(C&: Context, DC: CurContext,
8002 StartLoc: RequiresKWLoc);
8003
8004 PushDeclContext(S: BodyScope, DC: Body);
8005
8006 for (ParmVarDecl *Param : LocalParameters) {
8007 if (Param->getType()->isVoidType()) {
8008 if (LocalParameters.size() > 1) {
8009 Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_only_param);
8010 Param->setType(Context.IntTy);
8011 } else if (Param->getIdentifier()) {
8012 Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_param_with_void_type);
8013 Param->setType(Context.IntTy);
8014 } else if (Param->getType().hasQualifiers()) {
8015 Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_param_qualified);
8016 }
8017 } else if (Param->hasDefaultArg()) {
8018 // C++2a [expr.prim.req] p4
8019 // [...] A local parameter of a requires-expression shall not have a
8020 // default argument. [...]
8021 Diag(Loc: Param->getDefaultArgRange().getBegin(),
8022 DiagID: diag::err_requires_expr_local_parameter_default_argument);
8023 // Ignore default argument and move on
8024 } else if (Param->isExplicitObjectParameter()) {
8025 // C++23 [dcl.fct]p6:
8026 // An explicit-object-parameter-declaration is a parameter-declaration
8027 // with a this specifier. An explicit-object-parameter-declaration
8028 // shall appear only as the first parameter-declaration of a
8029 // parameter-declaration-list of either:
8030 // - a member-declarator that declares a member function, or
8031 // - a lambda-declarator.
8032 //
8033 // The parameter-declaration-list of a requires-expression is not such
8034 // a context.
8035 Diag(Loc: Param->getExplicitObjectParamThisLoc(),
8036 DiagID: diag::err_requires_expr_explicit_object_parameter);
8037 Param->setExplicitObjectParameterLoc(SourceLocation());
8038 }
8039
8040 Param->setDeclContext(Body);
8041 // If this has an identifier, add it to the scope stack.
8042 if (Param->getIdentifier()) {
8043 CheckShadow(S: BodyScope, D: Param);
8044 PushOnScopeChains(D: Param, S: BodyScope);
8045 }
8046 }
8047 return Body;
8048}
8049
/// Leave the requires-expression body: pops the RequiresExprBodyDecl pushed
/// by ActOnStartRequiresExpr, restoring the enclosing lexical DeclContext.
void Sema::ActOnFinishRequiresExpr() {
  assert(CurContext && "DeclContext imbalance!");
  CurContext = CurContext->getLexicalParent();
  assert(CurContext && "Popped translation unit!");
}
8055
8056ExprResult Sema::ActOnRequiresExpr(
8057 SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body,
8058 SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters,
8059 SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements,
8060 SourceLocation ClosingBraceLoc) {
8061 auto *RE = RequiresExpr::Create(C&: Context, RequiresKWLoc, Body, LParenLoc,
8062 LocalParameters, RParenLoc, Requirements,
8063 RBraceLoc: ClosingBraceLoc);
8064 if (DiagnoseUnexpandedParameterPackInRequiresExpr(RE))
8065 return ExprError();
8066 return RE;
8067}
8068

source code of clang/lib/Sema/SemaExprCXX.cpp