1 | //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This contains code dealing with code generation of C++ expressions |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CGCUDARuntime.h" |
14 | #include "CGCXXABI.h" |
15 | #include "CGDebugInfo.h" |
16 | #include "CGObjCRuntime.h" |
17 | #include "CodeGenFunction.h" |
18 | #include "ConstantEmitter.h" |
19 | #include "TargetInfo.h" |
20 | #include "clang/Basic/CodeGenOptions.h" |
21 | #include "clang/CodeGen/CGFunctionInfo.h" |
22 | #include "llvm/IR/Intrinsics.h" |
23 | |
24 | using namespace clang; |
25 | using namespace CodeGen; |
26 | |
27 | namespace { |
28 | struct MemberCallInfo { |
29 | RequiredArgs ReqArgs; |
30 | // Number of prefix arguments for the call. Ignores the `this` pointer. |
31 | unsigned PrefixSize; |
32 | }; |
33 | } |
34 | |
35 | static MemberCallInfo |
36 | commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD, |
37 | llvm::Value *This, llvm::Value *ImplicitParam, |
38 | QualType ImplicitParamTy, const CallExpr *CE, |
39 | CallArgList &Args, CallArgList *RtlArgs) { |
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = 0;
    if (const auto *Op = dyn_cast<CXXOperatorCallExpr>(CE)) {
      if (const auto *M = dyn_cast<CXXMethodDecl>(Op->getCalleeDecl()))
        ArgsToSkip =
            static_cast<unsigned>(!M->isExplicitObjectMemberFunction());
    }
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
83 | } |
84 | |
85 | RValue CodeGenFunction::EmitCXXMemberOrOperatorCall( |
86 | const CXXMethodDecl *MD, const CGCallee &Callee, |
87 | ReturnValueSlot ReturnValue, |
88 | llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy, |
89 | const CallExpr *CE, CallArgList *RtlArgs) { |
90 | const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); |
91 | CallArgList Args; |
92 | MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall( |
93 | *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs); |
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
96 | return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr, |
97 | CE && CE == MustTailCall, |
98 | CE ? CE->getExprLoc() : SourceLocation()); |
99 | } |
100 | |
101 | RValue CodeGenFunction::EmitCXXDestructorCall( |
102 | GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy, |
103 | llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) { |
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
                                                 NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
125 | } |
126 | |
127 | RValue CodeGenFunction::EmitCXXPseudoDestructorExpr( |
128 | const CXXPseudoDestructorExpr *E) { |
129 | QualType DestroyedType = E->getDestroyedType(); |
130 | if (DestroyedType.hasStrongOrWeakObjCLifetime()) { |
131 | // Automatic Reference Counting: |
132 | // If the pseudo-expression names a retainable object with weak or |
133 | // strong lifetime, the object shall be released. |
134 | Expr *BaseExpr = E->getBase(); |
135 | Address BaseValue = Address::invalid(); |
136 | Qualifiers BaseQuals; |
137 | |
138 | // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. |
139 | if (E->isArrow()) { |
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress(*this);
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
164 | break; |
165 | } |
166 | } else { |
167 | // C++ [expr.pseudo]p1: |
168 | // The result shall only be used as the operand for the function call |
169 | // operator (), and the result of such a call has type void. The only |
170 | // effect is the evaluation of the postfix-expression before the dot or |
171 | // arrow. |
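    // For example, given 'typedef int I;', a call 'p->~I()' on an 'int *p'
    // destroys nothing; we only evaluate 'p' for its side effects.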
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
176 | } |
177 | |
178 | static CXXRecordDecl *getCXXRecord(const Expr *E) { |
179 | QualType T = E->getType(); |
180 | if (const PointerType *PTy = T->getAs<PointerType>()) |
181 | T = PTy->getPointeeType(); |
182 | const RecordType *Ty = T->castAs<RecordType>(); |
  return cast<CXXRecordDecl>(Ty->getDecl());
184 | } |
185 | |
// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
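// For example, under -fms-extensions:
//   struct S { S(int); };
//   void f(S *p) { p->S::S(42); }  // explicitly (re)constructs the object at p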
188 | RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE, |
189 | ReturnValueSlot ReturnValue) { |
190 | const Expr *callee = CE->getCallee()->IgnoreParens(); |
191 | |
  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
202 | return EmitCall(getContext().getPointerType(MD->getType()), callee, CE, |
203 | ReturnValue); |
204 | } |
205 | |
206 | bool HasQualifier = ME->hasQualifier(); |
207 | NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr; |
208 | bool IsArrow = ME->isArrow(); |
209 | const Expr *Base = ME->getBase(); |
210 | |
211 | return EmitCXXMemberOrOperatorMemberCallExpr( |
212 | CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base); |
213 | } |
214 | |
215 | RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( |
216 | const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, |
217 | bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, |
218 | const Expr *Base) { |
219 | assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE)); |
220 | |
221 | // Compute the object pointer. |
222 | bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier; |
223 | |
224 | const CXXMethodDecl *DevirtualizedMethod = nullptr; |
225 | if (CanUseVirtualCall && |
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
245 | // If the method is defined in a class that is not the best dynamic |
246 | // one or the one of the full expression, we would have to build |
247 | // a derived-to-base cast to compute the correct this pointer, but |
248 | // we don't have support for that yet, so do a virtual call. |
249 | DevirtualizedMethod = nullptr; |
250 | } |
251 | } |
252 | |
253 | bool TrivialForCodegen = |
254 | MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion()); |
255 | bool TrivialAssignment = |
256 | TrivialForCodegen && |
257 | (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && |
258 | !MD->getParent()->mayInsertExtraPadding(); |
259 | |
260 | // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment |
261 | // operator before the LHS. |
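  // For example, in 'lhs = rhs()' with an overloaded operator=, 'rhs()' must
  // be evaluated before the 'this' argument formed from 'lhs'.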
262 | CallArgList RtlArgStorage; |
263 | CallArgList *RtlArgs = nullptr; |
264 | LValue TrivialAssignmentRHS; |
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
274 | } |
275 | } |
276 | } |
277 | |
278 | LValue This; |
279 | if (IsArrow) { |
280 | LValueBaseInfo BaseInfo; |
281 | TBAAAccessInfo TBAAInfo; |
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(),
                          BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
        /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(*this), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
305 | } |
306 | |
307 | if (TrivialForCodegen) { |
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE) ? TrivialAssignmentRHS
                                                : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
326 | } |
327 | |
328 | // Compute the function type we're calling. |
329 | const CXXMethodDecl *CalleeDecl = |
330 | DevirtualizedMethod ? DevirtualizedMethod : MD; |
331 | const CGFunctionInfo *FInfo = nullptr; |
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
339 | |
340 | // C++11 [class.mfct.non-static]p2: |
341 | // If a non-static member function of a class X is called for an object that |
342 | // is not of type X, or of a type derived from X, the behavior is undefined. |
343 | SourceLocation CallLoc; |
344 | ASTContext &C = getContext(); |
345 | if (CE) |
346 | CallLoc = CE->getExprLoc(); |
347 | |
348 | SanitizerSet SkippedChecks; |
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }

  if (sanitizePerformTypeCheck())
    EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                  This.emitRawPointer(*this),
                  C.getRecordType(CalleeDecl->getParent()),
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
363 | |
364 | // C++ [class.virtual]p12: |
365 | // Explicit qualification with the scope operator (5.1) suppresses the |
366 | // virtual call mechanism. |
367 | // |
368 | // We also don't emit a virtual call if the base expression has a record type |
369 | // because then we know what the type is. |
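  // For example:
  //   struct B { virtual void f(); };
  //   struct D : B { void f() override; };
  //   void g(D &d) { d.B::f(); }  // direct call to B::f, no vtable dispatch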
370 | bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod; |
371 | |
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                This.getAddress(*this),
                                                cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE);
    }
    return RValue::get(nullptr);
399 | } |
400 | |
401 | // FIXME: Uses of 'MD' past this point need to be audited. We may need to use |
402 | // 'CalleeDecl' instead. |
403 | |
404 | CGCallee Callee; |
405 | if (UseVirtualCall) { |
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(*this), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
439 | } |
440 | |
441 | RValue |
442 | CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, |
443 | ReturnValueSlot ReturnValue) { |
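  // A member-pointer call has the form '(base .* memfn)(args)' or
  // '(base ->* memfn)(args)'; the callee here is the '.*' or '->*'
  // BinaryOperator.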
444 | const BinaryOperator *BO = |
445 | cast<BinaryOperator>(E->getCallee()->IgnoreParens()); |
446 | const Expr *BaseExpr = BO->getLHS(); |
447 | const Expr *MemFnExpr = BO->getRHS(); |
448 | |
449 | const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>(); |
450 | const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>(); |
451 | const auto *RD = |
452 | cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl()); |
453 | |
454 | // Emit the 'this' pointer. |
455 | Address This = Address::invalid(); |
456 | if (BO->getOpcode() == BO_PtrMemI) |
    This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
  else
    This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee. Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
487 | Callee, ReturnValue, Args, nullptr, E == MustTailCall, |
488 | E->getExprLoc()); |
489 | } |
490 | |
491 | RValue |
492 | CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, |
493 | const CXXMethodDecl *MD, |
494 | ReturnValueSlot ReturnValue) { |
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
500 | } |
501 | |
502 | RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, |
503 | ReturnValueSlot ReturnValue) { |
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
505 | } |
506 | |
507 | static void EmitNullBaseClassInitialization(CodeGenFunction &CGF, |
508 | Address DestPtr, |
509 | const CXXRecordDecl *Base) { |
510 | if (Base->isEmpty()) |
511 | return; |
512 | |
  DestPtr = DestPtr.withElementType(CGF.Int8Ty);
514 | |
515 | const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base); |
516 | CharUnits NVSize = Layout.getNonVirtualSize(); |
517 | |
  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling
  // the constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);
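  // (A vbptr is the virtual base table pointer used by the Microsoft C++
  // ABI; it lives within the non-virtual part of the object.)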
523 | |
524 | // Each store is split by the existence of a vbptr. |
525 | CharUnits VBPtrWidth = CGF.getPointerSize(); |
526 | std::vector<CharUnits> VBPtrOffsets = |
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
528 | for (CharUnits VBPtrOffset : VBPtrOffsets) { |
529 | // Stop before we hit any virtual base pointers located in virtual bases. |
530 | if (VBPtrOffset >= NVSize) |
531 | break; |
532 | std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val(); |
533 | CharUnits LastStoreOffset = LastStore.first; |
534 | CharUnits LastStoreSize = LastStore.second; |
535 | |
536 | CharUnits SplitBeforeOffset = LastStoreOffset; |
537 | CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset; |
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
547 | } |
548 | |
549 | // If the type contains a pointer to data member we can't memset it to zero. |
550 | // Instead, create a null constant and copy it to the destination. |
551 | // TODO: there are other patterns besides zero that we can usefully memset, |
552 | // like -1, which happens to be the pattern used by member-pointers. |
553 | // TODO: isZeroInitializable can be over-conservative in the case where a |
554 | // virtual base contains a member pointer. |
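  // For example, under the Itanium C++ ABI a null pointer to data member is
  // represented as -1, not 0, so a plain zero fill would produce a non-null
  // member pointer.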
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
556 | if (!NullConstantForBase->isNullValue()) { |
557 | llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable( |
558 | CGF.CGM.getModule(), NullConstantForBase->getType(), |
559 | /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, |
560 | NullConstantForBase, Twine()); |
561 | |
    CharUnits Align =
        std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
564 | NullVariable->setAlignment(Align.getAsAlign()); |
565 | |
566 | Address SrcPtr(NullVariable, CGF.Int8Ty, Align); |
567 | |
568 | // Get and call the appropriate llvm.memcpy overload. |
569 | for (std::pair<CharUnits, CharUnits> Store : Stores) { |
570 | CharUnits StoreOffset = Store.first; |
571 | CharUnits StoreSize = Store.second; |
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

    // Otherwise, just memset the whole thing to zero. This is legal
    // because in LLVM, all default initializers (other than the ones we just
    // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
590 | } |
591 | } |
592 | } |
593 | |
594 | void |
595 | CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E, |
596 | AggValueSlot Dest) { |
  assert(!Dest.isIgnored() && "Must have a destination!");
598 | const CXXConstructorDecl *CD = E->getConstructor(); |
599 | |
600 | // If we require zero initialization before (or instead of) calling the |
601 | // constructor, as can be the case with a non-user-provided default |
602 | // constructor, emit the zero initialization now, unless destination is |
603 | // already zeroed. |
604 | if (E->requiresZeroInitialization() && !Dest.isZeroed()) { |
605 | switch (E->getConstructionKind()) { |
606 | case CXXConstructionKind::Delegating: |
607 | case CXXConstructionKind::Complete: |
      EmitNullInitialization(Dest.getAddress(), E->getType());
609 | break; |
610 | case CXXConstructionKind::VirtualBase: |
611 | case CXXConstructionKind::NonVirtualBase: |
612 | EmitNullBaseClassInitialization(*this, Dest.getAddress(), |
613 | CD->getParent()); |
614 | break; |
615 | } |
616 | } |
617 | |
618 | // If this is a call to a trivial default constructor, do nothing. |
619 | if (CD->isTrivial() && CD->isDefaultConstructor()) |
620 | return; |
621 | |
622 | // Elide the constructor if we're constructing from a temporary. |
623 | if (getLangOpts().ElideConstructors && E->isElidable()) { |
624 | // FIXME: This only handles the simplest case, where the source object |
625 | // is passed directly as the first argument to the constructor. |
626 | // This should also handle stepping though implicit casts and |
627 | // conversion sequences which involve two steps, with a |
628 | // conversion operator followed by a converting constructor. |
    const Expr *SrcObj = E->getArg(0);
630 | assert(SrcObj->isTemporaryObject(getContext(), CD->getParent())); |
631 | assert( |
632 | getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType())); |
    EmitAggExpr(SrcObj, Dest);
634 | return; |
635 | } |
636 | |
  if (const ArrayType *arrayType =
          getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
641 | } else { |
642 | CXXCtorType Type = Ctor_Complete; |
643 | bool ForVirtualBase = false; |
644 | bool Delegating = false; |
645 | |
646 | switch (E->getConstructionKind()) { |
647 | case CXXConstructionKind::Delegating: |
648 | // We should be emitting a constructor; GlobalDecl will assert this |
649 | Type = CurGD.getCtorType(); |
650 | Delegating = true; |
651 | break; |
652 | |
653 | case CXXConstructionKind::Complete: |
654 | Type = Ctor_Complete; |
655 | break; |
656 | |
657 | case CXXConstructionKind::VirtualBase: |
658 | ForVirtualBase = true; |
659 | [[fallthrough]]; |
660 | |
661 | case CXXConstructionKind::NonVirtualBase: |
662 | Type = Ctor_Base; |
663 | } |
664 | |
665 | // Call the constructor. |
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
667 | } |
668 | } |
669 | |
670 | void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, |
671 | const Expr *Exp) { |
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr *E = cast<CXXConstructExpr>(Exp);
677 | const CXXConstructorDecl *CD = E->getConstructor(); |
678 | RunCleanupsScope Scope(*this); |
679 | |
680 | // If we require zero initialization before (or instead of) calling the |
681 | // constructor, as can be the case with a non-user-provided default |
682 | // constructor, emit the zero initialization now. |
683 | // FIXME. Do I still need this for a copy ctor synthesis? |
684 | if (E->requiresZeroInitialization()) |
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType()) &&
         "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
690 | } |
691 | |
692 | static CharUnits CalculateCookiePadding(CodeGenFunction &CGF, |
693 | const CXXNewExpr *E) { |
694 | if (!E->isArray()) |
695 | return CharUnits::Zero(); |
696 | |
697 | // No cookie is required if the operator new[] being used is the |
698 | // reserved placement operator new[]. |
699 | if (E->getOperatorNew()->isReservedGlobalPlacementOperator()) |
700 | return CharUnits::Zero(); |
701 | |
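  // Otherwise the ABI may need a cookie: extra space allocated before the
  // array in which the element count is stashed so that 'delete[]' can run
  // the correct number of destructors. For example, under the Itanium C++
  // ABI, 'new T[n]' for a T with a non-trivial destructor stores 'n' in a
  // size_t slot just before the first element.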
  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
703 | } |
704 | |
705 | static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, |
706 | const CXXNewExpr *e, |
707 | unsigned minElements, |
708 | llvm::Value *&numElements, |
709 | llvm::Value *&sizeWithoutCookie) { |
710 | QualType type = e->getAllocatedType(); |
711 | |
712 | if (!e->isArray()) { |
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie =
        llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
717 | } |
718 | |
719 | // The width of size_t. |
720 | unsigned sizeWidth = CGF.SizeTy->getBitWidth(); |
721 | |
722 | // Figure out the cookie size. |
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());
725 | |
726 | // Emit the array size expression. |
727 | // We multiply the size of all dimensions for NumElements. |
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
729 | numElements = |
730 | ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType()); |
731 | if (!numElements) |
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
733 | assert(isa<llvm::IntegerType>(numElements->getType())); |
734 | |
  // The number of elements can have an arbitrary integer type;
736 | // essentially, we need to multiply it by a constant factor, add a |
737 | // cookie size, and verify that the result is representable as a |
738 | // size_t. That's just a gloss, though, and it's wrong in one |
739 | // important way: if the count is negative, it's an error even if |
740 | // the cookie size would bring the total size >= 0. |
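  // For example, with a 64-bit size_t, 'new S[n]' where sizeof(S) == 24
  // computes size = n * 24 (+ cookie, if any), and the allocation must fail
  // if 'n' is negative or if the multiplication or addition overflows.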
741 | bool isSigned |
742 | = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType(); |
743 | llvm::IntegerType *numElementsType |
    = cast<llvm::IntegerType>(numElements->getType());
745 | unsigned numElementsWidth = numElementsType->getBitWidth(); |
746 | |
747 | // Compute the constant factor. |
748 | llvm::APInt arraySizeMultiplier(sizeWidth, 1); |
749 | while (const ConstantArrayType *CAT |
           = CGF.getContext().getAsConstantArrayType(type)) {
751 | type = CAT->getElementType(); |
752 | arraySizeMultiplier *= CAT->getSize(); |
753 | } |
754 | |
  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
756 | llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity()); |
757 | typeSizeMultiplier *= arraySizeMultiplier; |
758 | |
759 | // This will be a size_t. |
760 | llvm::Value *size; |
761 | |
762 | // If someone is doing 'new int[42]' there is no need to do a dynamic check. |
763 | // Don't bloat the -O0 code. |
764 | if (llvm::ConstantInt *numElementsC = |
          dyn_cast<llvm::ConstantInt>(numElements)) {
766 | const llvm::APInt &count = numElementsC->getValue(); |
767 | |
768 | bool hasAnyOverflow = false; |
769 | |
770 | // If 'count' was a negative number, it's an overflow. |
771 | if (isSigned && count.isNegative()) |
772 | hasAnyOverflow = true; |
773 | |
774 | // We want to do all this arithmetic in size_t. If numElements is |
775 | // wider than that, check whether it's already too big, and if so, |
776 | // overflow. |
777 | else if (numElementsWidth > sizeWidth && |
778 | numElementsWidth - sizeWidth > count.countl_zero()) |
779 | hasAnyOverflow = true; |
780 | |
781 | // Okay, compute a count at the right width. |
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
787 | hasAnyOverflow = true; |
788 | |
789 | // Scale numElements by that. This might overflow, but we don't |
790 | // care because it only overflows if allocationSize does, too, and |
791 | // if that overflows then we shouldn't use this. |
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
799 | hasAnyOverflow |= overflow; |
800 | |
801 | // Add in the cookie, and check whether it's overflowed. |
802 | if (cookieSize != 0) { |
803 | // Save the current size without a cookie. This shouldn't be |
804 | // used if there was overflow. |
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
808 | hasAnyOverflow |= overflow; |
809 | } |
810 | |
811 | // On overflow, produce a -1 so operator new will fail. |
812 | if (hasAnyOverflow) { |
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
816 | } |
817 | |
818 | // Otherwise, we might need to use the overflow intrinsics. |
819 | } else { |
820 | // There are up to five conditions we need to test for: |
821 | // 1) if isSigned, we need to check whether numElements is negative; |
822 | // 2) if numElementsWidth > sizeWidth, we need to check whether |
823 | // numElements is larger than something representable in size_t; |
824 | // 3) if minElements > 0, we need to check whether numElements is smaller |
825 | // than that. |
826 | // 4) we need to compute |
827 | // sizeWithoutCookie := numElements * typeSizeMultiplier |
828 | // and check whether it overflows; and |
829 | // 5) if we need a cookie, we need to compute |
830 | // size := sizeWithoutCookie + cookieSize |
831 | // and check whether it overflows. |
832 | |
833 | llvm::Value *hasOverflow = nullptr; |
834 | |
835 | // If numElementsWidth > sizeWidth, then one way or another, we're |
836 | // going to have to do a comparison for (2), and this happens to |
837 | // take care of (1), too. |
838 | if (numElementsWidth > sizeWidth) { |
839 | llvm::APInt threshold = |
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
847 | |
848 | // Otherwise, if we're signed, we want to sext up to size_t. |
849 | } else if (isSigned) { |
850 | if (numElementsWidth < sizeWidth) |
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
852 | |
853 | // If there's a non-1 type size multiplier, then we can do the |
854 | // signedness check at the same time as we do the multiply |
855 | // because a negative number times anything will cause an |
856 | // unsigned overflow. Otherwise, we have to do it here. But at least |
857 | // in this case, we can subsume the >= minElements check. |
858 | if (typeSizeMultiplier == 1) |
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));
861 | |
862 | // Otherwise, zext up to size_t if necessary. |
863 | } else if (numElementsWidth < sizeWidth) { |
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
865 | } |
866 | |
867 | assert(numElements->getType() == CGF.SizeTy); |
868 | |
869 | if (minElements) { |
870 | // Don't allow allocation of fewer elements than we have initializers. |
871 | if (!hasOverflow) { |
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
881 | } |
882 | } |
883 | |
884 | size = numElements; |
885 | |
886 | // Multiply by the type size if necessary. This multiplier |
887 | // includes all the factors for nested arrays. |
888 | // |
889 | // This step also causes numElements to be scaled up by the |
890 | // nested-array factor if necessary. Overflow on this computation |
891 | // can be ignored because the result shouldn't be used if |
892 | // allocation fails. |
893 | if (typeSizeMultiplier != 1) { |
894 | llvm::Function *umul_with_overflow |
895 | = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy); |
896 | |
      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
909 | |
910 | // Also scale up numElements by the array size multiplier. |
911 | if (arraySizeMultiplier != 1) { |
912 | // If the base element type size is 1, then we can re-use the |
913 | // multiply we just did. |
914 | if (typeSize.isOne()) { |
915 | assert(arraySizeMultiplier == typeSizeMultiplier); |
916 | numElements = size; |
917 | |
918 | // Otherwise we need a separate multiply. |
919 | } else { |
920 | llvm::Value *asmV = |
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
923 | } |
924 | } |
925 | } else { |
926 | // numElements doesn't need to be scaled. |
927 | assert(arraySizeMultiplier == 1); |
928 | } |
929 | |
930 | // Add in the cookie size if necessary. |
931 | if (cookieSize != 0) { |
932 | sizeWithoutCookie = size; |
933 | |
934 | llvm::Function *uadd_with_overflow |
935 | = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy); |
936 | |
      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
948 | } |
949 | |
950 | // If we had any possibility of dynamic overflow, make a select to |
951 | // overwrite 'size' with an all-ones value, which should cause |
952 | // operator new to throw. |
953 | if (hasOverflow) |
      size = CGF.Builder.CreateSelect(
          hasOverflow, llvm::Constant::getAllOnesValue(CGF.SizeTy), size);
957 | } |
958 | |
959 | if (cookieSize == 0) |
960 | sizeWithoutCookie = size; |
961 | else |
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
963 | |
964 | return size; |
965 | } |
966 | |
967 | static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init, |
968 | QualType AllocType, Address NewPtr, |
969 | AggValueSlot::Overlap_t MayOverlap) { |
970 | // FIXME: Refactor with EmitExprAsInit. |
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
993 | } |
994 | |
995 | void CodeGenFunction::EmitNewArrayInitializer( |
996 | const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy, |
997 | Address BeginPtr, llvm::Value *NumElements, |
998 | llvm::Value *AllocSizeWithoutCookie) { |
999 | // If we have a type with trivial initialization and no initializer, |
1000 | // there's nothing to do. |
1001 | if (!E->hasInitializer()) |
1002 | return; |
1003 | |
1004 | Address CurPtr = BeginPtr; |
1005 | |
1006 | unsigned InitListElements = 0; |
1007 | |
1008 | const Expr *Init = E->getInitializer(); |
1009 | Address EndOfInit = Address::invalid(); |
1010 | QualType::DestructionKind DtorKind = ElementType.isDestructedType(); |
1011 | EHScopeStack::stable_iterator Cleanup; |
1012 | llvm::Instruction *CleanupDominator = nullptr; |
1013 | |
  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
1017 | |
1018 | // Attempt to perform zero-initialization using memset. |
1019 | auto TryMemsetInitialization = [&]() -> bool { |
1020 | // FIXME: If the type is a pointer-to-data-member under the Itanium ABI, |
1021 | // we can initialize with a memset to -1. |
    if (!CGM.getTypes().isZeroInitializable(ElementType))
1023 | return false; |
1024 | |
1025 | // Optimization: since zero initialization will just set the memory |
1026 | // to all zeroes, generate a single memset to do it in one shot. |
1027 | |
1028 | // Subtract out the size of any elements we've already initialized. |
1029 | auto *RemainingSize = AllocSizeWithoutCookie; |
1030 | if (InitListElements) { |
1031 | // We know this can't overflow; we check this when doing the allocation. |
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
1041 | return true; |
1042 | }; |
1043 | |
  const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
1045 | const CXXParenListInitExpr *CPLIE = nullptr; |
1046 | const StringLiteral *SL = nullptr; |
1047 | const ObjCEncodeExpr *OCEE = nullptr; |
1048 | const Expr *IgnoreParen = nullptr; |
1049 | if (!ILE) { |
1050 | IgnoreParen = Init->IgnoreParenImpCasts(); |
    CPLIE = dyn_cast<CXXParenListInitExpr>(IgnoreParen);
    SL = dyn_cast<StringLiteral>(IgnoreParen);
    OCEE = dyn_cast<ObjCEncodeExpr>(IgnoreParen);
1054 | } |
1055 | |
1056 | // If the initializer is an initializer list, first do the explicit elements. |
1057 | if (ILE || CPLIE || SL || OCEE) { |
1058 | // Initializing from a (braced) string literal is a special case; the init |
1059 | // list element does not initialize a (single) array element. |
1060 | if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) { |
1061 | if (!ILE) |
1062 | Init = IgnoreParen; |
1063 | // Initialize the initial portion of length equal to that of the string |
1064 | // literal. The allocation must be for at least this much; we emitted a |
1065 | // check for that earlier. |
1066 | AggValueSlot Slot = |
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);
1075 | |
1076 | // Move past these elements. |
1077 | InitListElements = |
1078 | cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe()) |
1079 | ->getZExtSize(); |
      CurPtr = Builder.CreateConstInBoundsGEP(CurPtr, InitListElements,
                                              "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
1089 | } |
1090 | return; |
1091 | } |
1092 | |
1093 | ArrayRef<const Expr *> InitExprs = |
1094 | ILE ? ILE->inits() : CPLIE->getInitExprs(); |
1095 | InitListElements = InitExprs.size(); |
1096 | |
1097 | // If this is a multi-dimensional array new, we will initialize multiple |
1098 | // elements with each init list element. |
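    // For example, in 'new int[2][3] {{1, 2, 3}, {4, 5, 6}}', each of the
    // two init list elements initializes three 'int' array elements.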
1099 | QualType AllocType = E->getAllocatedType(); |
1100 | if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>( |
1101 | AllocType->getAsArrayTypeUnsafe())) { |
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = CurPtr.withElementType(ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
1105 | } |
1106 | |
1107 | // Enter a partial-destruction Cleanup if necessary. |
    if (needsEHCleanup(DtorKind)) {
1109 | // In principle we could tell the Cleanup where we are more |
1110 | // directly, but the control flow can get so varied here that it |
1111 | // would actually be quite complex. Therefore we go through an |
1112 | // alloca. |
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator =
          Builder.CreateStore(BeginPtr.emitRawPointer(*this), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                       EndOfInit, ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
1121 | } |
1122 | |
1123 | CharUnits StartAlign = CurPtr.getAlignment(); |
1124 | unsigned i = 0; |
1125 | for (const Expr *IE : InitExprs) { |
1126 | // Tell the cleanup that it needs to destroy up to this |
1127 | // element. TODO: some of these stores can be trivially |
1128 | // observed to be unnecessary. |
1129 | if (EndOfInit.isValid()) { |
        Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
1131 | } |
1132 | // FIXME: If the last initializer is an incomplete initializer list for |
1133 | // an array, and we have an array filler, we can fold together the two |
1134 | // initialization loops. |
      StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
                                                 CurPtr.emitRawPointer(*this),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset((++i) * ElementSize));
1143 | } |
1144 | |
1145 | // The remaining elements are filled with the array filler expression. |
1146 | Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller(); |
1147 | |
1148 | // Extract the initializer for the individual array elements by pulling |
1149 | // out the array filler from all the nested initializer lists. This avoids |
1150 | // generating a nested loop for the initialization. |
1151 | while (Init && Init->getType()->isConstantArrayType()) { |
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
1156 | Init = SubILE->getArrayFiller(); |
1157 | } |
1158 | |
1159 | // Switch back to initializing one base element at a time. |
    CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
1161 | } |
1162 | |
1163 | // If all elements have already been initialized, skip any further |
1164 | // initialization. |
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");
1174 | |
1175 | // If this is a constructor call, try to optimize it out, and failing that |
1176 | // emit a single loop to initialize all remaining elements. |
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
1178 | CXXConstructorDecl *Ctor = CCE->getConstructor(); |
1179 | if (Ctor->isTrivial()) { |
1180 | // If new expression did not specify value-initialization, then there |
1181 | // is no initialization. |
1182 | if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty()) |
1183 | return; |
1184 | |
1185 | if (TryMemsetInitialization()) |
1186 | return; |
1187 | } |
1188 | |
1189 | // Store the new Cleanup position for irregular Cleanups. |
1190 | // |
1191 | // FIXME: Share this cleanup with the constructor call emission rather than |
1192 | // having it create a cleanup of its own. |
1193 | if (EndOfInit.isValid()) |
      Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
1195 | |
1196 | // Emit a constructor call loop to initialize the remaining elements. |
1197 | if (InitListElements) |
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
1204 | return; |
1205 | } |
1206 | |
1207 | // If this is value-initialization, we can usually use memset. |
1208 | ImplicitValueInitExpr IVIE(ElementType); |
  if (isa<ImplicitValueInitExpr>(Init)) {
1210 | if (TryMemsetInitialization()) |
1211 | return; |
1212 | |
1213 | // Switch to an ImplicitValueInitExpr for the element type. This handles |
1214 | // only one case: multidimensional array new of pointers to members. In |
1215 | // all other cases, we already have an initializer for the array element. |
1216 | Init = &IVIE; |
1217 | } |
1218 | |
1219 | // At this point we should have found an initializer for the individual |
1220 | // elements of the array. |
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");
1223 | |
1224 | // If we have an empty initializer list, we can usually use memset. |
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
1226 | if (ILE->getNumInits() == 0 && TryMemsetInitialization()) |
1227 | return; |
1228 | |
1229 | // If we have a struct whose every field is value-initialized, we can |
1230 | // usually use memset. |
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
1232 | if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) { |
1233 | if (RType->getDecl()->isStruct()) { |
1234 | unsigned NumElements = 0; |
1235 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl())) |
1236 | NumElements = CXXRD->getNumBases(); |
1237 | for (auto *Field : RType->getDecl()->fields()) |
1238 | if (!Field->isUnnamedBitField()) |
1239 | ++NumElements; |
1240 | // FIXME: Recurse into nested InitListExprs. |
1241 | if (ILE->getNumInits() == NumElements) |
1242 | for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) |
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
1244 | --NumElements; |
1245 | if (ILE->getNumInits() == NumElements && TryMemsetInitialization()) |
1246 | return; |
1247 | } |
1248 | } |
1249 | } |
1250 | |
1251 | // Create the loop blocks. |
1252 | llvm::BasicBlock *EntryBB = Builder.GetInsertBlock(); |
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
1255 | |
1256 | // Find the end of the array, hoisted out of the loop. |
  llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
      BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements,
      "array.end");
1260 | |
1261 | // If the number of elements isn't constant, we have to now check if there is |
1262 | // anything left to initialize. |
1263 | if (!ConstNum) { |
1264 | llvm::Value *IsEmpty = Builder.CreateICmpEQ(LHS: CurPtr.emitRawPointer(CGF&: *this), |
1265 | RHS: EndPtr, Name: "array.isempty" ); |
1266 | Builder.CreateCondBr(Cond: IsEmpty, True: ContBB, False: LoopBB); |
1267 | } |
1268 | |
1269 | // Enter the loop. |
1270 | EmitBlock(BB: LoopBB); |
1271 | |
1272 | // Set up the current-element phi. |
1273 | llvm::PHINode *CurPtrPhi = |
1274 | Builder.CreatePHI(Ty: CurPtr.getType(), NumReservedValues: 2, Name: "array.cur" ); |
1275 | CurPtrPhi->addIncoming(V: CurPtr.emitRawPointer(CGF&: *this), BB: EntryBB); |
1276 | |
1277 | CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign); |
1278 | |
1279 | // Store the new Cleanup position for irregular Cleanups. |
1280 | if (EndOfInit.isValid()) |
1281 | Builder.CreateStore(Val: CurPtr.emitRawPointer(CGF&: *this), Addr: EndOfInit); |
1282 | |
1283 | // Enter a partial-destruction Cleanup if necessary. |
1284 | if (!CleanupDominator && needsEHCleanup(kind: DtorKind)) { |
1285 | llvm::Value *BeginPtrRaw = BeginPtr.emitRawPointer(CGF&: *this); |
1286 | llvm::Value *CurPtrRaw = CurPtr.emitRawPointer(CGF&: *this); |
1287 | pushRegularPartialArrayCleanup(arrayBegin: BeginPtrRaw, arrayEnd: CurPtrRaw, elementType: ElementType, |
1288 | elementAlignment: ElementAlign, destroyer: getDestroyer(destructionKind: DtorKind)); |
1289 | Cleanup = EHStack.stable_begin(); |
1290 | CleanupDominator = Builder.CreateUnreachable(); |
1291 | } |
1292 | |
1293 | // Emit the initializer into this element. |
1294 | StoreAnyExprIntoOneUnit(CGF&: *this, Init, AllocType: Init->getType(), NewPtr: CurPtr, |
1295 | MayOverlap: AggValueSlot::DoesNotOverlap); |
1296 | |
1297 | // Leave the Cleanup if we entered one. |
1298 | if (CleanupDominator) { |
1299 | DeactivateCleanupBlock(Cleanup, DominatingIP: CleanupDominator); |
1300 | CleanupDominator->eraseFromParent(); |
1301 | } |
1302 | |
1303 | // Advance to the next element by adjusting the pointer type as necessary. |
1304 | llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32( |
1305 | Ty: ElementTy, Ptr: CurPtr.emitRawPointer(CGF&: *this), Idx0: 1, Name: "array.next" ); |
1306 | |
1307 | // Check whether we've gotten to the end of the array and, if so, |
1308 | // exit the loop. |
1309 | llvm::Value *IsEnd = Builder.CreateICmpEQ(LHS: NextPtr, RHS: EndPtr, Name: "array.atend" ); |
1310 | Builder.CreateCondBr(Cond: IsEnd, True: ContBB, False: LoopBB); |
1311 | CurPtrPhi->addIncoming(V: NextPtr, BB: Builder.GetInsertBlock()); |
1312 | |
1313 | EmitBlock(BB: ContBB); |
1314 | } |
1315 | |
1316 | static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, |
1317 | QualType ElementType, llvm::Type *ElementTy, |
1318 | Address NewPtr, llvm::Value *NumElements, |
1319 | llvm::Value *AllocSizeWithoutCookie) { |
1320 | ApplyDebugLocation DL(CGF, E); |
1321 | if (E->isArray()) |
1322 | CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, BeginPtr: NewPtr, NumElements, |
1323 | AllocSizeWithoutCookie); |
1324 | else if (const Expr *Init = E->getInitializer()) |
1325 | StoreAnyExprIntoOneUnit(CGF, Init, AllocType: E->getAllocatedType(), NewPtr, |
1326 | MayOverlap: AggValueSlot::DoesNotOverlap); |
1327 | } |
1328 | |
1329 | /// Emit a call to an operator new or operator delete function, as implicitly |
1330 | /// created by new-expressions and delete-expressions. |
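/// For example (illustrative only), `p = new int(42)` implicitly calls
/// `operator new(sizeof(int))`, and `delete p` implicitly calls
/// `operator delete(p)` (possibly with additional size and/or alignment
/// arguments, depending on the selected usual deallocation function).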
1331 | static RValue EmitNewDeleteCall(CodeGenFunction &CGF, |
1332 | const FunctionDecl *CalleeDecl, |
1333 | const FunctionProtoType *CalleeType, |
1334 | const CallArgList &Args) { |
1335 | llvm::CallBase *CallOrInvoke; |
1336 | llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(GD: CalleeDecl); |
1337 | CGCallee Callee = CGCallee::forDirect(functionPtr: CalleePtr, abstractInfo: GlobalDecl(CalleeDecl)); |
1338 | RValue RV = |
1339 | CGF.EmitCall(CallInfo: CGF.CGM.getTypes().arrangeFreeFunctionCall( |
1340 | Args, CalleeType, /*ChainCall=*/false), |
1341 | Callee, ReturnValue: ReturnValueSlot(), Args, callOrInvoke: &CallOrInvoke); |
1342 | |
1343 | /// C++1y [expr.new]p10: |
1344 | /// [In a new-expression,] an implementation is allowed to omit a call |
1345 | /// to a replaceable global allocation function. |
1346 | /// |
1347 | /// We model such elidable calls with the 'builtin' attribute. |
1348 | llvm::Function *Fn = dyn_cast<llvm::Function>(Val: CalleePtr); |
1349 | if (CalleeDecl->isReplaceableGlobalAllocationFunction() && |
1350 | Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) { |
1351 | CallOrInvoke->addFnAttr(llvm::Attribute::Builtin); |
1352 | } |
1353 | |
1354 | return RV; |
1355 | } |
1356 | |
1357 | RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, |
1358 | const CallExpr *TheCall, |
1359 | bool IsDelete) { |
1360 | CallArgList Args; |
1361 | EmitCallArgs(Args, Prototype: Type, ArgRange: TheCall->arguments()); |
1362 | // Find the allocation or deallocation function that we're calling. |
1363 | ASTContext &Ctx = getContext(); |
1364 | DeclarationName Name = Ctx.DeclarationNames |
1365 | .getCXXOperatorName(Op: IsDelete ? OO_Delete : OO_New); |
1366 | |
1367 | for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name)) |
1368 | if (auto *FD = dyn_cast<FunctionDecl>(Decl)) |
1369 | if (Ctx.hasSameType(FD->getType(), QualType(Type, 0))) |
1370 | return EmitNewDeleteCall(*this, FD, Type, Args); |
1371 | llvm_unreachable("predeclared global operator new/delete is missing" ); |
1372 | } |
1373 | |
1374 | namespace { |
1375 | /// The parameters to pass to a usual operator delete. |
1376 | struct UsualDeleteParams { |
1377 | bool DestroyingDelete = false; |
1378 | bool Size = false; |
1379 | bool Alignment = false; |
1380 | }; |
1381 | } |
1382 | |
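// The usual (non-placement) deallocation functions have one of these forms
// (cf. C++ [basic.stc.dynamic.deallocation]):
//   void operator delete(void *);
//   void operator delete(void *, std::size_t);
//   void operator delete(void *, std::align_val_t);
//   void operator delete(void *, std::size_t, std::align_val_t);
// A destroying operator delete additionally takes a
// std::destroying_delete_t tag after the pointer.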
1383 | static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) { |
1384 | UsualDeleteParams Params; |
1385 | |
1386 | const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>(); |
1387 | auto AI = FPT->param_type_begin(), AE = FPT->param_type_end(); |
1388 | |
1389 | // The first argument is always a void*. |
1390 | ++AI; |
1391 | |
1392 | // The next parameter may be a std::destroying_delete_t. |
1393 | if (FD->isDestroyingOperatorDelete()) { |
1394 | Params.DestroyingDelete = true; |
1395 | assert(AI != AE); |
1396 | ++AI; |
1397 | } |
1398 | |
1399 | // Figure out what other parameters we should be implicitly passing. |
1400 | if (AI != AE && (*AI)->isIntegerType()) { |
1401 | Params.Size = true; |
1402 | ++AI; |
1403 | } |
1404 | |
1405 | if (AI != AE && (*AI)->isAlignValT()) { |
1406 | Params.Alignment = true; |
1407 | ++AI; |
1408 | } |
1409 | |
1410 | assert(AI == AE && "unexpected usual deallocation function parameter" ); |
1411 | return Params; |
1412 | } |
1413 | |
1414 | namespace { |
1415 | /// A cleanup to call the given 'operator delete' function upon abnormal |
1416 | /// exit from a new expression. Templated on a traits type that deals with |
1417 | /// ensuring that the arguments dominate the cleanup if necessary. |
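///
/// For example (illustrative only), given `struct S { S(); };`, the
/// expression `new S` must call `operator delete` on the freshly allocated
/// memory if S::S() throws, before the exception leaves the new-expression.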
1418 | template<typename Traits> |
1419 | class CallDeleteDuringNew final : public EHScopeStack::Cleanup { |
1420 | /// Type used to hold llvm::Value*s. |
1421 | typedef typename Traits::ValueTy ValueTy; |
1422 | /// Type used to hold RValues. |
1423 | typedef typename Traits::RValueTy RValueTy; |
1424 | struct PlacementArg { |
1425 | RValueTy ArgValue; |
1426 | QualType ArgType; |
1427 | }; |
1428 | |
1429 | unsigned NumPlacementArgs : 31; |
1430 | LLVM_PREFERRED_TYPE(bool) |
1431 | unsigned PassAlignmentToPlacementDelete : 1; |
1432 | const FunctionDecl *OperatorDelete; |
1433 | ValueTy Ptr; |
1434 | ValueTy AllocSize; |
1435 | CharUnits AllocAlign; |
1436 | |
1437 | PlacementArg *getPlacementArgs() { |
1438 | return reinterpret_cast<PlacementArg *>(this + 1); |
1439 | } |
1440 | |
1441 | public: |
1442 | static size_t getExtraSize(size_t NumPlacementArgs) { |
1443 | return NumPlacementArgs * sizeof(PlacementArg); |
1444 | } |
1445 | |
1446 | CallDeleteDuringNew(size_t NumPlacementArgs, |
1447 | const FunctionDecl *OperatorDelete, ValueTy Ptr, |
1448 | ValueTy AllocSize, bool PassAlignmentToPlacementDelete, |
1449 | CharUnits AllocAlign) |
1450 | : NumPlacementArgs(NumPlacementArgs), |
1451 | PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete), |
1452 | OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize), |
1453 | AllocAlign(AllocAlign) {} |
1454 | |
1455 | void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) { |
1456 | assert(I < NumPlacementArgs && "index out of range" ); |
1457 | getPlacementArgs()[I] = {Arg, Type}; |
1458 | } |
1459 | |
1460 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1461 | const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>(); |
1462 | CallArgList DeleteArgs; |
1463 | |
1464 | // The first argument is always a void* (or C* for a destroying operator |
1465 | // delete for class type C). |
1466 | DeleteArgs.add(rvalue: Traits::get(CGF, Ptr), type: FPT->getParamType(0)); |
1467 | |
1468 | // Figure out what other parameters we should be implicitly passing. |
1469 | UsualDeleteParams Params; |
1470 | if (NumPlacementArgs) { |
1471 | // A placement deallocation function is implicitly passed an alignment |
1472 | // if the placement allocation function was, but is never passed a size. |
1473 | Params.Alignment = PassAlignmentToPlacementDelete; |
1474 | } else { |
1475 | // For a non-placement new-expression, 'operator delete' can take a |
1476 | // size and/or an alignment if it has the right parameters. |
1477 | Params = getUsualDeleteParams(FD: OperatorDelete); |
1478 | } |
1479 | |
1480 | assert(!Params.DestroyingDelete && |
1481 | "should not call destroying delete in a new-expression" ); |
1482 | |
1483 | // The second argument can be a std::size_t (for non-placement delete). |
1484 | if (Params.Size) |
1485 | DeleteArgs.add(rvalue: Traits::get(CGF, AllocSize), |
1486 | type: CGF.getContext().getSizeType()); |
1487 | |
1488 | // The next (second or third) argument can be a std::align_val_t, which |
1489 | // is an enum whose underlying type is std::size_t. |
1490 | // FIXME: Use the right type as the parameter type. Note that in a call |
1491 | // to operator delete(size_t, ...), we may not have it available. |
1492 | if (Params.Alignment) |
1493 | DeleteArgs.add(rvalue: RValue::get(V: llvm::ConstantInt::get( |
1494 | Ty: CGF.SizeTy, V: AllocAlign.getQuantity())), |
1495 | type: CGF.getContext().getSizeType()); |
1496 | |
1497 | // Pass the rest of the arguments, which must match exactly. |
1498 | for (unsigned I = 0; I != NumPlacementArgs; ++I) { |
1499 | auto Arg = getPlacementArgs()[I]; |
1500 | DeleteArgs.add(rvalue: Traits::get(CGF, Arg.ArgValue), type: Arg.ArgType); |
1501 | } |
1502 | |
1503 | // Call 'operator delete'. |
1504 | EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); |
1505 | } |
1506 | }; |
1507 | } |
1508 | |
1509 | /// Enter a cleanup to call 'operator delete' if the initializer in a |
1510 | /// new-expression throws. |
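///
/// For example (illustrative only), for `new (a, b) T(...)`, if T's
/// constructor throws, the matching placement `operator delete(void *, A, B)`
/// must be called with the same placement arguments a and b, which is why
/// those arguments are captured by the cleanup below.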
1511 | static void EnterNewDeleteCleanup(CodeGenFunction &CGF, |
1512 | const CXXNewExpr *E, |
1513 | Address NewPtr, |
1514 | llvm::Value *AllocSize, |
1515 | CharUnits AllocAlign, |
1516 | const CallArgList &NewArgs) { |
1517 | unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1; |
1518 | |
1519 | // If we're not inside a conditional branch, then the cleanup will |
1520 | // dominate and we can do the easier (and more efficient) thing. |
1521 | if (!CGF.isInConditionalBranch()) { |
1522 | struct DirectCleanupTraits { |
1523 | typedef llvm::Value *ValueTy; |
1524 | typedef RValue RValueTy; |
1525 | static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); } |
1526 | static RValue get(CodeGenFunction &, RValueTy V) { return V; } |
1527 | }; |
1528 | |
1529 | typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup; |
1530 | |
1531 | DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>( |
1532 | Kind: EHCleanup, N: E->getNumPlacementArgs(), A: E->getOperatorDelete(), |
1533 | A: NewPtr.emitRawPointer(CGF), A: AllocSize, A: E->passAlignment(), A: AllocAlign); |
1534 | for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) { |
1535 | auto &Arg = NewArgs[I + NumNonPlacementArgs]; |
1536 | Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty); |
1537 | } |
1538 | |
1539 | return; |
1540 | } |
1541 | |
1542 | // Otherwise, we need to save all this stuff. |
1543 | DominatingValue<RValue>::saved_type SavedNewPtr = |
1544 | DominatingValue<RValue>::save(CGF, value: RValue::get(Addr: NewPtr, CGF)); |
1545 | DominatingValue<RValue>::saved_type SavedAllocSize = |
1546 | DominatingValue<RValue>::save(CGF, value: RValue::get(V: AllocSize)); |
1547 | |
1548 | struct ConditionalCleanupTraits { |
1549 | typedef DominatingValue<RValue>::saved_type ValueTy; |
1550 | typedef DominatingValue<RValue>::saved_type RValueTy; |
1551 | static RValue get(CodeGenFunction &CGF, ValueTy V) { |
1552 | return V.restore(CGF); |
1553 | } |
1554 | }; |
1555 | typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup; |
1556 | |
1557 | ConditionalCleanup *Cleanup = CGF.EHStack |
1558 | .pushCleanupWithExtra<ConditionalCleanup>(Kind: EHCleanup, |
1559 | N: E->getNumPlacementArgs(), |
1560 | A: E->getOperatorDelete(), |
1561 | A: SavedNewPtr, |
1562 | A: SavedAllocSize, |
1563 | A: E->passAlignment(), |
1564 | A: AllocAlign); |
1565 | for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) { |
1566 | auto &Arg = NewArgs[I + NumNonPlacementArgs]; |
1567 | Cleanup->setPlacementArg( |
1568 | I, DominatingValue<RValue>::save(CGF, value: Arg.getRValue(CGF)), Arg.Ty); |
1569 | } |
1570 | |
1571 | CGF.initFullExprCleanup(); |
1572 | } |
1573 | |
1574 | llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { |
1575 | // The element type being allocated. |
1576 | QualType allocType = getContext().getBaseElementType(QT: E->getAllocatedType()); |
1577 | |
1578 | // 1. Build a call to the allocation function. |
1579 | FunctionDecl *allocator = E->getOperatorNew(); |
1580 | |
1581 | // If there is a brace-initializer or C++20 parenthesized initializer, we |
1582 | // cannot allocate fewer elements than there are initializers. |
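// For example (illustrative only), `new int[n]{1, 2, 3}` must allocate at
// least three elements, so minElements becomes 3 below and the allocation
// size computed by EmitCXXNewAllocSize accounts for at least that many.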
1583 | unsigned minElements = 0; |
1584 | if (E->isArray() && E->hasInitializer()) { |
1585 | const Expr *Init = E->getInitializer(); |
1586 | const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: Init); |
1587 | const CXXParenListInitExpr *CPLIE = dyn_cast<CXXParenListInitExpr>(Val: Init); |
1588 | const Expr *IgnoreParen = Init->IgnoreParenImpCasts(); |
1589 | if ((ILE && ILE->isStringLiteralInit()) || |
1590 | isa<StringLiteral>(Val: IgnoreParen) || isa<ObjCEncodeExpr>(Val: IgnoreParen)) { |
1591 | minElements = |
1592 | cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe()) |
1593 | ->getZExtSize(); |
1594 | } else if (ILE || CPLIE) { |
1595 | minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size(); |
1596 | } |
1597 | } |
1598 | |
1599 | llvm::Value *numElements = nullptr; |
1600 | llvm::Value *allocSizeWithoutCookie = nullptr; |
1601 | llvm::Value *allocSize = |
1602 | EmitCXXNewAllocSize(CGF&: *this, e: E, minElements, numElements, |
1603 | sizeWithoutCookie&: allocSizeWithoutCookie); |
1604 | CharUnits allocAlign = getContext().getTypeAlignInChars(T: allocType); |
1605 | |
1606 | // Emit the allocation call. If the allocator is a global placement |
1607 | // operator, just "inline" it directly. |
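// For example (illustrative only), `new (buf) T(...)` using the reserved
// `operator new(std::size_t, void *)` emits no call at all: the result is
// simply `buf`, reinterpreted at the allocated type's alignment.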
1608 | Address allocation = Address::invalid(); |
1609 | CallArgList allocatorArgs; |
1610 | if (allocator->isReservedGlobalPlacementOperator()) { |
1611 | assert(E->getNumPlacementArgs() == 1); |
1612 | const Expr *arg = *E->placement_arguments().begin(); |
1613 | |
1614 | LValueBaseInfo BaseInfo; |
1615 | allocation = EmitPointerWithAlignment(Addr: arg, BaseInfo: &BaseInfo); |
1616 | |
1617 | // The pointer expression will, in many cases, be an opaque void*. |
1618 | // In these cases, discard the computed alignment and use the |
1619 | // formal alignment of the allocated type. |
1620 | if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl) |
1621 | allocation.setAlignment(allocAlign); |
1622 | |
1623 | // Set up allocatorArgs for the call to operator delete if it's not |
1624 | // the reserved global operator. |
1625 | if (E->getOperatorDelete() && |
1626 | !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { |
1627 | allocatorArgs.add(rvalue: RValue::get(V: allocSize), type: getContext().getSizeType()); |
1628 | allocatorArgs.add(rvalue: RValue::get(Addr: allocation, CGF&: *this), type: arg->getType()); |
1629 | } |
1630 | |
1631 | } else { |
1632 | const FunctionProtoType *allocatorType = |
1633 | allocator->getType()->castAs<FunctionProtoType>(); |
1634 | unsigned ParamsToSkip = 0; |
1635 | |
1636 | // The allocation size is the first argument. |
1637 | QualType sizeType = getContext().getSizeType(); |
1638 | allocatorArgs.add(rvalue: RValue::get(V: allocSize), type: sizeType); |
1639 | ++ParamsToSkip; |
1640 | |
1641 | if (allocSize != allocSizeWithoutCookie) { |
1642 | CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI. |
1643 | allocAlign = std::max(a: allocAlign, b: cookieAlign); |
1644 | } |
1645 | |
1646 | // The allocation alignment may be passed as the second argument. |
1647 | if (E->passAlignment()) { |
1648 | QualType AlignValT = sizeType; |
1649 | if (allocatorType->getNumParams() > 1) { |
1650 | AlignValT = allocatorType->getParamType(i: 1); |
1651 | assert(getContext().hasSameUnqualifiedType( |
1652 | AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(), |
1653 | sizeType) && |
1654 | "wrong type for alignment parameter" ); |
1655 | ++ParamsToSkip; |
1656 | } else { |
1657 | // Corner case, passing alignment to 'operator new(size_t, ...)'. |
1658 | assert(allocator->isVariadic() && "can't pass alignment to allocator" ); |
1659 | } |
1660 | allocatorArgs.add( |
1661 | rvalue: RValue::get(V: llvm::ConstantInt::get(Ty: SizeTy, V: allocAlign.getQuantity())), |
1662 | type: AlignValT); |
1663 | } |
1664 | |
1665 | // FIXME: Why do we not pass a CalleeDecl here? |
1666 | EmitCallArgs(Args&: allocatorArgs, Prototype: allocatorType, ArgRange: E->placement_arguments(), |
1667 | /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip); |
1668 | |
1669 | RValue RV = |
1670 | EmitNewDeleteCall(CGF&: *this, CalleeDecl: allocator, CalleeType: allocatorType, Args: allocatorArgs); |
1671 | |
1672 | // Set !heapallocsite metadata on the call to operator new. |
1673 | if (getDebugInfo()) |
1674 | if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal())) |
1675 | getDebugInfo()->addHeapAllocSiteMetadata(CallSite: newCall, AllocatedTy: allocType, |
1676 | Loc: E->getExprLoc()); |
1677 | |
1678 | // If this was a call to a global replaceable allocation function that does |
1679 | // not take an alignment argument, the allocator is known to produce |
1680 | // storage that's suitably aligned for any object that fits, up to a known |
1681 | // threshold. Otherwise assume it's suitably aligned for the allocated type. |
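// For example (illustrative only), assuming a 16-byte target new-alignment,
// the result of `new double` may be assumed 8-byte aligned (the type's
// size), while `new char[n]` may only be assumed 1-byte aligned.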
1682 | CharUnits allocationAlign = allocAlign; |
1683 | if (!E->passAlignment() && |
1684 | allocator->isReplaceableGlobalAllocationFunction()) { |
1685 | unsigned AllocatorAlign = llvm::bit_floor(Value: std::min<uint64_t>( |
1686 | a: Target.getNewAlign(), b: getContext().getTypeSize(T: allocType))); |
1687 | allocationAlign = std::max( |
1688 | a: allocationAlign, b: getContext().toCharUnitsFromBits(BitSize: AllocatorAlign)); |
1689 | } |
1690 | |
1691 | allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign); |
1692 | } |
1693 | |
1694 | // Emit a null check on the allocation result if the allocation |
1695 | // function is allowed to return null (because it has a non-throwing |
1696 | // exception spec or is the reserved placement new) and we have an |
1697 | // interesting initializer or will be running sanitizers on the initialization. |
1698 | bool nullCheck = E->shouldNullCheckAllocation() && |
1699 | (!allocType.isPODType(Context: getContext()) || E->hasInitializer() || |
1700 | sanitizePerformTypeCheck()); |
1701 | |
1702 | llvm::BasicBlock *nullCheckBB = nullptr; |
1703 | llvm::BasicBlock *contBB = nullptr; |
1704 | |
1705 | // The null-check means that the initializer is conditionally |
1706 | // evaluated. |
1707 | ConditionalEvaluation conditional(*this); |
1708 | |
1709 | if (nullCheck) { |
1710 | conditional.begin(CGF&: *this); |
1711 | |
1712 | nullCheckBB = Builder.GetInsertBlock(); |
1713 | llvm::BasicBlock *notNullBB = createBasicBlock(name: "new.notnull" ); |
1714 | contBB = createBasicBlock(name: "new.cont" ); |
1715 | |
1716 | llvm::Value *isNull = Builder.CreateIsNull(Addr: allocation, Name: "new.isnull" ); |
1717 | Builder.CreateCondBr(Cond: isNull, True: contBB, False: notNullBB); |
1718 | EmitBlock(BB: notNullBB); |
1719 | } |
1720 | |
1721 | // If there's an operator delete, enter a cleanup to call it if an |
1722 | // exception is thrown. |
1723 | EHScopeStack::stable_iterator operatorDeleteCleanup; |
1724 | llvm::Instruction *cleanupDominator = nullptr; |
1725 | if (E->getOperatorDelete() && |
1726 | !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { |
1727 | EnterNewDeleteCleanup(CGF&: *this, E, NewPtr: allocation, AllocSize: allocSize, AllocAlign: allocAlign, |
1728 | NewArgs: allocatorArgs); |
1729 | operatorDeleteCleanup = EHStack.stable_begin(); |
1730 | cleanupDominator = Builder.CreateUnreachable(); |
1731 | } |
1732 | |
1733 | assert((allocSize == allocSizeWithoutCookie) == |
1734 | CalculateCookiePadding(*this, E).isZero()); |
1735 | if (allocSize != allocSizeWithoutCookie) { |
1736 | assert(E->isArray()); |
1737 | allocation = CGM.getCXXABI().InitializeArrayCookie(CGF&: *this, NewPtr: allocation, |
1738 | NumElements: numElements, |
1739 | expr: E, ElementType: allocType); |
1740 | } |
1741 | |
1742 | llvm::Type *elementTy = ConvertTypeForMem(T: allocType); |
1743 | Address result = allocation.withElementType(ElemTy: elementTy); |
1744 | |
1745 | // Pass the pointer through launder.invariant.group to avoid propagating |
1746 | // vptr information from a previous type that may be stored there. |
1747 | // To avoid breaking LTO with different optimization levels, we do this |
1748 | // regardless of the optimization level. |
1749 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
1750 | allocator->isReservedGlobalPlacementOperator()) |
1751 | result = Builder.CreateLaunderInvariantGroup(Addr: result); |
1752 | |
1753 | // Emit sanitizer checks for the pointer value now, so that in the case of |
1754 | // an array it is checked only once and not at each constructor call. We may |
1755 | // have already checked that the pointer is non-null. |
1756 | // FIXME: If we have an array cookie and a potentially-throwing allocator, |
1757 | // we'll null check the wrong pointer here. |
1758 | SanitizerSet SkippedChecks; |
1759 | SkippedChecks.set(K: SanitizerKind::Null, Value: nullCheck); |
1760 | EmitTypeCheck(TCK: CodeGenFunction::TCK_ConstructorCall, |
1761 | Loc: E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(), |
1762 | Addr: result, Type: allocType, Alignment: result.getAlignment(), SkippedChecks, |
1763 | ArraySize: numElements); |
1764 | |
1765 | EmitNewInitializer(CGF&: *this, E, ElementType: allocType, ElementTy: elementTy, NewPtr: result, NumElements: numElements, |
1766 | AllocSizeWithoutCookie: allocSizeWithoutCookie); |
1767 | llvm::Value *resultPtr = result.emitRawPointer(CGF&: *this); |
1768 | if (E->isArray()) { |
1769 | // The result is a pointer to the base element type. If we're |
1770 | // allocating an array of arrays, we'll need to cast back to the |
1771 | // array pointer type. |
1772 | llvm::Type *resultType = ConvertTypeForMem(T: E->getType()); |
1773 | if (resultPtr->getType() != resultType) |
1774 | resultPtr = Builder.CreateBitCast(V: resultPtr, DestTy: resultType); |
1775 | } |
1776 | |
1777 | // Deactivate the 'operator delete' cleanup if we finished |
1778 | // initialization. |
1779 | if (operatorDeleteCleanup.isValid()) { |
1780 | DeactivateCleanupBlock(Cleanup: operatorDeleteCleanup, DominatingIP: cleanupDominator); |
1781 | cleanupDominator->eraseFromParent(); |
1782 | } |
1783 | |
1784 | if (nullCheck) { |
1785 | conditional.end(CGF&: *this); |
1786 | |
1787 | llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); |
1788 | EmitBlock(BB: contBB); |
1789 | |
1790 | llvm::PHINode *PHI = Builder.CreatePHI(Ty: resultPtr->getType(), NumReservedValues: 2); |
1791 | PHI->addIncoming(V: resultPtr, BB: notNullBB); |
1792 | PHI->addIncoming(V: llvm::Constant::getNullValue(Ty: resultPtr->getType()), |
1793 | BB: nullCheckBB); |
1794 | |
1795 | resultPtr = PHI; |
1796 | } |
1797 | |
1798 | return resultPtr; |
1799 | } |
1800 | |
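// For example (illustrative only), for `struct alignas(32) V { ... };`,
// a `delete p` (with p of type V*) may lower to
//   operator delete(p, sizeof(V), std::align_val_t(32))
// when the selected usual operator delete takes a size and an alignment.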
1801 | void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD, |
1802 | llvm::Value *Ptr, QualType DeleteTy, |
1803 | llvm::Value *NumElements, |
1804 | CharUnits CookieSize) { |
1805 | assert((!NumElements && CookieSize.isZero()) || |
1806 | DeleteFD->getOverloadedOperator() == OO_Array_Delete); |
1807 | |
1808 | const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>(); |
1809 | CallArgList DeleteArgs; |
1810 | |
1811 | auto Params = getUsualDeleteParams(FD: DeleteFD); |
1812 | auto ParamTypeIt = DeleteFTy->param_type_begin(); |
1813 | |
1814 | // Pass the pointer itself. |
1815 | QualType ArgTy = *ParamTypeIt++; |
1816 | llvm::Value *DeletePtr = Builder.CreateBitCast(V: Ptr, DestTy: ConvertType(T: ArgTy)); |
1817 | DeleteArgs.add(rvalue: RValue::get(V: DeletePtr), type: ArgTy); |
1818 | |
1819 | // Pass the std::destroying_delete tag if present. |
1820 | llvm::AllocaInst *DestroyingDeleteTag = nullptr; |
1821 | if (Params.DestroyingDelete) { |
1822 | QualType DDTag = *ParamTypeIt++; |
1823 | llvm::Type *Ty = getTypes().ConvertType(T: DDTag); |
1824 | CharUnits Align = CGM.getNaturalTypeAlignment(T: DDTag); |
1825 | DestroyingDeleteTag = CreateTempAlloca(Ty, Name: "destroying.delete.tag" ); |
1826 | DestroyingDeleteTag->setAlignment(Align.getAsAlign()); |
1827 | DeleteArgs.add( |
1828 | rvalue: RValue::getAggregate(addr: Address(DestroyingDeleteTag, Ty, Align)), type: DDTag); |
1829 | } |
1830 | |
1831 | // Pass the size if the delete function has a size_t parameter. |
1832 | if (Params.Size) { |
1833 | QualType SizeType = *ParamTypeIt++; |
1834 | CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(T: DeleteTy); |
1835 | llvm::Value *Size = llvm::ConstantInt::get(Ty: ConvertType(T: SizeType), |
1836 | V: DeleteTypeSize.getQuantity()); |
1837 | |
1838 | // For array new, multiply by the number of elements. |
1839 | if (NumElements) |
1840 | Size = Builder.CreateMul(LHS: Size, RHS: NumElements); |
1841 | |
1842 | // If there is a cookie, add the cookie size. |
1843 | if (!CookieSize.isZero()) |
1844 | Size = Builder.CreateAdd( |
1845 | LHS: Size, RHS: llvm::ConstantInt::get(Ty: SizeTy, V: CookieSize.getQuantity())); |
1846 | |
1847 | DeleteArgs.add(rvalue: RValue::get(V: Size), type: SizeType); |
1848 | } |
1849 | |
1850 | // Pass the alignment if the delete function has an align_val_t parameter. |
1851 | if (Params.Alignment) { |
1852 | QualType AlignValType = *ParamTypeIt++; |
1853 | CharUnits DeleteTypeAlign = |
1854 | getContext().toCharUnitsFromBits(BitSize: getContext().getTypeAlignIfKnown( |
1855 | T: DeleteTy, NeedsPreferredAlignment: true /* NeedsPreferredAlignment */)); |
1856 | llvm::Value *Align = llvm::ConstantInt::get(Ty: ConvertType(T: AlignValType), |
1857 | V: DeleteTypeAlign.getQuantity()); |
1858 | DeleteArgs.add(rvalue: RValue::get(V: Align), type: AlignValType); |
1859 | } |
1860 | |
1861 | assert(ParamTypeIt == DeleteFTy->param_type_end() && |
1862 | "unknown parameter to usual delete function" ); |
1863 | |
1864 | // Emit the call to delete. |
1865 | EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs); |
1866 | |
1867 | // If call argument lowering didn't use the destroying_delete_t alloca, |
1868 | // remove it again. |
1869 | if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty()) |
1870 | DestroyingDeleteTag->eraseFromParent(); |
1871 | } |
1872 | |
1873 | namespace { |
1874 | /// Calls the given 'operator delete' on a single object. |
1875 | struct CallObjectDelete final : EHScopeStack::Cleanup { |
1876 | llvm::Value *Ptr; |
1877 | const FunctionDecl *OperatorDelete; |
1878 | QualType ElementType; |
1879 | |
1880 | CallObjectDelete(llvm::Value *Ptr, |
1881 | const FunctionDecl *OperatorDelete, |
1882 | QualType ElementType) |
1883 | : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {} |
1884 | |
1885 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1886 | CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType); |
1887 | } |
1888 | }; |
1889 | } |
1890 | |
1891 | void |
1892 | CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete, |
1893 | llvm::Value *CompletePtr, |
1894 | QualType ElementType) { |
1895 | EHStack.pushCleanup<CallObjectDelete>(Kind: NormalAndEHCleanup, A: CompletePtr, |
1896 | A: OperatorDelete, A: ElementType); |
1897 | } |
1898 | |
1899 | /// Emit the code for deleting a single object with a destroying operator |
1900 | /// delete. If the element type has a non-virtual destructor, Ptr has already |
1901 | /// been converted to the type of the parameter of 'operator delete'. Otherwise |
1902 | /// Ptr points to an object of the static type. |
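///
/// For example (illustrative only), given
///   struct W { void operator delete(W *, std::destroying_delete_t); };
/// a `delete w` emits no destructor call here; the destroying operator
/// delete itself is responsible for destroying the object.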
1903 | static void EmitDestroyingObjectDelete(CodeGenFunction &CGF, |
1904 | const CXXDeleteExpr *DE, Address Ptr, |
1905 | QualType ElementType) { |
1906 | auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor(); |
1907 | if (Dtor && Dtor->isVirtual()) |
1908 | CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType, |
1909 | Dtor); |
1910 | else |
1911 | CGF.EmitDeleteCall(DeleteFD: DE->getOperatorDelete(), Ptr: Ptr.emitRawPointer(CGF), |
1912 | DeleteTy: ElementType); |
1913 | } |
1914 | |
1915 | /// Emit the code for deleting a single object. |
1916 | /// \return \c true if we started emitting UnconditionalDeleteBlock, \c false |
1917 | /// if not. |
1918 | static bool EmitObjectDelete(CodeGenFunction &CGF, |
1919 | const CXXDeleteExpr *DE, |
1920 | Address Ptr, |
1921 | QualType ElementType, |
1922 | llvm::BasicBlock *UnconditionalDeleteBlock) { |
1923 | // C++11 [expr.delete]p3: |
1924 | // If the static type of the object to be deleted is different from its |
1925 | // dynamic type, the static type shall be a base class of the dynamic type |
1926 | // of the object to be deleted and the static type shall have a virtual |
1927 | // destructor or the behavior is undefined. |
1928 | CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, DE->getExprLoc(), Ptr, |
1929 | ElementType); |
1930 | |
1931 | const FunctionDecl *OperatorDelete = DE->getOperatorDelete(); |
1932 | assert(!OperatorDelete->isDestroyingOperatorDelete()); |
1933 | |
1934 | // Find the destructor for the type, if applicable. If the |
1935 | // destructor is virtual, we'll just emit the vcall and return. |
1936 | const CXXDestructorDecl *Dtor = nullptr; |
1937 | if (const RecordType *RT = ElementType->getAs<RecordType>()) { |
1938 | CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RT->getDecl()); |
1939 | if (RD->hasDefinition() && !RD->hasTrivialDestructor()) { |
1940 | Dtor = RD->getDestructor(); |
1941 | |
1942 | if (Dtor->isVirtual()) { |
1943 | bool UseVirtualCall = true; |
1944 | const Expr *Base = DE->getArgument(); |
1945 | if (auto *DevirtualizedDtor = |
1946 | dyn_cast_or_null<const CXXDestructorDecl>( |
1947 | Dtor->getDevirtualizedMethod( |
1948 | Base, CGF.CGM.getLangOpts().AppleKext))) { |
1949 | UseVirtualCall = false; |
1950 | const CXXRecordDecl *DevirtualizedClass = |
1951 | DevirtualizedDtor->getParent(); |
1952 | if (declaresSameEntity(getCXXRecord(E: Base), DevirtualizedClass)) { |
1953 | // Devirtualized to the class of the base type (the type of the |
1954 | // whole expression). |
1955 | Dtor = DevirtualizedDtor; |
1956 | } else { |
1957 | // Devirtualized to some other type. Would need to cast the this |
1958 | // pointer to that type but we don't have support for that yet, so |
1959 | // do a virtual call. FIXME: handle the case where it is |
1960 | // devirtualized to the derived type (the type of the inner |
1961 | // expression) as in EmitCXXMemberOrOperatorMemberCallExpr. |
1962 | UseVirtualCall = true; |
1963 | } |
1964 | } |
1965 | if (UseVirtualCall) { |
1966 | CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType, |
1967 | Dtor); |
1968 | return false; |
1969 | } |
1970 | } |
1971 | } |
1972 | } |
1973 | |
1974 | // Make sure that we call delete even if the dtor throws. |
1975 | // This doesn't have to be a conditional cleanup because we're going |
1976 | // to pop it off in a second. |
1977 | CGF.EHStack.pushCleanup<CallObjectDelete>( |
1978 | Kind: NormalAndEHCleanup, A: Ptr.emitRawPointer(CGF), A: OperatorDelete, A: ElementType); |
1979 | |
1980 | if (Dtor) |
1981 | CGF.EmitCXXDestructorCall(D: Dtor, Type: Dtor_Complete, |
1982 | /*ForVirtualBase=*/false, |
1983 | /*Delegating=*/false, |
1984 | This: Ptr, ThisTy: ElementType); |
1985 | else if (auto Lifetime = ElementType.getObjCLifetime()) { |
1986 | switch (Lifetime) { |
1987 | case Qualifiers::OCL_None: |
1988 | case Qualifiers::OCL_ExplicitNone: |
1989 | case Qualifiers::OCL_Autoreleasing: |
1990 | break; |
1991 | |
1992 | case Qualifiers::OCL_Strong: |
1993 | CGF.EmitARCDestroyStrong(addr: Ptr, precise: ARCPreciseLifetime); |
1994 | break; |
1995 | |
1996 | case Qualifiers::OCL_Weak: |
1997 | CGF.EmitARCDestroyWeak(addr: Ptr); |
1998 | break; |
1999 | } |
2000 | } |
2001 | |
2002 | // When optimizing for size, call 'operator delete' unconditionally. |
2003 | if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) { |
2004 | CGF.EmitBlock(BB: UnconditionalDeleteBlock); |
2005 | CGF.PopCleanupBlock(); |
2006 | return true; |
2007 | } |
2008 | |
2009 | CGF.PopCleanupBlock(); |
2010 | return false; |
2011 | } |
2012 | |
2013 | namespace { |
2014 | /// Calls the given 'operator delete' on an array of objects. |
2015 | struct CallArrayDelete final : EHScopeStack::Cleanup { |
2016 | llvm::Value *Ptr; |
2017 | const FunctionDecl *OperatorDelete; |
2018 | llvm::Value *NumElements; |
2019 | QualType ElementType; |
2020 | CharUnits CookieSize; |
2021 | |
2022 | CallArrayDelete(llvm::Value *Ptr, |
2023 | const FunctionDecl *OperatorDelete, |
2024 | llvm::Value *NumElements, |
2025 | QualType ElementType, |
2026 | CharUnits CookieSize) |
2027 | : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements), |
2028 | ElementType(ElementType), CookieSize(CookieSize) {} |
2029 | |
2030 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2031 | CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements, |
2032 | CookieSize); |
2033 | } |
2034 | }; |
2035 | } |
2036 | |
2037 | /// Emit the code for deleting an array of objects. |
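///
/// For example (illustrative only), for `delete[] p` where the elements have
/// a non-trivial destructor, the element count is read back from the array
/// cookie written by the matching array new, the elements are destroyed in
/// reverse order, and then operator delete[] is called on the original
/// allocation pointer.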
2038 | static void EmitArrayDelete(CodeGenFunction &CGF, |
2039 | const CXXDeleteExpr *E, |
2040 | Address deletedPtr, |
2041 | QualType elementType) { |
2042 | llvm::Value *numElements = nullptr; |
2043 | llvm::Value *allocatedPtr = nullptr; |
2044 | CharUnits cookieSize; |
2045 | CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr: deletedPtr, expr: E, ElementType: elementType, |
2046 | NumElements&: numElements, AllocPtr&: allocatedPtr, CookieSize&: cookieSize); |
2047 | |
2048 | assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer" ); |
2049 | |
2050 | // Make sure that we call delete even if one of the dtors throws. |
2051 | const FunctionDecl *operatorDelete = E->getOperatorDelete(); |
2052 | CGF.EHStack.pushCleanup<CallArrayDelete>(Kind: NormalAndEHCleanup, |
2053 | A: allocatedPtr, A: operatorDelete, |
2054 | A: numElements, A: elementType, |
2055 | A: cookieSize); |
2056 | |
2057 | // Destroy the elements. |
2058 | if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) { |
2059 | assert(numElements && "no element count for a type with a destructor!" ); |
2060 | |
2061 | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(T: elementType); |
2062 | CharUnits elementAlign = |
2063 | deletedPtr.getAlignment().alignmentOfArrayElement(elementSize); |
2064 | |
2065 | llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF); |
2066 | llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP( |
2067 | Ty: deletedPtr.getElementType(), Ptr: arrayBegin, IdxList: numElements, Name: "delete.end" ); |
2068 | |
2069 | // Note that it is legal to allocate a zero-length array, and we |
2070 | // can never fold the check away because the length should always |
2071 | // come from a cookie. |
2072 | CGF.emitArrayDestroy(begin: arrayBegin, end: arrayEnd, elementType, elementAlign, |
2073 | destroyer: CGF.getDestroyer(destructionKind: dtorKind), |
2074 | /*checkZeroLength*/ true, |
2075 | useEHCleanup: CGF.needsEHCleanup(kind: dtorKind)); |
2076 | } |
2077 | |
2078 | // Pop the cleanup block. |
2079 | CGF.PopCleanupBlock(); |
2080 | } |
2081 | |
2082 | void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) { |
2083 | const Expr *Arg = E->getArgument(); |
2084 | Address Ptr = EmitPointerWithAlignment(Addr: Arg); |
2085 | |
2086 | // Null check the pointer. |
2087 | // |
2088 | // We could avoid this null check if we can determine that the object |
2089 | // destruction is trivial and doesn't require an array cookie; we can |
2090 | // unconditionally perform the operator delete call in that case. For now, we |
2091 | // assume that deleted pointers are null rarely enough that it's better to |
2092 | // keep the branch. This might be worth revisiting for a -O0 code size win. |
2093 | llvm::BasicBlock *DeleteNotNull = createBasicBlock(name: "delete.notnull" ); |
2094 | llvm::BasicBlock *DeleteEnd = createBasicBlock(name: "delete.end" ); |
2095 | |
2096 | llvm::Value *IsNull = Builder.CreateIsNull(Addr: Ptr, Name: "isnull" ); |
2097 | |
2098 | Builder.CreateCondBr(Cond: IsNull, True: DeleteEnd, False: DeleteNotNull); |
2099 | EmitBlock(BB: DeleteNotNull); |
2100 | Ptr.setKnownNonNull(); |
2101 | |
2102 | QualType DeleteTy = E->getDestroyedType(); |
2103 | |
2104 | // A destroying operator delete overrides the entire operation of the |
2105 | // delete expression. |
2106 | if (E->getOperatorDelete()->isDestroyingOperatorDelete()) { |
2107 | EmitDestroyingObjectDelete(CGF&: *this, DE: E, Ptr, ElementType: DeleteTy); |
2108 | EmitBlock(BB: DeleteEnd); |
2109 | return; |
2110 | } |
2111 | |
2112 | // We might be deleting a pointer to array. If so, GEP down to the |
2113 | // first non-array element. |
2114 | // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*) |
2115 | if (DeleteTy->isConstantArrayType()) { |
2116 | llvm::Value *Zero = Builder.getInt32(C: 0); |
2117 | SmallVector<llvm::Value*,8> GEP; |
2118 | |
2119 | GEP.push_back(Elt: Zero); // point at the outermost array |
2120 | |
2121 | // For each layer of array type we're pointing at: |
2122 | while (const ConstantArrayType *Arr |
2123 | = getContext().getAsConstantArrayType(T: DeleteTy)) { |
2124 | // 1. Unpeel the array type. |
2125 | DeleteTy = Arr->getElementType(); |
2126 | |
2127 | // 2. GEP to the first element of the array. |
2128 | GEP.push_back(Elt: Zero); |
2129 | } |
2130 | |
2131 | Ptr = Builder.CreateInBoundsGEP(Addr: Ptr, IdxList: GEP, ElementType: ConvertTypeForMem(T: DeleteTy), |
2132 | Align: Ptr.getAlignment(), Name: "del.first" ); |
2133 | } |
2134 | |
2135 | assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType()); |
2136 | |
2137 | if (E->isArrayForm()) { |
2138 | EmitArrayDelete(CGF&: *this, E, deletedPtr: Ptr, elementType: DeleteTy); |
2139 | EmitBlock(BB: DeleteEnd); |
2140 | } else { |
2141 | if (!EmitObjectDelete(CGF&: *this, DE: E, Ptr, ElementType: DeleteTy, UnconditionalDeleteBlock: DeleteEnd)) |
2142 | EmitBlock(BB: DeleteEnd); |
2143 | } |
2144 | } |
2145 | |
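/// Returns true if the given glvalue is obtained, possibly through
/// parentheses, comma operators, conditionals, opaque values, or array
/// subscripting, by dereferencing a pointer, e.g. (illustrative only)
/// `*p`, `p[i]`, or `cond ? *p : *q`.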
2146 | static bool isGLValueFromPointerDeref(const Expr *E) { |
2147 | E = E->IgnoreParens(); |
2148 | |
2149 | if (const auto *CE = dyn_cast<CastExpr>(Val: E)) { |
2150 | if (!CE->getSubExpr()->isGLValue()) |
2151 | return false; |
2152 | return isGLValueFromPointerDeref(E: CE->getSubExpr()); |
2153 | } |
2154 | |
2155 | if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Val: E)) |
2156 | return isGLValueFromPointerDeref(E: OVE->getSourceExpr()); |
2157 | |
2158 | if (const auto *BO = dyn_cast<BinaryOperator>(Val: E)) |
2159 | if (BO->getOpcode() == BO_Comma) |
2160 | return isGLValueFromPointerDeref(E: BO->getRHS()); |
2161 | |
2162 | if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(Val: E)) |
2163 | return isGLValueFromPointerDeref(E: ACO->getTrueExpr()) || |
2164 | isGLValueFromPointerDeref(E: ACO->getFalseExpr()); |
2165 | |
2166 | // C++11 [expr.sub]p1: |
2167 | // The expression E1[E2] is identical (by definition) to *((E1)+(E2)) |
2168 | if (isa<ArraySubscriptExpr>(Val: E)) |
2169 | return true; |
2170 | |
2171 | if (const auto *UO = dyn_cast<UnaryOperator>(Val: E)) |
2172 | if (UO->getOpcode() == UO_Deref) |
2173 | return true; |
2174 | |
2175 | return false; |
2176 | } |
2177 | |
2178 | static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E, |
2179 | llvm::Type *StdTypeInfoPtrTy) { |
2180 | // Get the vtable pointer. |
2181 | Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF); |
2182 | |
2183 | QualType SrcRecordTy = E->getType(); |
2184 | |
2185 | // C++ [class.cdtor]p4: |
2186 | // If the operand of typeid refers to the object under construction or |
2187 | // destruction and the static type of the operand is neither the constructor |
2188 | // or destructor’s class nor one of its bases, the behavior is undefined. |
2189 | CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DynamicOperation, Loc: E->getExprLoc(), |
2190 | Addr: ThisPtr, Type: SrcRecordTy); |
2191 | |
2192 | // C++ [expr.typeid]p2: |
2193 | // If the glvalue expression is obtained by applying the unary * operator to |
2194 | // a pointer and the pointer is a null pointer value, the typeid expression |
2195 | // throws the std::bad_typeid exception. |
2196 | // |
2197 | // However, this paragraph's intent is not clear. We choose a very generous |
2198 | // interpretation which compels us to consider comma operators, conditional |
2199 | // operators, parentheses and other such constructs. |
2200 | if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked( |
2201 | IsDeref: isGLValueFromPointerDeref(E), SrcRecordTy)) { |
2202 | llvm::BasicBlock *BadTypeidBlock = |
2203 | CGF.createBasicBlock(name: "typeid.bad_typeid" ); |
2204 | llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "typeid.end" ); |
2205 | |
2206 | llvm::Value *IsNull = CGF.Builder.CreateIsNull(Addr: ThisPtr); |
2207 | CGF.Builder.CreateCondBr(Cond: IsNull, True: BadTypeidBlock, False: EndBlock); |
2208 | |
2209 | CGF.EmitBlock(BB: BadTypeidBlock); |
2210 | CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF); |
2211 | CGF.EmitBlock(BB: EndBlock); |
2212 | } |
2213 | |
2214 | return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr, |
2215 | StdTypeInfoPtrTy); |
2216 | } |
2217 | |
2218 | llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) { |
2219 | llvm::Type *PtrTy = llvm::PointerType::getUnqual(C&: getLLVMContext()); |
2220 | LangAS GlobAS = CGM.GetGlobalVarAddressSpace(D: nullptr); |
2221 | |
2222 | auto MaybeASCast = [=](auto &&TypeInfo) { |
2223 | if (GlobAS == LangAS::Default) |
2224 | return TypeInfo; |
2225 | return getTargetHooks().performAddrSpaceCast(CGM, TypeInfo, GlobAS, |
2226 | LangAS::Default, PtrTy); |
2227 | }; |
2228 | |
2229 | if (E->isTypeOperand()) { |
2230 | llvm::Constant *TypeInfo = |
2231 | CGM.GetAddrOfRTTIDescriptor(Ty: E->getTypeOperand(Context&: getContext())); |
2232 | return MaybeASCast(TypeInfo); |
2233 | } |
2234 | |
2235 | // C++ [expr.typeid]p2: |
2236 | // When typeid is applied to a glvalue expression whose type is a |
2237 | // polymorphic class type, the result refers to a std::type_info object |
2238 | // representing the type of the most derived object (that is, the dynamic |
2239 | // type) to which the glvalue refers. |
2240 | // If the operand is already the most derived object, no need to look up the vtable. |
2241 | if (E->isPotentiallyEvaluated() && !E->isMostDerived(Context&: getContext())) |
2242 | return EmitTypeidFromVTable(CGF&: *this, E: E->getExprOperand(), StdTypeInfoPtrTy: PtrTy); |
2243 | |
2244 | QualType OperandTy = E->getExprOperand()->getType(); |
2245 | return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(Ty: OperandTy)); |
2246 | } |
2247 | |
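/// Emit the result for a dynamic_cast that is known to fail. For example
/// (illustrative only), `dynamic_cast<D *>(p)` produces a null `D *`,
/// while `dynamic_cast<D &>(r)` must throw std::bad_cast.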
2248 | static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF, |
2249 | QualType DestTy) { |
2250 | llvm::Type *DestLTy = CGF.ConvertType(T: DestTy); |
2251 | if (DestTy->isPointerType()) |
2252 | return llvm::Constant::getNullValue(Ty: DestLTy); |
2253 | |
2254 | /// C++ [expr.dynamic.cast]p9: |
2255 | /// A failed cast to reference type throws std::bad_cast |
2256 | if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF)) |
2257 | return nullptr; |
2258 | |
2259 | CGF.Builder.ClearInsertionPoint(); |
2260 | return llvm::PoisonValue::get(T: DestLTy); |
2261 | } |
2262 | |
2263 | llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr, |
2264 | const CXXDynamicCastExpr *DCE) { |
2265 | CGM.EmitExplicitCastExprType(DCE, this); |
2266 | QualType DestTy = DCE->getTypeAsWritten(); |
2267 | |
2268 | QualType SrcTy = DCE->getSubExpr()->getType(); |
2269 | |
2270 | // C++ [expr.dynamic.cast]p7: |
2271 | // If T is "pointer to cv void," then the result is a pointer to the most |
2272 | // derived object pointed to by v. |
2273 | bool IsDynamicCastToVoid = DestTy->isVoidPointerType(); |
2274 | QualType SrcRecordTy; |
2275 | QualType DestRecordTy; |
2276 | if (IsDynamicCastToVoid) { |
2277 | SrcRecordTy = SrcTy->getPointeeType(); |
2278 | // No DestRecordTy. |
2279 | } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) { |
2280 | SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType(); |
2281 | DestRecordTy = DestPTy->getPointeeType(); |
2282 | } else { |
2283 | SrcRecordTy = SrcTy; |
2284 | DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType(); |
2285 | } |
2286 | |
2287 | // C++ [class.cdtor]p5: |
2288 | // If the operand of the dynamic_cast refers to the object under |
2289 | // construction or destruction and the static type of the operand is not a |
2290 | // pointer to or object of the constructor or destructor’s own class or one |
2291 | // of its bases, the dynamic_cast results in undefined behavior. |
2292 | EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr, SrcRecordTy); |
2293 | |
2294 | if (DCE->isAlwaysNull()) { |
2295 | if (llvm::Value *T = EmitDynamicCastToNull(CGF&: *this, DestTy)) { |
2296 | // Expression emission is expected to retain a valid insertion point. |
2297 | if (!Builder.GetInsertBlock()) |
2298 | EmitBlock(BB: createBasicBlock(name: "dynamic_cast.unreachable" )); |
2299 | return T; |
2300 | } |
2301 | } |
2302 | |
2303 | assert(SrcRecordTy->isRecordType() && "source type must be a record type!" ); |
2304 | |
2305 | // If the destination is effectively final, the cast succeeds if and only |
2306 | // if the dynamic type of the pointer is exactly the destination type. |
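// For example (illustrative only), given `struct D final : B {};`, a
// `dynamic_cast<D *>(b)` can succeed only if b's vptr equals D's vtable
// address point, so the runtime call can be replaced by one comparison.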
2307 | bool IsExact = !IsDynamicCastToVoid && |
2308 | CGM.getCodeGenOpts().OptimizationLevel > 0 && |
2309 | DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() && |
2310 | CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy); |
2311 | |
2312 | // C++ [expr.dynamic.cast]p4: |
2313 | // If the value of v is a null pointer value in the pointer case, the result |
2314 | // is the null pointer value of type T. |
2315 | bool ShouldNullCheckSrcValue = |
2316 | IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked( |
2317 | SrcIsPtr: SrcTy->isPointerType(), SrcRecordTy); |
2318 | |
2319 | llvm::BasicBlock *CastNull = nullptr; |
2320 | llvm::BasicBlock *CastNotNull = nullptr; |
2321 | llvm::BasicBlock *CastEnd = createBasicBlock(name: "dynamic_cast.end" ); |
2322 | |
2323 | if (ShouldNullCheckSrcValue) { |
2324 | CastNull = createBasicBlock(name: "dynamic_cast.null" ); |
2325 | CastNotNull = createBasicBlock(name: "dynamic_cast.notnull" ); |
2326 | |
2327 | llvm::Value *IsNull = Builder.CreateIsNull(Addr: ThisAddr); |
2328 | Builder.CreateCondBr(Cond: IsNull, True: CastNull, False: CastNotNull); |
2329 | EmitBlock(BB: CastNotNull); |
2330 | } |
2331 | |
2332 | llvm::Value *Value; |
2333 | if (IsDynamicCastToVoid) { |
2334 | Value = CGM.getCXXABI().emitDynamicCastToVoid(CGF&: *this, Value: ThisAddr, SrcRecordTy); |
2335 | } else if (IsExact) { |
2336 | // If the destination type is effectively final, this pointer points to the |
2337 | // right type if and only if its vptr has the right value. |
2338 | Value = CGM.getCXXABI().emitExactDynamicCast( |
2339 | CGF&: *this, Value: ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastSuccess: CastEnd, CastFail: CastNull); |
2340 | } else { |
2341 | assert(DestRecordTy->isRecordType() && |
2342 | "destination type must be a record type!" ); |
2343 | Value = CGM.getCXXABI().emitDynamicCastCall(CGF&: *this, Value: ThisAddr, SrcRecordTy, |
2344 | DestTy, DestRecordTy, CastEnd); |
2345 | } |
2346 | CastNotNull = Builder.GetInsertBlock(); |
2347 | |
2348 | llvm::Value *NullValue = nullptr; |
2349 | if (ShouldNullCheckSrcValue) { |
2350 | EmitBranch(Block: CastEnd); |
2351 | |
2352 | EmitBlock(BB: CastNull); |
2353 | NullValue = EmitDynamicCastToNull(CGF&: *this, DestTy); |
2354 | CastNull = Builder.GetInsertBlock(); |
2355 | |
2356 | EmitBranch(Block: CastEnd); |
2357 | } |
2358 | |
2359 | EmitBlock(BB: CastEnd); |
2360 | |
2361 | if (CastNull) { |
2362 | llvm::PHINode *PHI = Builder.CreatePHI(Ty: Value->getType(), NumReservedValues: 2); |
2363 | PHI->addIncoming(V: Value, BB: CastNotNull); |
2364 | PHI->addIncoming(V: NullValue, BB: CastNull); |
2365 | |
2366 | Value = PHI; |
2367 | } |
2368 | |
2369 | return Value; |
2370 | } |
2371 | |