//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <optional>
#include <string>

using namespace clang;
using namespace CodeGen;

// Experiment to make sanitizers easier to debug.
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
    "ubsan-unique-traps", llvm::cl::Optional,
    llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));

// TODO: Introduce frontend options to enable these checks per sanitizer,
// similar to `-fsanitize-trap`.
static llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));

//===--------------------------------------------------------------------===//
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // If this is a conditional operator that results in a bitfield, we can
  // special-case its emission. The normal 'EmitLValue' version of this is
  // particularly difficult to codegen for, since creating a single "LValue"
  // for two different-sized arguments here is not particularly doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

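/// Push the cleanup (if any) needed to destroy a materialized temporary,
/// based on its storage duration and, for Objective-C++ ARC, its ownership
/// qualification.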
static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

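/// Create the storage that a MaterializeTemporaryExpr will bind to: a
/// promoted constant global when possible, a global temporary for static or
/// thread storage duration, and otherwise an ordinary memory temporary.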
static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
                                           const MaterializeTemporaryExpr *M,
                                           const Expr *Inner,
                                           RawAddress *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record, try to promote it into
    // a constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        Ty.isConstantStorage(CGF.getContext(), true, false))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return RawAddress(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS.
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().starts_with("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    RawAddress Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Object.withElementType(Ty);

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  RawAddress Alloca = Address::invalid();
  RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Object.withElementType(TemporaryType);
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks. However, when inside an "await.suspend"
      // block, we should always avoid a conditional cleanup because it creates
      // a boolean marker that lives across await_suspend, which can destroy
      // the coroutine frame.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          ((!SanOpts.has(SanitizerKind::HWAddress) &&
            !SanOpts.has(SanitizerKind::Memory) &&
            !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
           inSuspendBlock())) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress(*this);
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

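/// Whether a null pointer may legitimately reach the given kind of type
/// check; e.g. the pointer forms of dynamic_cast and of down/upcasts accept
/// null values.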
bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

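/// Whether -fsanitize=vptr should check the dynamic type for the given kind
/// of access to an object of dynamic class type \p Ty.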
bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

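/// Whether any of the sanitizers implemented by EmitTypeCheck (null,
/// alignment, object-size, vptr) is enabled.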
bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Skip the check if the type is contained in the NoSanitizeList, based on
    // the mangled type name.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

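/// If \p E names a parameter carrying the pass_object_size attribute, load
/// the implicitly passed size argument and return it scaled to units of
/// \p EltTy; otherwise return nullptr.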
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return nullptr if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                              StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///    p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///    struct s {
///      struct s *ptr;
///      int count;
///      char array[] __attribute__((counted_by(count)));
///    };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  const RecordDecl *ExpectedRD;

  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //  1. DeclRefExpr - This is the expression for the base of the structure.
  //     It's exactly what we want to build an access to the \p counted_by
  //     field.
  //  2. MemberExpr - This is the expression that has the same \p RecordDecl
  //     as the flexible array member's lexical enclosing \p RecordDecl. This
  //     allows us to catch things like: "p->p->array"
  //  3. CompoundLiteralExpr - This is for people who create something
  //     heretical like (struct foo has a flexible array member):
  //
  //       (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

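// A (record, field-index) pair for each level of a GEP path down to a field;
// getGEPIndicesToField collects these innermost record first.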
using RecIndicesTy =
    SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;

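/// Recursively search \p RD (including nested anonymous structs and unions)
/// for \p FD, appending the LLVM field indices needed to GEP to it into
/// \p Indices. Returns true if the field was found.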
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *FD, RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const Decl *D : RD->decls()) {
    if (const auto *Field = dyn_cast<FieldDecl>(D)) {
      FieldNo = Layout.getLLVMFieldNo(Field);
      if (FD == Field) {
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }

    if (const auto *Record = dyn_cast<RecordDecl>(D)) {
      ++FieldNo;
      if (getGEPIndicesToField(CGF, Record, FD, Indices)) {
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }
  }

  return false;
}

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
    Res = EmitDeclRefLValue(DRE).getPointer(*this);
    Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
                                    getPointerAlign(), "dre.load");
  } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
    LValue LV = EmitMemberExpr(ME);
    Address Addr = LV.getAddress(*this);
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  llvm::Value *Zero = Builder.getInt32(0);
  RecIndicesTy Indices;

  getGEPIndicesToField(*this, RD, CountDecl, Indices);

  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Res = Builder.CreateInBoundsGEP(
        ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
        {Zero, I->second}, "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}

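/// Return the FieldDecl named by \p FD's counted_by attribute, looking
/// through indirect fields of anonymous structs; returns nullptr if \p FD is
/// null or carries no such attribute.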
const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
  if (!FD)
    return nullptr;

  const auto *CAT = FD->getType()->getAs<CountAttributedType>();
  if (!CAT)
    return nullptr;

  const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
  const auto *CountDecl = CountDRE->getDecl();
  if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
    CountDecl = IFD->getAnonField();

  return dyn_cast<FieldDecl>(CountDecl);
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
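  // An actual access requires Index < Bound; when only the address is formed
  // (e.g. &a[n]), the one-past-the-end index is also valid, hence ULE.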
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}

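/// Emit a complex pre/post increment or decrement, returning the value the
/// expression evaluates to: the original value for post-forms, the updated
/// value for pre-forms.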
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

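/// Emit the side effects of an explicit cast's type: bind any VLA size
/// expressions occurring in the cast type and describe the type to the debug
/// info.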
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
// LValue Expression Emission
//===----------------------------------------------------------------------===//

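/// Worker for CodeGenFunction::EmitPointerWithAlignment: compute the address
/// of a pointer-typed expression, looking through casts, unary &, and
/// addressof-style builtins to derive better alignment, base, and TBAA
/// information.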
1249 | static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, |
1250 | TBAAAccessInfo *TBAAInfo, |
1251 | KnownNonNull_t IsKnownNonNull, |
1252 | CodeGenFunction &CGF) { |
1253 | // We allow this with ObjC object pointers because of fragile ABIs. |
1254 | assert(E->getType()->isPointerType() || |
1255 | E->getType()->isObjCObjectPointerType()); |
1256 | E = E->IgnoreParens(); |
1257 | |
1258 | // Casts: |
1259 | if (const CastExpr *CE = dyn_cast<CastExpr>(Val: E)) { |
1260 | if (const auto *ECE = dyn_cast<ExplicitCastExpr>(Val: CE)) |
1261 | CGF.CGM.EmitExplicitCastExprType(E: ECE, CGF: &CGF); |
1262 | |
1263 | switch (CE->getCastKind()) { |
1264 | // Non-converting casts (but not C's implicit conversion from void*). |
1265 | case CK_BitCast: |
1266 | case CK_NoOp: |
1267 | case CK_AddressSpaceConversion: |
1268 | if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) { |
1269 | if (PtrTy->getPointeeType()->isVoidType()) |
1270 | break; |
1271 | |
1272 | LValueBaseInfo InnerBaseInfo; |
1273 | TBAAAccessInfo InnerTBAAInfo; |
1274 | Address Addr = CGF.EmitPointerWithAlignment( |
1275 | Addr: CE->getSubExpr(), BaseInfo: &InnerBaseInfo, TBAAInfo: &InnerTBAAInfo, IsKnownNonNull); |
1276 | if (BaseInfo) *BaseInfo = InnerBaseInfo; |
1277 | if (TBAAInfo) *TBAAInfo = InnerTBAAInfo; |
1278 | |
1279 | if (isa<ExplicitCastExpr>(Val: CE)) { |
1280 | LValueBaseInfo TargetTypeBaseInfo; |
1281 | TBAAAccessInfo TargetTypeTBAAInfo; |
1282 | CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( |
1283 | T: E->getType(), BaseInfo: &TargetTypeBaseInfo, TBAAInfo: &TargetTypeTBAAInfo); |
1284 | if (TBAAInfo) |
1285 | *TBAAInfo = |
1286 | CGF.CGM.mergeTBAAInfoForCast(SourceInfo: *TBAAInfo, TargetInfo: TargetTypeTBAAInfo); |
1287 | // If the source l-value is opaque, honor the alignment of the |
1288 | // casted-to type. |
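          // (For example, the result of '(AlignedT *)somePtr' is given
          // AlignedT's natural alignment unless the source pointer came from
          // a declaration with a known alignment.)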
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          CodeGenFunction::CFITCK_UnrelatedCast,
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
            CGF.ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return Addr;
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      Address Addr = CGF.EmitPointerWithAlignment(
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(CGF);
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(CGF);
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
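  // This is the path taken for, e.g., '(int *)malloc(n)': the void* operand
  // defeats the cast handling above, so the result simply gets the natural
  // alignment of 'int'.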
  return CGF.makeNaturalAddressForPointer(
      CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
      /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}

llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address. Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = UnqualPtrTy;
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}

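// Returns true if 'Obj' is a CXXThisExpr, possibly wrapped in parens, casts,
// or __extension__, e.g. the base expression of '((T *)this)->field'. A
// dynamic_cast is deliberately rejected because its result may be null.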
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
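      // 'this' is required to be suitably aligned and non-null, and the
      // address of a plain DeclRefExpr cannot be null either, so the
      // corresponding checks would be redundant here.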
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E,
                                   KnownNonNull_t IsKnownNonNull) {
  LValue LV = EmitLValueHelper(E, IsKnownNonNull);
  if (IsKnownNonNull && !LV.isKnownNonNull())
    LV.setKnownNonNull();
  return LV;
}

static QualType getConstantExprReferredType(const FullExpr *E,
                                            const ASTContext &Ctx) {
  const Expr *SE = E->getSubExpr()->IgnoreImplicit();
  if (isa<OpaqueValueExpr>(SE))
    return SE->getType();
  return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
}

LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
                                         KnownNonNull_t IsKnownNonNull) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                      IsKnownNonNull);
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = getConstantExprReferredType(CE, getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
                      IsKnownNonNull);
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress(*this);
      llvm::Value *V = Addr.getBasePointer();
      Scope.ForceCleanup({&V});
      Addr.replaceBasePointer(V);
      return LValue::MakeAddr(Addr, LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                      IsKnownNonNull);
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  case Expr::PackIndexingExprClass:
    return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
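  // (So 'const int' qualifies, while 'const volatile int' or a const object
  // of a class type with a mutable member does not.)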
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
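// For example, a 'const int' variable is classified CEK_AsValueOnly, a
// 'const int &' is CEK_AsValueOrReference, and a plain 'int &' is
// CEK_AsReferenceOnly.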
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}

llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();
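  // (This covers enums with a fixed bool underlying type, e.g.
  // 'enum E : bool'.)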

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
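    // The result is a half-open [Min, End) over the values representable in
    // the enum's bit-width; e.g. 'enum Color { Red, Green, Blue }' needs two
    // value bits, giving [0, 4).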
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
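  // After the decrement the bound is inclusive, so the comparisons below test
  // Min <= Value <= End; for bool this collapses to 'icmp ule Value, 1'.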
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
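    // For example, a three-element bool vector is stored as a small integer
    // (an i8 here): the integer is loaded, bitcast to <8 x i1>, and then
    // shuffled down to the <3 x i1> value type.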
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Addr.withElementType(vec4Ty);
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    }

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }
  if (Ty->isExtVectorBoolType()) {
    const auto *RawIntTy = Value->getType();
    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  return Value;
}

// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
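// For example, a 2x2 matrix of float lives in memory as '[4 x float]' but is
// manipulated as the value type '<4 x float>'.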
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
                                            CodeGenFunction &CGF,
                                            bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Addr.withElementType(VectorTy);
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Addr.withElementType(ArrayTy);
  }

  return Addr;
}

// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
    if (VecTy && ClangVecTy->isExtVectorBoolType()) {
      auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
      // Expand to the memory bit width.
      unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
      // <N x i1> --> <P x i1>.
      Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
      // <P x i1> --> iP.
      Value = Builder.CreateBitCast(Value, MemIntTy);
    } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
      // Handle vec3 special.
      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Addr.withElementType(SrcTy);
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}

// Emit a load of an LValue of matrix type. This may require casting the
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress(*this);
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global register variables always invoke intrinsics.
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
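  // Worked example: a signed 5-bit field at bit offset 3 in a 32-bit unit is
  // extracted below as 'shl 24' followed by 'ashr 27'; the unsigned case
  // instead uses 'lshr 3' and a mask of the low five bits.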
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
    llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
    llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
    Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
  }

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);
  return RValue::get(Vec);
}

/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix, "vector.elt");

  return VectorBasePtrPlusIx;
}

/// Loads of global named registers are always calls to intrinsics.
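/// For example, 'register unsigned long current_sp asm("sp");' is read by
/// calling llvm.read_register with the metadata string !"sp" rather than by
/// loading from memory.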
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only.
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
      if (IRStoreTy) {
        auto *IRVecTy = llvm::FixedVectorType::get(
            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
        Vec = Builder.CreateBitCast(Vec, IRVecTy);
        // iN --> <N x i1>.
      }
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      if (IRStoreTy) {
        // <N x i1> --> <iN>.
        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
      }
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    if (Dst.isMatrixElt()) {
      llvm::Value *Idx = Dst.getMatrixIdx();
      if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
      llvm::Value *Vec =
          Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      if (isInit) {
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
        break;
      }
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      if (isInit)
        // Initialize and then skip the primitive store.
        EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
      else
        EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
                         /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.emitRawPointer(*this);
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
                                                ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  Address Ptr = Dst.getBitFieldAddress();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
                                 /*isSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  const bool UseVolatile =
      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
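  // Worked example: storing to a 5-bit field at bit offset 3 in a 32-bit
  // unit masks the source with 0x1F, shifts it left by 3, clears bits 3..7
  // of the old value, and ORs the two together.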
2481 | if (StorageSize != Info.Size) { |
2482 | assert(StorageSize > Info.Size && "Invalid bitfield size." ); |
2483 | llvm::Value *Val = |
2484 | Builder.CreateLoad(Addr: Ptr, IsVolatile: Dst.isVolatileQualified(), Name: "bf.load" ); |
2485 | |
2486 | // Mask the source value as needed. |
2487 | if (!hasBooleanRepresentation(Ty: Dst.getType())) |
2488 | SrcVal = Builder.CreateAnd( |
2489 | LHS: SrcVal, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size), |
2490 | Name: "bf.value" ); |
2491 | MaskedVal = SrcVal; |
2492 | if (Offset) |
2493 | SrcVal = Builder.CreateShl(LHS: SrcVal, RHS: Offset, Name: "bf.shl" ); |
2494 | |
2495 | // Mask out the original value. |
2496 | Val = Builder.CreateAnd( |
2497 | LHS: Val, RHS: ~llvm::APInt::getBitsSet(numBits: StorageSize, loBit: Offset, hiBit: Offset + Info.Size), |
2498 | Name: "bf.clear" ); |
2499 | |
2500 | // Or together the unchanged values and the source value. |
2501 | SrcVal = Builder.CreateOr(LHS: Val, RHS: SrcVal, Name: "bf.set" ); |
2502 | } else { |
2503 | assert(Offset == 0); |
    // According to the AAPCS:
    // When a volatile bit-field is written, and its container does not overlap
    // with any non-bit-field member, its container must be read exactly once
    // and written exactly once using the access width appropriate to the type
    // of the container. The two accesses are not atomic.
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
      Builder.CreateLoad(Ptr, true, "bf.load");
  }

  // Write the new value back out.
  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= StorageSize);
      unsigned HighBits = StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}

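/// Store through an ext-vector component lvalue such as 'v.xy'. As a rough
/// sketch of the lowering:
///
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   typedef float float2 __attribute__((ext_vector_type(2)));
///   float4 v; float2 w;
///   v.xy = w;    // load v, shuffle w into lanes 0 and 1, store all of v
///   v.z  = 1.0f; // load v, insertelement into lane 2, store all of v
/// \endcode
///
/// The store becomes a load of the whole vector, a shufflevector (or an
/// insertelement for a single component), and a store of the whole vector.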
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // HLSL allows storing to scalar values through ExtVector component LValues.
  // To support this we need to handle the case where the destination address
  // is a scalar.
  Address DstAddr = Dst.getExtVectorAddress();
  if (!DstAddr.getElementType()->isVectorTy()) {
    assert(!Dst.getType()->isVectorType() &&
           "this should only occur for non-vector l-values");
    Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
    return;
  }

  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector when the source and destination have the same
      // number of elements, and adjust the mask so each source element lands
      // in the lane it will be stored to.
      SmallVector<int, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i;

      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the destination's length, then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that? This could be simpler.
      SmallVector<int, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(i);
      ExtMask.resize(NumDstElts, -1);
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
      // Build an identity mask for the destination.
      SmallVector<int, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(i);

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // Update the mask entries that are replaced by shuffled-in source
      // elements.
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
    } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) and the target is a vector, it
    // must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
                      Dst.isVolatileQualified());
}

/// Stores to global named registers are always calls to intrinsics.
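///
/// For example (assuming "sp" names a legal named register on the target):
///
/// \code
///   register unsigned long current_sp asm("sp");
///   current_sp = 0; // roughly: call @llvm.write_register.i64(!{!"sp"}, 0)
/// \endcode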
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only.
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = {Ty};

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *Value = Src.getScalarVal();
  if (OrigTy->isPointerTy())
    Value = Builder.CreatePtrToInt(Value, Ty);
  Builder.CreateCall(
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess = false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of that
      // struct follows gcc's behavior and conservatively makes it a non-ivar
      // write-barrier.
      ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, /*IsMemberAccess=*/true);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static LValue EmitThreadPrivateVarDeclLValue(
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
    llvm::Type *RealVarTy, SourceLocation Loc) {
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
    Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
        CGF, VD, Addr, Loc);
  else
    Addr =
        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);

  Addr = Addr.withElementType(RealVarTy);
  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}

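/// Compute the address through which an OpenMP 'declare target' variable is
/// accessed in device code, or return an invalid address when the variable
/// must not be referenced directly from this context.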
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
                                           const VarDecl *VD, QualType T) {
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // Return an invalid address if variable is MT_To (or MT_Enter starting with
  // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
  // and MT_To (or MT_Enter) with unified memory, return a valid address.
  if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
    return Address::invalid();
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
          ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
            *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
         "Expected link clause OR to clause with unified memory enabled.");
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}

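/// Load the address referred to by a reference lvalue, decorating the load
/// with TBAA metadata and computing the natural alignment of the pointee.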
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
                                     LValueBaseInfo *PointeeBaseInfo,
                                     TBAAAccessInfo *PointeeTBAAInfo) {
  llvm::LoadInst *Load =
      Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
  return makeNaturalAddressForPointer(Load,
                                      RefLVal.getType()->getPointeeType(),
                                      CharUnits(), /*ForPointeeType=*/true,
                                      PointeeBaseInfo, PointeeTBAAInfo);
}

LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
  LValueBaseInfo PointeeBaseInfo;
  TBAAAccessInfo PointeeTBAAInfo;
  Address PointeeAddr =
      EmitLoadOfReference(RefLVal, &PointeeBaseInfo, &PointeeTBAAInfo);
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                        PointeeBaseInfo, PointeeTBAAInfo);
}

Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
                                           const PointerType *PtrTy,
                                           LValueBaseInfo *BaseInfo,
                                           TBAAAccessInfo *TBAAInfo) {
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
  return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
                                      CharUnits(), /*ForPointeeType=*/true,
                                      BaseInfo, TBAAInfo);
}

LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
                                                const PointerType *PtrTy) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (CGF.getLangOpts().OpenMPIsTargetDevice) {
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
    if (Addr.isValid())
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);

  if (VD->getTLSKind() != VarDecl::TLS_None)
    V = CGF.Builder.CreateThreadLocalAddress(V);

  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  Address Addr(V, RealVarTy, Alignment);
  // Emit a reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
                                          E->getExprLoc());
  }
  LValue LV = VD->getType()->isReferenceType()
                  ? CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
                                                  AlignmentSource::Decl)
                  : CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
                                               GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  if (FD->hasAttr<WeakRefAttr>()) {
    ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
    return aliasee.getPointer();
  }

  llvm::Constant *V = CGM.GetAddrOfFunction(GD);
  return V;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
                                     GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
                            AlignmentSource::Decl);
}

static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
                                      llvm::Value *ThisValue) {
  return CGF.EmitLValueForLambdaField(FD, ThisValue);
}

/// Named registers are represented as named metadata pointing to the register
/// name, which will be read from/written to as an argument to the intrinsics
/// @llvm.read_register/@llvm.write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
  SmallString<64> Name("llvm.named.register.");
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
  assert(Asm->getLabel().size() < 64 - Name.size() &&
         "Register name too big");
  Name.append(Asm->getLabel());
  llvm::NamedMDNode *M = CGM.getModule().getOrInsertNamedMetadata(Name);
  if (M->getNumOperands() == 0) {
    llvm::MDString *Str =
        llvm::MDString::get(CGM.getLLVMContext(), Asm->getLabel());
    llvm::Metadata *Ops[] = {Str};
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
  }

  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);

  llvm::Value *Ptr =
      llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}

/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
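///
/// For example, a mention of a namespace-scope 'constexpr int N = 4;' that is
/// not an odr-use may still be lowered as a load from the global N, but only
/// when the rules below guarantee that a definition of N will be emitted and
/// retained until link time.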
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
                                               const DeclRefExpr *E,
                                               const VarDecl *VD) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  if (E->refersToEnclosingVariableOrCapture())
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (VD->hasLocalStorage()) {
    return VD->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  VD = VD->getDefinition(CGF.getContext());
  if (!VD)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
      CGF.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
  case llvm::GlobalValue::WeakODRLinkage:
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
    return true;
  default:
    return false;
  }
}

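/// Emit an lvalue for a DeclRefExpr: a variable (global, local, captured, or
/// a global named register), a function, a structured binding, or one of the
/// special declarations (MSGuidDecl, TemplateParamObjectDecl) that can appear
/// when non-type template parameters are reconstituted into expressions.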
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  QualType T = E->getType();

  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Global named registers are accessed via intrinsics only.
    if (VD->getStorageClass() == SC_Register &&
        VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
      return EmitGlobalNamedRegister(VD, CGM);

    // If this DeclRefExpr does not constitute an odr-use of the variable,
    // we're not permitted to emit a reference to it in general, and it might
    // not be captured if capture would be necessary for a use. Emit the
    // constant value directly instead.
    if (E->isNonOdrUse() == NOUR_Constant &&
        (VD->getType()->isReferenceType() ||
         !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
      VD->getAnyInitializer(VD);
      llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
          E->getLocation(), *VD->evaluateValue(), VD->getType());
      assert(Val && "failed to emit constant expression");

      Address Addr = Address::invalid();
      if (!VD->getType()->isReferenceType()) {
        // Spill the constant value to a global.
        Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
                                           getContext().getDeclAlign(VD));
        llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
        auto *PTy = llvm::PointerType::get(
            VarTy, getTypes().getTargetAddressSpace(VD->getType()));
        Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
      } else {
        // Should we be using the alignment of the constant pointer we emitted?
        CharUnits Alignment =
            CGM.getNaturalTypeAlignment(E->getType(),
                                        /* BaseInfo= */ nullptr,
                                        /* TBAAInfo= */ nullptr,
                                        /* forPointeeType= */ true);
        Addr = makeNaturalAddressForPointer(Val, T, Alignment);
      }
      return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
    }

    // FIXME: Handle other kinds of non-odr-use DeclRefExprs.

    // Check for captured variables.
    if (E->refersToEnclosingVariableOrCapture()) {
      VD = VD->getCanonicalDecl();
      if (auto *FD = LambdaCaptureFields.lookup(VD))
        return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
      if (CapturedStmtInfo) {
        auto I = LocalDeclMap.find(VD);
        if (I != LocalDeclMap.end()) {
          LValue CapLVal;
          if (VD->getType()->isReferenceType())
            CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
                                                AlignmentSource::Decl);
          else
            CapLVal = MakeAddrLValue(I->second, T);
          // Mark lvalue as nontemporal if the variable is marked as
          // nontemporal in simd context.
          if (getLangOpts().OpenMP &&
              CGM.getOpenMPRuntime().isNontemporalDecl(VD))
            CapLVal.setNontemporal(/*Value=*/true);
          return CapLVal;
        }
        LValue CapLVal =
            EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
                                    CapturedStmtInfo->getContextValue());
        Address LValueAddress = CapLVal.getAddress(*this);
        CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
                                         LValueAddress.getElementType(),
                                         getContext().getDeclAlign(VD)),
                                 CapLVal.getType(),
                                 LValueBaseInfo(AlignmentSource::Decl),
                                 CapLVal.getTBAAInfo());
        // Mark lvalue as nontemporal if the variable is marked as nontemporal
        // in simd context.
        if (getLangOpts().OpenMP &&
            CGM.getOpenMPRuntime().isNontemporalDecl(VD))
          CapLVal.setNontemporal(/*Value=*/true);
        return CapLVal;
      }

      assert(isa<BlockDecl>(CurCodeDecl));
      Address addr = GetAddrOfBlockDecl(VD);
      return MakeAddrLValue(addr, T, AlignmentSource::Decl);
    }
  }

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const auto *VD = cast<ValueDecl>(ND);
    ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
  }

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasLinkage() || VD->isStaticDataMember())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = LocalDeclMap.find(VD);
    if (iter != LocalDeclMap.end()) {
      addr = iter->second;

      // Otherwise, it might be a static local we haven't emitted yet for
      // some reason; most likely, because it's in an outer function.
    } else if (VD->isStaticLocal()) {
      llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
          *VD, CGM.getLLVMLinkageVarDefinition(VD));
      addr = Address(var, ConvertTypeForMem(VD->getType()),
                     getContext().getDeclAlign(VD));

      // No other cases for now.
    } else {
      llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
    }

    // Handle threadlocal function locals.
    if (VD->getTLSKind() != VarDecl::TLS_None)
      addr = addr.withPointer(
          Builder.CreateThreadLocalAddress(addr.getBasePointer()),
          NotKnownNonNull);

    // Check for OpenMP threadprivate variables.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
      return EmitThreadPrivateVarDeclLValue(
          *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
          E->getExprLoc());
    }

    // Drill into block byref variables.
    bool isBlockByref = VD->isEscapingByref();
    if (isBlockByref) {
      addr = emitBlockByrefAddress(addr, VD);
    }

    // Drill into reference types.
    LValue LV =
        VD->getType()->isReferenceType()
            ? EmitLoadOfReferenceLValue(addr, VD->getType(),
                                        AlignmentSource::Decl)
            : MakeAddrLValue(addr, T, AlignmentSource::Decl);

    bool isLocalStorage = VD->hasLocalStorage();

    bool NonGCable =
        isLocalStorage && !VD->getType()->isReferenceType() && !isBlockByref;
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }

    bool isImpreciseLifetime =
        (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
    if (isImpreciseLifetime)
      LV.setARCPreciseLifetime(ARCImpreciseLifetime);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
    LValue LV = EmitFunctionDeclLValue(*this, E, FD);

    // Emit debuginfo for the function declaration if the target wants to.
    if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
      if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
        auto *Fn =
            cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
        if (!Fn->getSubprogram())
          DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
      }
    }

    return LV;
  }

  // FIXME: While we're emitting a binding from an enclosing scope, all other
  // DeclRefExprs we see should be implicitly treated as if they also refer to
  // an enclosing scope.
  if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
    if (E->refersToEnclosingVariableOrCapture()) {
      auto *FD = LambdaCaptureFields.lookup(BD);
      return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
    }
    return EmitLValue(BD->getBinding());
  }

  // We can form DeclRefExprs naming GUID declarations when reconstituting
  // non-type template parameters into expressions.
  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
    return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
                          AlignmentSource::Decl);

  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
    auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
    auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());

    if (AS != T.getAddressSpace()) {
      auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
      auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
      auto ASC = getTargetHooks().performAddrSpaceCast(
          CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
      ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
    }

    return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
  }

  llvm_unreachable("Unhandled DeclRefExpr");
}

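/// Emit an lvalue for a unary operator that can produce one: __extension__,
/// dereference, __real/__imag, and pre-increment/decrement.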
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
                                            &TBAAInfo);
    LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getLangOpts().ObjC &&
        getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !LV.getAddress(*this).getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    QualType T = ExprTy->castAs<ComplexType>()->getElementType();

    Address Component =
        (E->getOpcode() == UO_Real
             ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
             : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
                                   CGM.getTBAAInfoForSubobject(LV, T));
    ElemLV.getQuals().addQualifiers(LV.getQuals());
    return ElemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  auto SL = E->getFunctionName();
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
  StringRef FnName = CurFn->getName();
  if (FnName.starts_with("\01"))
    FnName = FnName.substr(1);
  StringRef NameItems[] = {
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
    std::string Name = std::string(SL->getString());
    if (!Name.empty()) {
      unsigned Discriminator =
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
      if (Discriminator)
        Name += "_" + Twine(Discriminator + 1).str();
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    } else {
      auto C =
          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    }
  }
  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}

/// Emit a type description suitable for use by a runtime sanitizer library.
/// The format of a type descriptor is
///
/// \code
///   { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
  // Only emit each type's descriptor once.
  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
    return C;

  uint16_t TypeKind = -1;
  uint16_t TypeInfo = 0;

  if (T->isIntegerType()) {
    TypeKind = 0;
    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
               (T->isSignedIntegerType() ? 1 : 0);
  } else if (T->isFloatingType()) {
    TypeKind = 1;
    TypeInfo = getContext().getTypeSize(T);
  }

  // Format the type name as if for a diagnostic, including quotes and
  // optionally an 'aka'.
  SmallString<32> Buffer;
  CGM.getDiags().ConvertArgToString(
      DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
      StringRef(), std::nullopt, Buffer, std::nullopt);

  llvm::Constant *Components[] = {
      Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
      llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)};
  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Descriptor->getType(),
      /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);

  // Remember the descriptor for this type.
  CGM.setTypeDescriptorInMap(T, GV);

  return GV;
}

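/// Coerce a value to the intptr_t representation expected by sanitizer
/// handlers: floating-point values are bitcast to integers, small integers
/// are zero-extended, pointers are converted with ptrtoint, and anything
/// else is spilled to a stack temporary and passed by address.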
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
  llvm::Type *TargetTy = IntPtrTy;

  if (V->getType() == TargetTy)
    return V;

  // Floating-point types which fit into intptr_t are bitcast to integers
  // and then passed directly (after zero-extension, if necessary).
  if (V->getType()->isFloatingPointTy()) {
    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
    if (Bits <= TargetTy->getIntegerBitWidth())
      V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
                                                         Bits));
  }

  // Integers which fit in intptr_t are zero-extended and passed directly.
  if (V->getType()->isIntegerTy() &&
      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
    return Builder.CreateZExt(V, TargetTy);

  // Pointers are passed directly, everything else is passed by address.
  if (!V->getType()->isPointerTy()) {
    RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
    Builder.CreateStore(V, Ptr);
    V = Ptr.getPointer();
  }
  return Builder.CreatePtrToInt(V, TargetTy);
}

/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
///   struct SourceLocation {
///     const char *Filename;
///     int32_t Line, Column;
///   };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
  llvm::Constant *Filename;
  int Line, Column;

  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
  if (PLoc.isValid()) {
    StringRef FilenameString = PLoc.getFilename();

    int PathComponentsToStrip =
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
    if (PathComponentsToStrip < 0) {
      assert(PathComponentsToStrip != INT_MIN);
      int PathComponentsToKeep = -PathComponentsToStrip;
      auto I = llvm::sys::path::rbegin(FilenameString);
      auto E = llvm::sys::path::rend(FilenameString);
      while (I != E && --PathComponentsToKeep)
        ++I;

      FilenameString = FilenameString.substr(I - E);
    } else if (PathComponentsToStrip > 0) {
      auto I = llvm::sys::path::begin(FilenameString);
      auto E = llvm::sys::path::end(FilenameString);
      while (I != E && PathComponentsToStrip--)
        ++I;

      if (I != E)
        FilenameString =
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
      else
        FilenameString = llvm::sys::path::filename(FilenameString);
    }

    auto FilenameGV =
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
        cast<llvm::GlobalVariable>(
            FilenameGV.getPointer()->stripPointerCasts()));
    Filename = FilenameGV.getPointer();
    Line = PLoc.getLine();
    Column = PLoc.getColumn();
  } else {
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
    Line = Column = 0;
  }

  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
                            Builder.getInt32(Column)};

  return llvm::ConstantStruct::getAnon(Data);
}

namespace {
/// Specify under what conditions this check can be recovered.
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};
}

static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
  assert(Kind.countPopulation() == 1);
  if (Kind == SanitizerKind::Vptr)
    return CheckRecoverableKind::AlwaysRecoverable;
  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
    return CheckRecoverableKind::Unrecoverable;
  else
    return CheckRecoverableKind::Recoverable;
}

namespace {
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}

const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
    LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};

static void emitCheckHandlerCall(CodeGenFunction &CGF,
                                 llvm::FunctionType *FnType,
                                 ArrayRef<llvm::Value *> FnArgs,
                                 SanitizerHandler CheckHandler,
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
                                 llvm::BasicBlock *ContBB) {
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
  std::optional<ApplyDebugLocation> DL;
  if (!CGF.Builder.getCurrentDebugLocation()) {
    // Ensure that the call has at least an artificial debug location.
    DL.emplace(CGF, SourceLocation());
  }
  bool NeedsAbortSuffix =
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
  const StringRef CheckName = CheckInfo.Name;
  std::string FnName = "__ubsan_handle_" + CheckName.str();
  if (CheckInfo.Version && !MinimalRuntime)
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
  if (MinimalRuntime)
    FnName += "_minimal";
  if (NeedsAbortSuffix)
    FnName += "_abort";
  bool MayReturn =
      !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;

  llvm::AttrBuilder B(CGF.getLLVMContext());
  if (!MayReturn) {
    B.addAttribute(llvm::Attribute::NoReturn)
        .addAttribute(llvm::Attribute::NoUnwind);
  }
  B.addUWTableAttr(llvm::UWTableKind::Default);

  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
      FnType, FnName,
      llvm::AttributeList::get(CGF.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex, B),
      /*Local=*/true);
  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
  if (!MayReturn) {
    HandlerCall->setDoesNotReturn();
    CGF.Builder.CreateUnreachable();
  } else {
    CGF.Builder.CreateBr(ContBB);
  }
}

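/// Emit a guarded sanitizer check. Roughly, for a single recoverable check
/// the emitted IR looks like:
///
/// \code
///   %ok = ... check condition ...
///   br i1 %ok, label %cont, label %handler, !prof !unlikely
/// handler:
///   call void @__ubsan_handle_<check>(ptr @static_data, i64 %dyn0, ...)
///   br label %cont
/// cont:
/// \endcode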
void CodeGenFunction::EmitCheck(
    ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
    SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
    ArrayRef<llvm::Value *> DynamicArgs) {
  assert(IsSanitizerScope);
  assert(Checked.size() > 0);
  assert(CheckHandler >= 0 &&
         size_t(CheckHandler) < std::size(SanitizerHandlers));
  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;

  llvm::Value *FatalCond = nullptr;
  llvm::Value *RecoverableCond = nullptr;
  llvm::Value *TrapCond = nullptr;
  for (int i = 0, n = Checked.size(); i < n; ++i) {
    llvm::Value *Check = Checked[i].first;
    // -fsanitize-trap= overrides -fsanitize-recover=.
    llvm::Value *&Cond =
        CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
            ? TrapCond
            : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
                  ? RecoverableCond
                  : FatalCond;
    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
  }

  if (ClSanitizeGuardChecks) {
    llvm::Value *Allow =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));

    for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
      if (*Cond)
        *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
    }
  }

  if (TrapCond)
    EmitTrapCheck(TrapCond, CheckHandler);
  if (!FatalCond && !RecoverableCond)
    return;

  llvm::Value *JointCond;
  if (FatalCond && RecoverableCond)
    JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
  else
    JointCond = FatalCond ? FatalCond : RecoverableCond;
  assert(JointCond);

  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
  assert(SanOpts.has(Checked[0].second));
#ifndef NDEBUG
  for (int i = 1, n = Checked.size(); i < n; ++i) {
    assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
           "All recoverable kinds in a single check must be same!");
    assert(SanOpts.has(Checked[i].second));
  }
#endif

  llvm::BasicBlock *Cont = createBasicBlock("cont");
  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
  // Give a hint that we very much don't expect to execute the handler.
  // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp.
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
  EmitBlock(Handlers);

  // Handler functions take an i8* pointing to the (handler-specific) static
  // information block, followed by a sequence of intptr_t arguments
  // representing operand values.
  SmallVector<llvm::Value *, 4> Args;
  SmallVector<llvm::Type *, 4> ArgTypes;
  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
    Args.reserve(DynamicArgs.size() + 1);
    ArgTypes.reserve(DynamicArgs.size() + 1);

    // Emit handler arguments and create handler function type.
    if (!StaticArgs.empty()) {
      llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
      auto *InfoPtr = new llvm::GlobalVariable(
          CGM.getModule(), Info->getType(), false,
          llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
          llvm::GlobalVariable::NotThreadLocal,
          CGM.getDataLayout().getDefaultGlobalsAddressSpace());
      InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
      Args.push_back(InfoPtr);
      ArgTypes.push_back(Args.back()->getType());
    }

    for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
      Args.push_back(EmitCheckValue(DynamicArgs[i]));
      ArgTypes.push_back(IntPtrTy);
    }
  }

  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);

  if (!FatalCond || !RecoverableCond) {
    // Simple case: we need to generate a single handler call, either
    // fatal or non-fatal.
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
                         (FatalCond != nullptr), Cont);
  } else {
    // Emit two handler calls: one for the set of unrecoverable checks,
    // another one for the recoverable ones.
    llvm::BasicBlock *NonFatalHandlerBB =
        createBasicBlock("non_fatal." + CheckName);
    llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
    Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
    EmitBlock(FatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
                         NonFatalHandlerBB);
    EmitBlock(NonFatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
                         Cont);
  }

  EmitBlock(Cont);
}

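/// Emit a call into the cross-DSO CFI slow path (__cfi_slowpath, or
/// __cfi_slowpath_diag when diagnostics are enabled) on the branch where
/// \p Cond is false.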
void CodeGenFunction::EmitCfiSlowPathCheck(
    SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
    llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");

  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);

  EmitBlock(CheckBB);

  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);

  llvm::CallInst *CheckCall;
  llvm::FunctionCallee SlowPathFn;
  if (WithDiag) {
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
    auto *InfoPtr =
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, Info);
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);

    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath_diag",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
                                false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
  } else {
    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
  }

  CGM.setDSOLocal(
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
  CheckCall->setDoesNotThrow();

  EmitBlock(Cont);
}

// Emit a stub for the __cfi_check function so that the linker knows about
// this symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
  llvm::Module *M = &CGM.getModule();
  ASTContext &C = getContext();
  QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);

  FunctionArgList FnArgs;
  ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
  ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
  ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
                                        ImplicitParamKind::Other);
  FnArgs.push_back(&ArgCallsiteTypeId);
  FnArgs.push_back(&ArgAddr);
  FnArgs.push_back(&ArgCFICheckFailData);
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setAlignment(llvm::Align(4096));
  CGM.setDSOLocal(F);

  llvm::LLVMContext &Ctx = M->getContext();
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  // The CrossDSOCFI pass is not executed if there is no executable code.
  SmallVector<llvm::Value *> Args{F->getArg(2), F->getArg(1)};
  llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
}

3708 | // This function is basically a switch over the CFI failure kind, which is |
3709 | // extracted from CFICheckFailData (1st function argument). Each case is either |
3710 | // llvm.trap or a call to one of the two runtime handlers, based on |
3711 | // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid |
3712 | // failure kind) traps, but this should really never happen. CFICheckFailData |
3713 | // can be nullptr if the calling module has -fsanitize-trap behavior for this |
3714 | // check kind; in this case __cfi_check_fail traps as well. |
void CodeGenFunction::EmitCfiCheckFail() {
  SanitizerScope SanScope(this);
  FunctionArgList Args;
  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
                            ImplicitParamKind::Other);
  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
                            ImplicitParamKind::Other);
  Args.push_back(&ArgData);
  Args.push_back(&ArgAddr);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy,
                                                       Args);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);

  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
                SourceLocation());

  // This function is not affected by NoSanitizeList. This function does
  // not have a source location, but "src:*" would still apply. Revert any
  // changes to SanOpts made in StartFunction.
  SanOpts = CGM.getLangOpts().Sanitize;

  llvm::Value *Data =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
  llvm::Value *Addr =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());

  // Data == nullptr means the calling module has trap behaviour for this check.
  llvm::Value *DataIsNotNullPtr =
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);

  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy,
      Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
      0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  const std::pair<int, SanitizerMask> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::CFIVCall},
      {CFITCK_NVCall, SanitizerKind::CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::CFIICall}};

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
  for (auto CheckKindMaskPair : CheckKinds) {
    int Kind = CheckKindMaskPair.first;
    SanitizerMask Mask = CheckKindMaskPair.second;
    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Mask))
      EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
                {Data, Addr, ValidVtable});
    else
      EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
  }

  FinishFunction();
  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}

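// A minimal example of code that reaches EmitUnreachable (illustrative):
//
//   int f(int x) {
//     if (x) return 1;
//     __builtin_unreachable();  // With -fsanitize=unreachable, a handler
//   }                           // call precedes the `unreachable` terminator.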
void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    SanitizerScope SanScope(this);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             SanitizerKind::Unreachable),
              SanitizerHandler::BuiltinUnreachable,
              EmitCheckSourceLocation(Loc), std::nullopt);
  }
  Builder.CreateUnreachable();
}

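// EmitTrapCheck branches on a condition and traps on failure. The emitted
// control flow looks roughly like this (illustrative IR sketch):
//
//     br i1 %checked, label %cont, label %trap
//   trap:
//     call void @llvm.ubsantrap(i8 <CheckHandlerID>)  ; noreturn, nounwind
//     unreachable
//   cont:
//     ...
//
// When optimizing (and unless ubsan-unique-traps is set), the trap block is
// shared by all checks of the same kind in the function.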
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if ((int)TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);

  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  if (!ClSanitizeDebugDeoptimization &&
      CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
      (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB);
  } else {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB);
    EmitBlock(TrapBB);

    llvm::CallInst *TrapCall = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
        llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization
                                               ? TrapBB->getParent()->size()
                                               : CheckHandlerID));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
    }
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  }

  EmitBlock(Cont);
}

llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  return TrapCall;
}

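// For example (illustrative): given `int a[10];`, the decayed value used by
// `int *p = a;` is the address of the first element, i.e. the result of
// `getelementptr [10 x i32], ptr %a, i64 0, i64 0`, with element type i32.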
Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress(*this);

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Addr.withElementType(NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent
  // accesses to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Addr.withElementType(ConvertTypeForMem(EltType));
}

/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from a variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}

static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
                                          llvm::Type *elemType,
                                          llvm::Value *ptr,
                                          ArrayRef<llvm::Value*> indices,
                                          bool inbounds,
                                          bool signedIndices,
                                          SourceLocation loc,
                                          const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}

static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     llvm::Type *elementType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     CharUnits align,
                                     const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      align, name);
  } else {
    return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
  }
}

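// Worked example for the helper below (illustrative): for a 16-byte-aligned
// array of 4-byte elements, a constant index of 3 gives a 12-byte offset and
// thus only 4-byte alignment, while an index of 4 gives a 16-byte offset and
// keeps the full 16-byte alignment. For a non-constant index we must settle
// for the worst case over all elements: 4 bytes.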
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
                                      llvm::Value *idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);

  // Otherwise, use the worst-case alignment for any element.
  } else {
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}

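// Strips all variable-length array levels off a VLA type. For example
// (illustrative), for `int a[n][m][8]` the loop below peels `[n]` and `[m]`
// and returns the outermost fixed-size element type, `int[8]`.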
static QualType getFixedSizeElementType(const ASTContext &ctx,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}

static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
  return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
}

static bool hasBPFPreserveStaticOffset(const Expr *E) {
  if (!E)
    return false;
  QualType PointeeType = E->getType()->getPointeeType();
  if (PointeeType.isNull())
    return false;
  if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
    return hasBPFPreserveStaticOffset(BaseDecl);
  return false;
}

// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
                                               Address &Addr) {
  if (!CGF.getTarget().getTriple().isBPF())
    return Addr;

  llvm::Function *Fn =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
  llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
  return Address(Call, Addr.getElementType(), Addr.getAlignment());
}

/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p; p[1].a
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    const auto *PointeeT = PtrT->getPointeeType()
                               ->getUnqualifiedDesugaredType();
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}

static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except the last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base.  This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  if (hasBPFPreserveStaticOffset(Base))
    addr = wrapWithBPFPreserveStaticOffset(CGF, addr);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  if (!LastIndex ||
      (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
    addr = emitArraySubscriptGEP(CGF, addr, indices,
                                 CGF.ConvertTypeForMem(eltType), inbounds,
                                 signedIndices, loc, eltAlign, name);
    return addr;
  } else {
    // Remember the original array subscript for bpf target
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
        addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}

/// The offset of a field from the beginning of the record.
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *FD, int64_t &Offset) {
  ASTContext &Ctx = CGF.getContext();
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  unsigned FieldNo = 0;

  for (const Decl *D : RD->decls()) {
    if (const auto *Record = dyn_cast<RecordDecl>(D))
      if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    if (const auto *Field = dyn_cast<FieldDecl>(D))
      if (FD == Field) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    if (isa<FieldDecl>(D))
      ++FieldNo;
  }

  return false;
}
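
// Worked example for getFieldOffsetInBits (illustrative, assuming 4-byte
// ints and typical padding): in
//   struct S { int a; struct { char c; int d; } in; };
// a query for `d` recurses into the nested record, where `d` sits at bit
// offset 32 (after `c` plus padding), and then adds the offset of the
// record's storage within S (bit 32, after `a`), for a total of 64 bits.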

/// Returns the relative offset difference between \p FD1 and \p FD2.
/// \code
///   offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
/// \endcode
/// Both fields must be within the same struct.
static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
                                                        const FieldDecl *FD1,
                                                        const FieldDecl *FD2) {
  const RecordDecl *FD1OuterRec =
      FD1->getParent()->getOuterLexicalRecordContext();
  const RecordDecl *FD2OuterRec =
      FD2->getParent()->getOuterLexicalRecordContext();

  if (FD1OuterRec != FD2OuterRec)
    // Fields must be within the same RecordDecl.
    return std::optional<int64_t>();

  int64_t FD1Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
    return std::optional<int64_t>();

  int64_t FD2Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
    return std::optional<int64_t>();

  return std::make_optional<int64_t>(FD1Offset - FD2Offset);
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate.  Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
                                 E->getBase()->getType(), LHS.getBaseInfo(),
                                 TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    Address Addr = EmitExtVectorElementLValue(LV);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()) {
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*.  FIXME: is this actually
    // true?  A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    if (SanOpts.has(SanitizerKind::ArrayBounds)) {
      // If the array being accessed has a "counted_by" attribute, generate
      // bounds checking code. The "count" field is at the top level of the
      // struct or in an anonymous struct that is also at the top level. Future
      // expansions may allow the "count" to reside at any place in the struct,
      // but the value of "counted_by" will be a "simple" path to the count,
      // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
      // similar to emit the correct GEP.
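      //
      // For example (illustrative):
      //
      //   struct s {
      //     int count;
      //     int fam[] __attribute__((counted_by(count)));
      //   };
      //
      // An access `p->fam[idx]` is checked against the value loaded from
      // `p->count`, located via the byte offset between the two fields.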
      const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
          getLangOpts().getStrictFlexArraysLevel();

      if (const auto *ME = dyn_cast<MemberExpr>(Array);
          ME &&
          ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
          ME->getMemberDecl()->getType()->isCountAttributedType()) {
        const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
          if (std::optional<int64_t> Diff =
                  getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
            CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);

            // Create a GEP with a byte offset between the FAM and count and
            // use that to load the count value.
            Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
                ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);

            llvm::Type *CountTy = ConvertType(CountFD->getType());
            llvm::Value *Res = Builder.CreateInBoundsGEP(
                Int8Ty, Addr.emitRawPointer(*this),
                Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
            Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
                                            ".counted_by.load");

            // Now emit the bounds checking.
            EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
                                Array->getType(), Accessed);
          }
        }
      }
    }

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this),
        {CGM.getSize(CharUnits::Zero()), Idx}, E->getType(),
        !getLangOpts().isSignedOverflowDefined(), SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

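// Matrix values are laid out in column-major order, so the flattened index
// of element (row, col) in an R x C matrix is col * R + row. For example
// (illustrative), in a 3x2 matrix, element (1, 1) maps to 1 * 3 + 1 = 4.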
LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}

static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress(CGF);
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = Addr.withElementType(NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
    }
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}

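// An OpenMP array section denotes a contiguous slice of an array or of
// pointed-to storage, written [lower-bound : length]. For example
// (illustrative):
//
//   #pragma omp target map(a[2:8])  // elements a[2] .. a[9]
//   #pragma omp target map(p[:n])   // elements p[0] .. p[n-1]
//
// Depending on IsLowerBound, the function below computes the address of
// either the first or the last element of the section.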
LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
                                                bool IsLowerBound) {
  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L =
                Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this),
        {CGM.getSize(CharUnits::Zero()), Idx}, ResultExprTy,
        !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}

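// For example (illustrative), with an ext_vector type:
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 v;
//   v.xy;     // simple base: encoded indices {0, 1} over v's storage
//   v.xy.yx;  // ext-vector-element base: {1, 0} composed over {0, 1},
//             // i.e. elements {1, 0} of v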
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
                                                 llvm::Value *ThisValue) {
  bool HasExplicitObjectParameter = false;
  if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
    HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
    assert(MD->getParent()->isLambda());
    assert(MD->getParent() == Field->getParent());
  }
  LValue LambdaLV;
  if (HasExplicitObjectParameter) {
    const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
    auto It = LocalDeclMap.find(D);
    assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
    Address AddrOfExplicitObject = It->getSecond();
    if (D->getType()->isReferenceType())
      LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
                                           AlignmentSource::Decl);
    else
      LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
                                D->getType().getNonReferenceType());
  } else {
    QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
    LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
  }
  return EmitLValueForField(LambdaLV, Field);
}

LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  return EmitLValueForLambdaField(Field, CXXABIThisValue);
}
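
// Example for the explicit-object-parameter path above (illustrative,
// C++23):
//
//   auto l = [x](this auto self) { return x; };
//
// Here the capture `x` is addressed through `self`, the explicit object
// parameter, rather than through an implicit this-value.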

/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitField())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}
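
// For example (illustrative): in
//   struct S { int a; int : 3; int b; };
// `b` has AST field index 2, but the unnamed bit-field carries no debug
// info, so getDebugInfoFIndex returns 2 - 1 = 1.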

/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field) {
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  Base = Base.withElementType(CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}

/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field) {
  if (field->isZeroSize(CGF.getContext()))
    return emitAddrOfZeroSizeField(CGF, base, field);

  const RecordDecl *rec = field->getParent();

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}

static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
                                        Address addr, const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreatePreserveStructAccessIndex(
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}

static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  if (RD->isDynamicClass())
    return true;

  for (const auto &Base : RD->bases())
    if (hasAnyVptr(Base.getType(), Context))
      return true;

  for (const FieldDecl *Field : RD->fields())
    if (hasAnyVptr(Field->getType(), Context))
      return true;

  return false;
}

LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
                                 .withCVRQualifiers(base.getVRQualifiers())
                                 .isVolatileQualified();
    Address Addr = base.getAddress(*this);
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
    if (hasBPFPreserveStaticOffset(rec))
      Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0)
          // For structs, we GEP to the field that the record layout suggests.
          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
      } else {
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
        Addr = Builder.CreatePreserveStructAccessIndex(
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    Addr = Addr.withElementType(FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }

    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type has been assigned for the base access, then try to
    // generate one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
        getContext().getTypeSizeInChars(FieldType).getQuantity();
  }

  Address addr = base.getAddress(*this);
  if (hasBPFPreserveStaticOffset(rec))
    addr = wrapWithBPFPreserveStaticOffset(*this, addr);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of a dynamic object requires stripping dynamic
      // information provided by invariant.group. This is because accessing
      // fields may leak the real address of the dynamic object, which could
      // result in miscompilation when the leaked pointer is compared.
      auto *stripped =
          Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time a CXXRecord field with a vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
          base.getType(), rec->getLocation());
      addr =
          Address(Builder.CreatePreserveUnionAccessIndex(
                      addr.emitRawPointer(*this),
                      getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
                  addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs.
  addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}

LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = V.withElementType(llvmType);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type.
  // This should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}

4969 | LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ |
4970 | if (E->isFileScope()) { |
4971 | ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); |
4972 | return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl); |
4973 | } |
4974 | if (E->getType()->isVariablyModifiedType()) |
4975 | // make sure to emit the VLA size. |
4976 | EmitVariablyModifiedType(Ty: E->getType()); |
4977 | |
4978 | Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral" ); |
4979 | const Expr *InitExpr = E->getInitializer(); |
4980 | LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); |
4981 | |
4982 | EmitAnyExprToMem(E: InitExpr, Location: DeclPtr, Quals: E->getType().getQualifiers(), |
4983 | /*Init*/ IsInit: true); |
4984 | |
4985 | // Block-scope compound literals are destroyed at the end of the enclosing |
4986 | // scope in C. |
4987 | if (!getLangOpts().CPlusPlus) |
4988 | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
4989 | pushLifetimeExtendedDestroy(kind: getCleanupKind(kind: DtorKind), addr: DeclPtr, |
4990 | type: E->getType(), destroyer: getDestroyer(destructionKind: DtorKind), |
4991 | useEHCleanupForArray: DtorKind & EHCleanup); |
4992 | |
4993 | return Result; |
4994 | } |
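
// Illustrative example (assumed, not part of the original file): in C, a
// block-scope compound literal such as
//
//   int *p = (int[]){1, 2, 3};
//
// is emitted as a stack temporary named ".compoundliteral" and initialized
// in place; if the type needs destruction (e.g. a struct with __strong
// members under ARC), a cleanup is pushed for the end of the enclosing
// scope, matching the C99 6.5.2.5 lifetime rules.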

LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator. This is either a
/// glvalue or a (possibly-parenthesized) throw-expression. If this is a throw,
/// no LValue is returned and the current block has been terminated.
static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                         const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/ false);
    return std::nullopt;
  }

  return CGF.EmitLValue(Operand);
}
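
// For illustration only: in a glvalue conditional like
//
//   (cond ? x : throw std::runtime_error("no")) = 42;
//
// the false arm takes the EmitCXXThrowExpr path above, the insertion point
// is left terminated, and no LValue is produced for that arm; only the
// surviving arm's address reaches the merge performed by the caller.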

namespace {
// Handle the case where the condition is a constant-evaluatable simple
// integer, which means we don't have to separately handle the true/false
// blocks.
std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      // If it is a throw expression, we emit it and return an undefined
      // lvalue because its value can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = CGF.UnqualPtrTy;
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return std::nullopt;
}

struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  std::optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template<typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), std::nullopt,
                       std::nullopt};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
} // namespace
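
// A rough sketch (not from the original source) of the CFG produced by
// EmitConditionalBlocks for `c ? a : b` used as a glvalue:
//
//          +-> cond.true  (emit 'a') --+
//   branch |                           +-> cond.end
//          +-> cond.false (emit 'b') --+
//
// Note that lhsBlock/rhsBlock are updated to the blocks that actually end
// each arm, since emitting an arm may create additional basic blocks.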

void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue{};
  });
}

LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (std::optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    Address lhsAddr = Info.LHS->getAddress(*this);
    Address rhsAddr = Info.RHS->getAddress(*this);
    Address result = mergeAddressesInConditionalExpr(
        lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
        Builder.GetInsertBlock(), expr->getType());
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  } else {
    assert((Info.LHS || Info.RHS) &&
           "both operands of glvalue conditional are throw-expressions?");
    return Info.LHS ? *Info.LHS : *Info.RHS;
  }
}
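
// Illustrative lowering (approximate, not part of the original file): for
//
//   int a, b;
//   (flag ? a : b) = 7;
//
// the merge above is expected to produce IR shaped roughly like:
//
//   cond.end:
//     %lv = phi ptr [ %a.addr, %cond.true ], [ %b.addr, %cond.false ]
//     store i32 7, ptr %lv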

/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress(*this);
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE),
                                         E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    // Propagate the volatile qualifier to the LValue, if it is present in E.
    if (E->changesVolatileQualification())
      LV.getQuals() = E->getType().getQualifiers();
    if (LV.isSimple()) {
      Address V = LV.getAddress(*this);
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(V.withElementType(T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress(*this);

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
                    E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress(*this).withElementType(
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress(*this).getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V =
        LV.getAddress(*this).withElementType(ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");

  case CK_VectorSplat: {
    // LValue results of vector splats are only supported in HLSL.
    if (!getLangOpts().HLSL)
      return EmitUnsupportedLValue(E, "unexpected cast lvalue");
    return EmitLValue(E->getSubExpr());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}
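
// Example for orientation (assumed, not in the original source): a
// derived-to-base cast in an lvalue position, e.g.
//
//   struct B { int x; }; struct D : B {};
//   D d;
//   static_cast<B&>(d).x = 1;
//
// takes the CK_DerivedToBase path above: the subexpression's address is
// adjusted by GetAddressOfBaseClass (a no-op or constant offset for
// non-virtual bases), and TBAA conservatively uses the base-class type.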

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
      it = OpaqueLValues.find(e);

  if (it != OpaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
      it = OpaqueRValues.find(e);

  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue(*this);
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
    // primitive load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  // A CXXOperatorCallExpr is created even for explicit object methods, but
  // these should be treated like static function calls.
  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
        MD && MD->isImplicitObjectMemberFunction())
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}
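
// Hypothetical example (not from the original source) of the situation this
// detects, using GNU inline semantics:
//
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, unsigned long n) { /* ... */ }
//
// If every declaration of memcpy seen so far is such an inline builtin
// declaration, callers reference the ".inline" clone created below; once a
// non-inline redeclaration appears, the external definition wins everywhere.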

static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";

    StringRef Ident = CGF.CGM.getMangledName(GD);
    std::string FDInlineName = (Ident + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If
    // we are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is not a
    // predefined library function, in which case we must generate the builtin
    // no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}

CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

  // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

  // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

  // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGCallee callee(calleeInfo, calleePtr);
  return callee;
}
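
// For orientation (illustrative, not in the original source): a call `f(1)`
// where `f` is `void f(int);` reaches the DeclRefExpr branch above and
// produces a direct CGCallee, while `fp(1)` where `fp` is `void (*fp)(int);`
// falls through to the indirect case: the callee is just the loaded pointer
// value paired with the FunctionProtoType used to arrange the call ABI.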

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    // TODO: Can we de-duplicate this code with the corresponding code in
    // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
    RValue RV;
    llvm::Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if the LHS is a bitfield. If the RHS contains an implicit cast
    // expression, we want to extract that value and potentially (if the
    // bitfield sanitizer is enabled) use it to check for an implicit
    // conversion.
    if (E->getLHS()->refersToBitField()) {
      llvm::Value *RHS =
          EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
      RV = RValue::get(RHS);
    } else
      RV = EmitAnyExpr(E->getRHS());

    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);

    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());

    if (LV.isBitField()) {
      llvm::Value *Result = nullptr;
      // If bitfield sanitizers are enabled we want to use the result
      // to check whether a truncation or sign change has occurred.
      if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
        EmitStoreThroughBitfieldLValue(RV, LV, &Result);
      else
        EmitStoreThroughBitfieldLValue(RV, LV);

      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
      QualType DstType = E->getLHS()->getType();
      EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                  LV.getBitFieldInfo(), E->getExprLoc());
    } else
      EmitStoreThroughLValue(RV, LV);

    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}
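
// Sketch of the bitfield path above (illustrative, not part of the source):
//
//   struct S { int bf : 4; } s;
//   s.bf = val;            // with -fsanitize=implicit-bitfield-conversion
//
// emits the truncating store through EmitStoreThroughBitfieldLValue, then
// compares the value actually stored (Result) against the pre-conversion
// value (Previous, or the plain RHS) to flag truncations or sign changes.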

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() &&
         "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
      .withElementType(ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
      CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

llvm::Value *
CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
  QualType PointerDiffType = getContext().getPointerDiffType();
  return Builder.CreateZExtOrTrunc(OffsetValue,
                                   getTypes().ConvertType(PointerDiffType));
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
      EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                        BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get an l-value for a statement expression that returns an
  // aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

RValue CodeGenFunction::EmitCall(QualType CalleeType,
                                 const CGCallee &OrigCallee, const CallExpr *E,
                                 ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
          !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
         "trying to emit a call to an immediate function");

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  if (SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
      !isa<FunctionNoProtoType>(PointeeType)) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);

      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();

      // On 32-bit Arm, the low bit of a function pointer indicates whether
      // it's using the Arm or Thumb instruction set. The actual first
      // instruction lives at the same address either way, so we must clear
      // that low bit before using the function address to find the prefix
      // structure.
      //
      // This applies to both Arm and Thumb target triples, because
      // either one could be used in an interworking context where it
      // might be passed function pointers of both types.
      llvm::Value *AlignedCalleePtr;
      if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
        llvm::Value *CalleeAddress =
            Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
        llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
        llvm::Value *AlignedCalleeAddress =
            Builder.CreateAnd(CalleeAddress, Mask);
        AlignedCalleePtr =
            Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
      } else {
        AlignedCalleePtr = CalleePtr;
      }

      llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
          Int32Ty,
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
          getPointerAlign());
      llvm::Value *CalleeTypeHashMatch =
          Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that
  // the function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CalleePtr, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment
  // syntax right-to-left, and that we evaluate arguments to certain other
  // operators left-to-right. Note that we allow this to override the order
  // dictated by the calling convention on the MS ABI, which means that
  // parameter destruction order is not necessarily reverse construction
  // order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  bool StaticOperator = false;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }

    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
        MD && MD->isStatic())
      StaticOperator = true;
  }

  auto Arguments = E->arguments();
  if (StaticOperator) {
    // If we're calling a static operator, we need to emit the object argument
    // and ignore it.
    EmitIgnoredExpr(E->getArg(0));
    Arguments = drop_begin(Arguments, 1);
  }
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
               E->getDirectCallee(), /*ParamsToSkip=*/0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
    CalleeTy = CalleeTy->getPointerTo(AS);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }

  // A HIP function pointer contains a kernel handle when it is used in a
  // triple-chevron launch. The kernel stub needs to be loaded from the kernel
  // handle and used as the callee.
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Stub = Builder.CreateLoad(
        Address(Handle, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
  llvm::CallBase *CallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  // Generate a function declaration DISubprogram so it can be used in debug
  // info about call sites.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      FunctionArgList Args;
      QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
      DI->EmitFuncDeclForCallSite(CallOrInvoke,
                                  DI->getFunctionType(CalleeDecl, ResTy, Args),
                                  CalleeDecl);
    }
  }

  return Call;
}
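
// Worked example (assumed, not in the original file) of the unprototyped
// call path above: given
//
//   void f();        // no prototype (pre-C23)
//   f(1, 2.0);
//
// the arguments receive the default promotions, the callee pointer is cast
// to a function type matching the promoted arguments ("callee.knr.cast"),
// and the call is emitted as if it were non-variadic.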

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
      EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                      &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}
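
// Illustration (not from the original source): for
//
//   struct S { int m; };
//   int S::*pm = &S::m;
//   s.*pm = 3;
//
// the data member pointer is a byte offset under the Itanium ABI, so the
// address computed above is morally `(char *)&s + pm`: a byte-wise GEP by
// the loaded offset, then treated as an int lvalue.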

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue(*this);
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-sqrt flag
    SetFPAccuracy(Val, 3.0f);
  }
}

void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-div flag
    SetFPAccuracy(Val, 2.5f);
  }
}
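
// Minimal sketch (assumed): SetFPAccuracy(V, 2.5f) attaches metadata like
//
//   %div = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}
//
// which licenses the backend to use a faster, less-than-correctly-rounded
// expansion as long as it stays within 2.5 ulp.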

namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
} // namespace

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}
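
// Illustrative walk-through (not part of the original file): for an ObjC
// property increment such as `obj.count += 1`, the semantic form is roughly
//
//   OVE(obj); OVE(getter-call); setter-call(OVE + 1)
//
// Each non-unique OpaqueValueExpr is bound once in the loop above, the
// setter call is the result expression, and all bindings are unbound after
// emission so the receiver and the loaded value are evaluated exactly once.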

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}