//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, used when reporting an unsupported
                 // operation. May not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation does not follow the usual arithmetic conversions, and the two
  /// operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++11 nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
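/// For example, in 'short s; s + 1' the 'short' operand is widened to 'int'
/// by the usual integer promotions; its unwidened base type here is 'short'.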
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  if (Op.Ty->isSignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  if (Op.Ty->isUnsignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);

  if (UO && UO->getOpcode() == UO_Minus &&
      Ctx.getLangOpts().isOverflowPatternExcluded(
          LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
      UO->isIntegerConstantExpr(Ctx))
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (UO)
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  if (BO->hasExcludedOverflowPattern())
    return true;

  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types is less than half the size of the promoted type.
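  // For example, with 8-bit unsigned operands promoted to a 32-bit int, the
  // product is at most 255 * 255 = 65025, which fits comfortably in the
  // promoted type, so no check is required.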
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
    : public StmtVisitor<ScalarExprEmitter, Value *> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira = false)
      : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
        VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here, unless the
        // alignment-assumption sanitizer is enabled; in that case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()),
                                         QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
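    // For example, "int b = (x != 0); if (b) ..." zero-extends an i1 to i32
    // and then immediately wants the i1 back for the branch condition.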
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value *>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot(const UnaryOperator *E);
  Value *VisitUnaryLNot(const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    if (E->isStoredAsBoolean())
      return llvm::ConstantInt::get(ConvertType(E->getType()),
                                    E->getBoolValue());
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getAPValue().getInt());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
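      // For example, multiplying a 2x3 matrix by a 3x4 matrix yields a 2x4
      // result; the flattened LLVM vector types alone do not carry that shape.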
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS,
                                       LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide the LHS of a shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used to constrain shift amounts for OpenCL: mask for powers of 2, urem for
  // non-powers of 2.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr(const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

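  // Returns the type to perform the arithmetic in when the operand type uses
  // excess precision (for example, _Float16 arithmetic is typically evaluated
  // in float on targets without native support), or a null QualType when no
  // promotion is needed.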
  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements,
                                 VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                              \
    QualType promotionTy = getPromotionType(E->getType());                    \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                       \
    if (result && !promotionTy.isNull())                                      \
      result = EmitUnPromotedValue(result, E->getType());                     \
    return result;                                                            \
  }                                                                           \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {              \
    ApplyAtomGroup Grp(CGF.getDebugInfo());                                   \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);               \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                       \
  Value *VisitBin##CODE(const BinaryOperator *E) {                            \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,             \
                       llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign(const BinaryOperator *E);

  Value *VisitBinLAnd(const BinaryOperator *E);
  Value *VisitBinLOr(const BinaryOperator *E);
  Value *VisitBinComma(const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
  auto CheckHandler = SanitizerHandler::FloatCastOverflow;
  SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);
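  // For a conversion to a signed 8-bit integer, for instance, this amounts to
  // checking -129.0 < Src < 128.0: every value strictly inside that range
  // truncates to something representable in [-128, 127].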

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
                OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerKind::SanitizerOrdinal Ordinal;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
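  // For example, truncating the i32 value 384 (0x180) to i8 yields 0x80;
  // extending 0x80 back to i32 gives either 128 or -128, neither of which
  // equals 384, so the check computes false and the truncation is flagged.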
  return std::make_pair(Kind, std::make_pair(Check, Ordinal));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  {
    // We don't know the check kind until we call
    // EmitIntegerTruncationCheckHelper, but we want to annotate
    // EmitIntegerTruncationCheckHelper's instructions too.
    SanitizerDebugLocation SanScope(
        &CGF,
        {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
         SanitizerKind::SO_ImplicitSignedIntegerTruncation},
        CheckHandler);
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);

  // Does some SSCL ignore this type?
  if (CGF.getContext().isTypeIgnoredBySanitizer(
          SanitizerMask::bitPosToMask(Check.second.second), DstType))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
}

static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
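  // For example, converting the int value -1 to unsigned int: the source is
  // negative, the destination (being unsigned) is not, so the statuses differ
  // and the check computes false, i.e. a sign change is reported.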
1227 llvm::Value *Check = nullptr;
1228 Check = Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "signchangecheck");
1229 // If the comparison result is 'false', then the conversion changed the sign.
1230 return std::make_pair(
1231 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1232 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitIntegerSignChange));
1233}
1234
1235void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1236 Value *Dst, QualType DstType,
1237 SourceLocation Loc) {
1238 if (!CGF.SanOpts.has(O: SanitizerKind::SO_ImplicitIntegerSignChange))
1239 return;
1240
1241 llvm::Type *SrcTy = Src->getType();
1242 llvm::Type *DstTy = Dst->getType();
1243
1244 // We only care about int->int conversions here.
1245 // We ignore conversions to/from pointer and/or bool.
1246 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1247 DstType))
1248 return;
1249
1250 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1251 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1252 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1253 unsigned DstBits = DstTy->getScalarSizeInBits();
1254
1255 // Now, we do not need to emit the check in *all* of the cases.
1256 // We can avoid emitting it in some obvious cases where it would have been
1257 // dropped by the opt passes (instcombine) always anyways.
1258 // If it's a cast between effectively the same type, no check.
1259 // NOTE: this is *not* equivalent to checking the canonical types.
1260 if (SrcSigned == DstSigned && SrcBits == DstBits)
1261 return;
1262 // At least one of the values needs to have signed type.
1263 // If both are unsigned, then obviously, neither of them can be negative.
1264 if (!SrcSigned && !DstSigned)
1265 return;
1266 // If the conversion is to *larger* *signed* type, then no check is needed.
1267 // Because either sign-extension happens (so the sign will remain),
1268 // or zero-extension will happen (the sign bit will be zero.)
1269 if ((DstBits > SrcBits) && DstSigned)
1270 return;
1271 if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
1272 (SrcBits > DstBits) && SrcSigned) {
1273 // If the signed integer truncation sanitizer is enabled,
1274 // and this is a truncation from signed type, then no check is needed.
1275 // Because here sign change check is interchangeable with truncation check.
1276 return;
1277 }
1278 // Does an SSCL have an entry for the DstType under its respective sanitizer
1279 // section?
1280 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1281 Mask: SanitizerKind::ImplicitSignedIntegerTruncation, Ty: DstType))
1282 return;
1283 if (!DstSigned &&
1284 CGF.getContext().isTypeIgnoredBySanitizer(
1285 Mask: SanitizerKind::ImplicitUnsignedIntegerTruncation, Ty: DstType))
1286 return;
1287 // That's it. We can't rule out any more cases with the data we have.
1288
1289 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1290 SanitizerDebugLocation SanScope(
1291 &CGF,
1292 {SanitizerKind::SO_ImplicitIntegerSignChange,
1293 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1294 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1295 CheckHandler);
1296
1297 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1298 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1299 Check;
1300
1301 // Each of these checks needs to return 'false' when an issue was detected.
1302 ImplicitConversionCheckKind CheckKind;
1303 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1304 2>
1305 Checks;
1306 // So we can 'and' all the checks together, and still get 'false',
1307 // if at least one of the checks detected an issue.
1308
1309 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1310 CheckKind = Check.first;
1311 Checks.emplace_back(Args&: Check.second);
1312
1313 if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
1314 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1315 // If the signed integer truncation sanitizer was enabled,
1316 // and we are truncating from larger unsigned type to smaller signed type,
1317 // let's handle the case we skipped in that check.
1318 Check =
1319 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1320 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1321 Checks.emplace_back(Args&: Check.second);
1322 // If the comparison result is 'i1 false', then the truncation was lossy.
1323 }
1324
1325 llvm::Constant *StaticArgs[] = {
1326 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(T: SrcType),
1327 CGF.EmitCheckTypeDescriptor(T: DstType),
1328 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
1329 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: 0)};
1330 // EmitCheck() will 'and' all the checks together.
1331 CGF.EmitCheck(Checked: Checks, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
1332}
1333
1334// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1335// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1336static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1337 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1338EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1339 QualType DstType, CGBuilderTy &Builder) {
1340 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1341 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1342
1343 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1344 if (!SrcSigned && !DstSigned)
1345 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1346 else
1347 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1348
1349 llvm::Value *Check = nullptr;
1350 // 1. Extend the truncated value back to the same width as the Src.
1351 Check = Builder.CreateIntCast(V: Dst, DestTy: Src->getType(), isSigned: DstSigned, Name: "bf.anyext");
1352 // 2. Equality-compare with the original source value
1353 Check = Builder.CreateICmpEQ(LHS: Check, RHS: Src, Name: "bf.truncheck");
1354 // If the comparison result is 'i1 false', then the truncation was lossy.
1355
1356 return std::make_pair(
1357 x&: Kind,
1358 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1359}
1360
1361// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1362// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1363static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1364 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1365EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1366 QualType DstType, CGBuilderTy &Builder) {
1367 // 1. Was the old Value negative?
1368 llvm::Value *SrcIsNegative =
1369 EmitIsNegativeTestHelper(V: Src, VType: SrcType, Name: "bf.src", Builder);
1370 // 2. Is the new Value negative?
1371 llvm::Value *DstIsNegative =
1372 EmitIsNegativeTestHelper(V: Dst, VType: DstType, Name: "bf.dst", Builder);
1373 // 3. Now, was the 'negativity status' preserved during the conversion?
1374 // NOTE: conversion from negative to zero is considered to change the sign.
1375 // (We want to get 'false' when the conversion changed the sign)
1376 // So we should just equality-compare the negativity statuses.
1377 llvm::Value *Check = nullptr;
1378 Check =
1379 Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "bf.signchangecheck");
1380 // If the comparison result is 'false', then the conversion changed the sign.
1381 return std::make_pair(
1382 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1383 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1384}
1385
1386void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
1387 Value *Dst, QualType DstType,
1388 const CGBitFieldInfo &Info,
1389 SourceLocation Loc) {
1390
1391 if (!SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion))
1392 return;
1393
1394 // We only care about int->int conversions here.
1395 // We ignore conversions to/from pointer and/or bool.
1396 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1397 DstType))
1398 return;
1399
1400 if (DstType->isBooleanType() || SrcType->isBooleanType())
1401 return;
1402
1403 // This should be truncation of integral types.
1404 assert(isa<llvm::IntegerType>(Src->getType()) &&
1405 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1406
1407 // TODO: Calculate src width to avoid emitting code
1408 // for unnecessary cases.
1409 unsigned SrcBits = ConvertType(T: SrcType)->getScalarSizeInBits();
1410 unsigned DstBits = Info.Size;
1411
1412 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1413 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1414
1415 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1416 SanitizerDebugLocation SanScope(
1417 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1418
1419 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1420 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1421 Check;
1422
1423 // Truncation
1424 bool EmitTruncation = DstBits < SrcBits;
1425 // If Dst is signed and Src unsigned, we want to be more specific
1426 // about the CheckKind we emit; in this case we want to emit
1427 // ICCK_SignedIntegerTruncationOrSignChange.
1428 bool EmitTruncationFromUnsignedToSigned =
1429 EmitTruncation && DstSigned && !SrcSigned;
1430 // Sign change
1431 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1432 bool BothUnsigned = !SrcSigned && !DstSigned;
1433 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1434 // We can avoid emitting sign change checks in some obvious cases:
1435 // 1. If Src and Dst have the same signedness and size.
1436 // 2. If both are unsigned, the sign check is unnecessary.
1437 // 3. If Dst is signed and bigger than Src, either
1438 // sign-extension or zero-extension will make sure
1439 // the sign remains.
1440 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1441
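// Illustrative examples: storing an 'unsigned int' into a 3-bit signed
// bit-field emits the truncation check (and is reported below as
// ICCK_SignedIntegerTruncationOrSignChange, since Dst is signed and Src is
// not), while storing an 'int' into an unsigned bit-field of the same width
// emits only the sign change check.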
1442 if (EmitTruncation)
1443 Check =
1444 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1445 else if (EmitSignChange) {
1446 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1447 "either the widths should be different, or the signednesses.");
1448 Check =
1449 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1450 } else
1451 return;
1452
1453 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1454 if (EmitTruncationFromUnsignedToSigned)
1455 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1456
1457 llvm::Constant *StaticArgs[] = {
1458 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(T: SrcType),
1459 EmitCheckTypeDescriptor(T: DstType),
1460 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
1461 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Info.Size)};
1462
1463 EmitCheck(Checked: Check.second, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
1464}
1465
1466Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1467 QualType DstType, llvm::Type *SrcTy,
1468 llvm::Type *DstTy,
1469 ScalarConversionOpts Opts) {
1470 // The Element types determine the type of cast to perform.
1471 llvm::Type *SrcElementTy;
1472 llvm::Type *DstElementTy;
1473 QualType SrcElementType;
1474 QualType DstElementType;
1475 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1476 SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1477 DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1478 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1479 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1480 } else {
1481 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1482 "cannot cast between matrix and non-matrix types");
1483 SrcElementTy = SrcTy;
1484 DstElementTy = DstTy;
1485 SrcElementType = SrcType;
1486 DstElementType = DstType;
1487 }
1488
1489 if (isa<llvm::IntegerType>(Val: SrcElementTy)) {
1490 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1491 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1492 InputSigned = true;
1493 }
1494
1495 if (isa<llvm::IntegerType>(Val: DstElementTy))
1496 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1497 if (InputSigned)
1498 return Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1499 return Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1500 }
1501
1502 if (isa<llvm::IntegerType>(Val: DstElementTy)) {
1503 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1504 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1505
1506 // If we can't recognize overflow as undefined behavior, assume that
1507 // overflow saturates. This protects against normal optimizations if we are
1508 // compiling with non-standard FP semantics.
1509 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1510 llvm::Intrinsic::ID IID =
1511 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1512 return Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID, Tys: {DstTy, SrcTy}), Args: Src);
1513 }
1514
1515 if (IsSigned)
1516 return Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1517 return Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1518 }
1519
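// Conversions between two different 16-bit FP types (e.g. _Float16 and
// __bf16) cannot be done as a single fptrunc/fpext, so extend to float first
// and then truncate to the destination type.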
1520 if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
1521 Value *FloatVal = Builder.CreateFPExt(V: Src, DestTy: Builder.getFloatTy(), Name: "fpext");
1522 return Builder.CreateFPTrunc(V: FloatVal, DestTy: DstTy, Name: "fptrunc");
1523 }
1524 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1525 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1526 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1527}
1528
1529/// Emit a conversion from the specified type to the specified destination type,
1530/// both of which are LLVM scalar types.
1531Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1532 QualType DstType,
1533 SourceLocation Loc,
1534 ScalarConversionOpts Opts) {
1535 // All conversions involving fixed point types should be handled by the
1536 // EmitFixedPoint family functions. This is done to prevent bloating up this
1537 // function more, and although fixed point numbers are represented by
1538 // integers, we do not want to follow any logic that assumes they should be
1539 // treated as integers.
1540 // TODO(leonardchan): When necessary, add another if statement checking for
1541 // conversions to fixed point types from other types.
1542 if (SrcType->isFixedPointType()) {
1543 if (DstType->isBooleanType())
1544 // It is important that we check this before checking if the dest type is
1545 // an integer because booleans are technically integer types.
1546 // We do not need to check the padding bit on unsigned types if unsigned
1547 // padding is enabled because overflow into this bit is undefined
1548 // behavior.
1549 return Builder.CreateIsNotNull(Arg: Src, Name: "tobool");
1550 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1551 DstType->isRealFloatingType())
1552 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1553
1554 llvm_unreachable(
1555 "Unhandled scalar conversion from a fixed point type to another type.");
1556 } else if (DstType->isFixedPointType()) {
1557 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1558 // This also includes converting booleans and enums to fixed point types.
1559 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1560
1561 llvm_unreachable(
1562 "Unhandled scalar conversion to a fixed point type from another type.");
1563 }
1564
1565 QualType NoncanonicalSrcType = SrcType;
1566 QualType NoncanonicalDstType = DstType;
1567
1568 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1569 DstType = CGF.getContext().getCanonicalType(T: DstType);
1570 if (SrcType == DstType) return Src;
1571
1572 if (DstType->isVoidType()) return nullptr;
1573
1574 llvm::Value *OrigSrc = Src;
1575 QualType OrigSrcType = SrcType;
1576 llvm::Type *SrcTy = Src->getType();
1577
1578 // Handle conversions to bool first, they are special: comparisons against 0.
1579 if (DstType->isBooleanType())
1580 return EmitConversionToBool(Src, SrcType);
1581
1582 llvm::Type *DstTy = ConvertType(T: DstType);
1583
1584 // Cast from half through float if half isn't a native type.
1585 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1586 // Cast to FP using the intrinsic if the half type itself isn't supported.
1587 if (DstTy->isFloatingPointTy()) {
1588 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1589 return Builder.CreateCall(
1590 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16, Tys: DstTy),
1591 Args: Src);
1592 } else {
1593 // Cast to other types through float, using either the intrinsic or FPExt,
1594 // depending on whether the half type itself is supported
1595 // (as opposed to operations on half, available with NativeHalfType).
1596 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1597 Src = Builder.CreateCall(
1598 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16,
1599 Tys: CGF.CGM.FloatTy),
1600 Args: Src);
1601 } else {
1602 Src = Builder.CreateFPExt(V: Src, DestTy: CGF.CGM.FloatTy, Name: "conv");
1603 }
1604 SrcType = CGF.getContext().FloatTy;
1605 SrcTy = CGF.FloatTy;
1606 }
1607 }
1608
1609 // Ignore conversions like int -> uint.
1610 if (SrcTy == DstTy) {
1611 if (Opts.EmitImplicitIntegerSignChangeChecks)
1612 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Src,
1613 DstType: NoncanonicalDstType, Loc);
1614
1615 return Src;
1616 }
1617
1618 // Handle pointer conversions next: pointers can only be converted to/from
1619 // other pointers and integers. Check for pointer types in terms of LLVM, as
1620 // some native types (like Obj-C id) may map to a pointer type.
1621 if (auto DstPT = dyn_cast<llvm::PointerType>(Val: DstTy)) {
1622 // The source value may be an integer, or a pointer.
1623 if (isa<llvm::PointerType>(Val: SrcTy))
1624 return Src;
1625
1626 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1627 // First, convert to the correct width so that we control the kind of
1628 // extension.
1629 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1630 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1631 llvm::Value* IntResult =
1632 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
1633 // Then, cast to pointer.
1634 return Builder.CreateIntToPtr(V: IntResult, DestTy: DstTy, Name: "conv");
1635 }
1636
1637 if (isa<llvm::PointerType>(Val: SrcTy)) {
1638 // Must be a ptr-to-int cast.
1639 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1640 return Builder.CreatePtrToInt(V: Src, DestTy: DstTy, Name: "conv");
1641 }
1642
1643 // A scalar can be splatted to an extended vector of the same element type
1644 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1645 // Sema should add casts to make sure that the source expression's type is
1646 // the same as the vector's element type (sans qualifiers)
1647 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1648 SrcType.getTypePtr() &&
1649 "Splatted expr doesn't match with vector element type?");
1650
1651 // Splat the element across to all elements
1652 unsigned NumElements = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements();
1653 return Builder.CreateVectorSplat(NumElts: NumElements, V: Src, Name: "splat");
1654 }
1655
1656 if (SrcType->isMatrixType() && DstType->isMatrixType())
1657 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1658
1659 if (isa<llvm::VectorType>(Val: SrcTy) || isa<llvm::VectorType>(Val: DstTy)) {
1660 // Allow bitcast from vector to integer/fp of the same size.
1661 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1662 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1663 if (SrcSize == DstSize)
1664 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name: "conv");
1665
1666 // Conversions between vectors of different sizes are not allowed except
1667 // when vectors of half are involved. Operations on storage-only half
1668 // vectors require promoting half vector operands to float vectors and
1669 // truncating the result, which is either an int or float vector, to a
1670 // short or half vector.
1671
1672 // Source and destination are both expected to be vectors.
1673 llvm::Type *SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1674 llvm::Type *DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1675 (void)DstElementTy;
1676
1677 assert(((SrcElementTy->isIntegerTy() &&
1678 DstElementTy->isIntegerTy()) ||
1679 (SrcElementTy->isFloatingPointTy() &&
1680 DstElementTy->isFloatingPointTy())) &&
1681 "unexpected conversion between a floating-point vector and an "
1682 "integer vector");
1683
1684 // Truncate an i32 vector to an i16 vector.
1685 if (SrcElementTy->isIntegerTy())
1686 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: false, Name: "conv");
1687
1688 // Truncate a float vector to a half vector.
1689 if (SrcSize > DstSize)
1690 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1691
1692 // Promote a half vector to a float vector.
1693 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1694 }
1695
1696 // Finally, we have the arithmetic types: real int/float.
1697 Value *Res = nullptr;
1698 llvm::Type *ResTy = DstTy;
1699
1700 // An overflowing conversion has undefined behavior if either the source type
1701 // or the destination type is a floating-point type. However, we consider the
1702 // range of representable values for all floating-point types to be
1703 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1704 // floating-point type.
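// For example, '(int)1.0e20f' gets a check because the value is not
// representable in 'int', whereas an integer-to-float conversion is never
// flagged under this model.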
1705 if (CGF.SanOpts.has(K: SanitizerKind::FloatCastOverflow) &&
1706 OrigSrcType->isFloatingType())
1707 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1708 Loc);
1709
1710 // Cast to half through float if half isn't a native type.
1711 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1712 // Make sure we cast in a single step if from another FP type.
1713 if (SrcTy->isFloatingPointTy()) {
1714 // Use the intrinsic if the half type itself isn't supported
1715 // (as opposed to operations on half, available with NativeHalfType).
1716 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1717 return Builder.CreateCall(
1718 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16, Tys: SrcTy), Args: Src);
1719 // If the half type is supported, just use an fptrunc.
1720 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy);
1721 }
1722 DstTy = CGF.FloatTy;
1723 }
1724
1725 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1726
1727 if (DstTy != ResTy) {
1728 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1729 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1730 Res = Builder.CreateCall(
1731 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16, Tys: CGF.CGM.FloatTy),
1732 Args: Res);
1733 } else {
1734 Res = Builder.CreateFPTrunc(V: Res, DestTy: ResTy, Name: "conv");
1735 }
1736 }
1737
1738 if (Opts.EmitImplicitIntegerTruncationChecks)
1739 EmitIntegerTruncationCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1740 DstType: NoncanonicalDstType, Loc);
1741
1742 if (Opts.EmitImplicitIntegerSignChangeChecks)
1743 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1744 DstType: NoncanonicalDstType, Loc);
1745
1746 return Res;
1747}
1748
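/// Emit a conversion where at least one of the types is a fixed-point type,
/// e.g. 'short _Accum' -> 'int' or 'float' -> '_Fract', by delegating to
/// llvm::FixedPointBuilder.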
1749Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1750 QualType DstTy,
1751 SourceLocation Loc) {
1752 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1753 llvm::Value *Result;
1754 if (SrcTy->isRealFloatingType())
1755 Result = FPBuilder.CreateFloatingToFixed(Src,
1756 DstSema: CGF.getContext().getFixedPointSemantics(Ty: DstTy));
1757 else if (DstTy->isRealFloatingType())
1758 Result = FPBuilder.CreateFixedToFloating(Src,
1759 SrcSema: CGF.getContext().getFixedPointSemantics(Ty: SrcTy),
1760 DstTy: ConvertType(T: DstTy));
1761 else {
1762 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(Ty: SrcTy);
1763 auto DstFPSema = CGF.getContext().getFixedPointSemantics(Ty: DstTy);
1764
1765 if (DstTy->isIntegerType())
1766 Result = FPBuilder.CreateFixedToInteger(Src, SrcSema: SrcFPSema,
1767 DstWidth: DstFPSema.getWidth(),
1768 DstIsSigned: DstFPSema.isSigned());
1769 else if (SrcTy->isIntegerType())
1770 Result = FPBuilder.CreateIntegerToFixed(Src, SrcIsSigned: SrcFPSema.isSigned(),
1771 DstSema: DstFPSema);
1772 else
1773 Result = FPBuilder.CreateFixedToFixed(Src, SrcSema: SrcFPSema, DstSema: DstFPSema);
1774 }
1775 return Result;
1776}
1777
1778/// Emit a conversion from the specified complex type to the specified
1779/// destination type, where the destination type is an LLVM scalar type.
1780Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1781 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1782 SourceLocation Loc) {
1783 // Get the source element type.
1784 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1785
1786 // Handle conversions to bool first, they are special: comparisons against 0.
1787 if (DstTy->isBooleanType()) {
1788 // Complex != 0 -> (Real != 0) | (Imag != 0)
1789 Src.first = EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1790 Src.second = EmitScalarConversion(Src: Src.second, SrcType: SrcTy, DstType: DstTy, Loc);
1791 return Builder.CreateOr(LHS: Src.first, RHS: Src.second, Name: "tobool");
1792 }
1793
1794 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1795 // the imaginary part of the complex value is discarded and the value of the
1796 // real part is converted according to the conversion rules for the
1797 // corresponding real type."
1798 return EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1799}
1800
1801Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1802 return CGF.EmitFromMemory(Value: CGF.CGM.EmitNullConstant(T: Ty), Ty);
1803}
1804
1805/// Emit a sanitization check for the given "binary" operation (which
1806/// might actually be a unary increment which has been lowered to a binary
1807/// operation). The check passes if all values in \p Checks (which are \c i1),
1808/// are \c true.
1809void ScalarExprEmitter::EmitBinOpCheck(
1810 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1811 const BinOpInfo &Info) {
1812 assert(CGF.IsSanitizerScope);
1813 SanitizerHandler Check;
1814 SmallVector<llvm::Constant *, 4> StaticData;
1815 SmallVector<llvm::Value *, 2> DynamicData;
1816
1817 BinaryOperatorKind Opcode = Info.Opcode;
1818 if (BinaryOperator::isCompoundAssignmentOp(Opc: Opcode))
1819 Opcode = BinaryOperator::getOpForCompoundAssignment(Opc: Opcode);
1820
1821 StaticData.push_back(Elt: CGF.EmitCheckSourceLocation(Loc: Info.E->getExprLoc()));
1822 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Info.E);
1823 if (UO && UO->getOpcode() == UO_Minus) {
1824 Check = SanitizerHandler::NegateOverflow;
1825 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: UO->getType()));
1826 DynamicData.push_back(Elt: Info.RHS);
1827 } else {
1828 if (BinaryOperator::isShiftOp(Opc: Opcode)) {
1829 // Shift LHS negative or too large, or RHS out of bounds.
1830 Check = SanitizerHandler::ShiftOutOfBounds;
1831 const BinaryOperator *BO = cast<BinaryOperator>(Val: Info.E);
1832 StaticData.push_back(
1833 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getLHS()->getType()));
1834 StaticData.push_back(
1835 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getRHS()->getType()));
1836 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1837 // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
1838 Check = SanitizerHandler::DivremOverflow;
1839 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1840 } else {
1841 // Arithmetic overflow (+, -, *).
1842 switch (Opcode) {
1843 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1844 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1845 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1846 default: llvm_unreachable("unexpected opcode for bin op check");
1847 }
1848 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1849 }
1850 DynamicData.push_back(Elt: Info.LHS);
1851 DynamicData.push_back(Elt: Info.RHS);
1852 }
1853
1854 CGF.EmitCheck(Checked: Checks, Check, StaticArgs: StaticData, DynamicArgs: DynamicData);
1855}
1856
1857//===----------------------------------------------------------------------===//
1858// Visitor Methods
1859//===----------------------------------------------------------------------===//
1860
1861Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1862 CGF.ErrorUnsupported(S: E, Type: "scalar expression");
1863 if (E->getType()->isVoidType())
1864 return nullptr;
1865 return llvm::PoisonValue::get(T: CGF.ConvertType(T: E->getType()));
1866}
1867
1868Value *
1869ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1870 ASTContext &Context = CGF.getContext();
1871 unsigned AddrSpace =
1872 Context.getTargetAddressSpace(AS: CGF.CGM.GetGlobalConstantAddressSpace());
1873 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1874 Str: E->ComputeName(Context), Name: "__usn_str", AddressSpace: AddrSpace);
1875
1876 llvm::Type *ExprTy = ConvertType(T: E->getType());
1877 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: GlobalConstStr, DestTy: ExprTy,
1878 Name: "usn_addr_cast");
1879}
1880
1881Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1882 assert(E->getDataElementCount() == 1);
1883 auto It = E->begin();
1884 return Builder.getInt(AI: (*It)->getValue());
1885}
1886
1887Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1888 // Vector Mask Case
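// (i.e. the two-operand form __builtin_shufflevector(V, M), where the mask is
// a runtime vector rather than a list of constant indices)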
1889 if (E->getNumSubExprs() == 2) {
1890 Value *LHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1891 Value *RHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1892 Value *Mask;
1893
1894 auto *LTy = cast<llvm::FixedVectorType>(Val: LHS->getType());
1895 unsigned LHSElts = LTy->getNumElements();
1896
1897 Mask = RHS;
1898
1899 auto *MTy = cast<llvm::FixedVectorType>(Val: Mask->getType());
1900
1901 // Mask off the high bits of each shuffle index.
1902 Value *MaskBits =
1903 llvm::ConstantInt::get(Ty: MTy, V: llvm::NextPowerOf2(A: LHSElts - 1) - 1);
1904 Mask = Builder.CreateAnd(LHS: Mask, RHS: MaskBits, Name: "mask");
1905
1906 // newv = undef
1907 // mask = mask & maskbits
1908 // for each elt
1909 // n = extract mask i
1910 // x = extract val n
1911 // newv = insert newv, x, i
1912 auto *RTy = llvm::FixedVectorType::get(ElementType: LTy->getElementType(),
1913 NumElts: MTy->getNumElements());
1914 Value* NewV = llvm::PoisonValue::get(T: RTy);
1915 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1916 Value *IIndx = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: i);
1917 Value *Indx = Builder.CreateExtractElement(Vec: Mask, Idx: IIndx, Name: "shuf_idx");
1918
1919 Value *VExt = Builder.CreateExtractElement(Vec: LHS, Idx: Indx, Name: "shuf_elt");
1920 NewV = Builder.CreateInsertElement(Vec: NewV, NewElt: VExt, Idx: IIndx, Name: "shuf_ins");
1921 }
1922 return NewV;
1923 }
1924
1925 Value* V1 = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1926 Value* V2 = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1927
1928 SmallVector<int, 32> Indices;
1929 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1930 llvm::APSInt Idx = E->getShuffleMaskIdx(N: i - 2);
1931 // Check for -1 and output it as undef in the IR.
1932 if (Idx.isSigned() && Idx.isAllOnes())
1933 Indices.push_back(Elt: -1);
1934 else
1935 Indices.push_back(Elt: Idx.getZExtValue());
1936 }
1937
1938 return Builder.CreateShuffleVector(V1, V2, Mask: Indices, Name: "shuffle");
1939}
1940
1941Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1942 QualType SrcType = E->getSrcExpr()->getType(),
1943 DstType = E->getType();
1944
1945 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
1946
1947 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1948 DstType = CGF.getContext().getCanonicalType(T: DstType);
1949 if (SrcType == DstType) return Src;
1950
1951 assert(SrcType->isVectorType() &&
1952 "ConvertVector source type must be a vector");
1953 assert(DstType->isVectorType() &&
1954 "ConvertVector destination type must be a vector");
1955
1956 llvm::Type *SrcTy = Src->getType();
1957 llvm::Type *DstTy = ConvertType(T: DstType);
1958
1959 // Ignore conversions like int -> uint.
1960 if (SrcTy == DstTy)
1961 return Src;
1962
1963 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1964 DstEltType = DstType->castAs<VectorType>()->getElementType();
1965
1966 assert(SrcTy->isVectorTy() &&
1967 "ConvertVector source IR type must be a vector");
1968 assert(DstTy->isVectorTy() &&
1969 "ConvertVector destination IR type must be a vector");
1970
1971 llvm::Type *SrcEltTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType(),
1972 *DstEltTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1973
1974 if (DstEltType->isBooleanType()) {
1975 assert((SrcEltTy->isFloatingPointTy() ||
1976 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1977
1978 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: SrcTy);
1979 if (SrcEltTy->isFloatingPointTy()) {
1980 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
1981 return Builder.CreateFCmpUNE(LHS: Src, RHS: Zero, Name: "tobool");
1982 } else {
1983 return Builder.CreateICmpNE(LHS: Src, RHS: Zero, Name: "tobool");
1984 }
1985 }
1986
1987 // We have the arithmetic types: real int/float.
1988 Value *Res = nullptr;
1989
1990 if (isa<llvm::IntegerType>(Val: SrcEltTy)) {
1991 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1992 if (isa<llvm::IntegerType>(Val: DstEltTy))
1993 Res = Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1994 else {
1995 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
1996 if (InputSigned)
1997 Res = Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1998 else
1999 Res = Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
2000 }
2001 } else if (isa<llvm::IntegerType>(Val: DstEltTy)) {
2002 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2003 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2004 if (DstEltType->isSignedIntegerOrEnumerationType())
2005 Res = Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
2006 else
2007 Res = Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
2008 } else {
2009 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2010 "Unknown real conversion");
2011 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2012 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2013 Res = Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
2014 else
2015 Res = Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
2016 }
2017
2018 return Res;
2019}
2020
2021Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2022 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(ME: E)) {
2023 CGF.EmitIgnoredExpr(E: E->getBase());
2024 return CGF.emitScalarConstant(Constant, E);
2025 } else {
2026 Expr::EvalResult Result;
2027 if (E->EvaluateAsInt(Result, Ctx: CGF.getContext(), AllowSideEffects: Expr::SE_AllowSideEffects)) {
2028 llvm::APSInt Value = Result.Val.getInt();
2029 CGF.EmitIgnoredExpr(E: E->getBase());
2030 return Builder.getInt(AI: Value);
2031 }
2032 }
2033
2034 llvm::Value *Result = EmitLoadOfLValue(E);
2035
2036 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2037 // debug info for the pointer, even if there is no variable associated with
2038 // the pointer's expression.
2039 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2040 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Val: Result)) {
2041 if (llvm::GetElementPtrInst *GEP =
2042 dyn_cast<llvm::GetElementPtrInst>(Val: Load->getPointerOperand())) {
2043 if (llvm::Instruction *Pointer =
2044 dyn_cast<llvm::Instruction>(Val: GEP->getPointerOperand())) {
2045 QualType Ty = E->getBase()->getType();
2046 if (!E->isArrow())
2047 Ty = CGF.getContext().getPointerType(T: Ty);
2048 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Value: Pointer, Ty);
2049 }
2050 }
2051 }
2052 }
2053 return Result;
2054}
2055
2056Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2057 TestAndClearIgnoreResultAssign();
2058
2059 // Emit subscript expressions in rvalue contexts. In most cases, this just
2060 // loads the lvalue formed by the subscript expr. However, we have to be
2061 // careful, because the base of a vector subscript is occasionally an rvalue,
2062 // so we can't get it as an lvalue.
2063 if (!E->getBase()->getType()->isVectorType() &&
2064 !E->getBase()->getType()->isSveVLSBuiltinType())
2065 return EmitLoadOfLValue(E);
2066
2067 // Handle the vector case. The base must be a vector, the index must be an
2068 // integer value.
2069 Value *Base = Visit(E: E->getBase());
2070 Value *Idx = Visit(E: E->getIdx());
2071 QualType IdxTy = E->getIdx()->getType();
2072
2073 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
2074 CGF.EmitBoundsCheck(E, Base: E->getBase(), Index: Idx, IndexType: IdxTy, /*Accessed*/true);
2075
2076 return Builder.CreateExtractElement(Vec: Base, Idx, Name: "vecext");
2077}
2078
2079Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2080 TestAndClearIgnoreResultAssign();
2081
2082 // Handle the matrix case. The base must be a matrix, and the row and column
2083 // indices must be integer values.
2084 Value *RowIdx = CGF.EmitMatrixIndexExpr(E: E->getRowIdx());
2085 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E: E->getColumnIdx());
2086
2087 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2088 unsigned NumRows = MatrixTy->getNumRows();
2089 llvm::MatrixBuilder MB(Builder);
2090 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2091 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2092 MB.CreateIndexAssumption(Idx, NumElements: MatrixTy->getNumElementsFlattened());
2093
2094 Value *Matrix = Visit(E: E->getBase());
2095
2096 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2097 return Builder.CreateExtractElement(Vec: Matrix, Idx, Name: "matrixext");
2098}
2099
2100static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2101 unsigned Off) {
2102 int MV = SVI->getMaskValue(Elt: Idx);
2103 if (MV == -1)
2104 return -1;
2105 return Off + MV;
2106}
2107
2108static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2109 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2110 "Index operand too large for shufflevector mask!");
2111 return C->getZExtValue();
2112}
2113
2114Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2115 bool Ignore = TestAndClearIgnoreResultAssign();
2116 (void)Ignore;
2117 unsigned NumInitElements = E->getNumInits();
2118 assert(Ignore == false ||
2119 (NumInitElements == 0 && E->getType()->isVoidType()) &&
2120 "init list ignored");
2121
2122 // HLSL initialization lists in the AST are an expansion which can contain
2123 // side-effecting expressions wrapped in opaque value expressions. To properly
2124 // emit these we need to emit the opaque values before we emit the argument
2125 // expressions themselves. This is a little hacky, but it prevents us from
2126 // needing to do a bigger AST-level change for a language feature that we
2127 // need to deprecate in the near future. See the related HLSL language
2128 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2129 // * 0005-strict-initializer-lists.md
2130 // * 0032-constructors.md
2131 if (CGF.getLangOpts().HLSL)
2132 CGF.CGM.getHLSLRuntime().emitInitListOpaqueValues(CGF, E);
2133
2134 if (E->hadArrayRangeDesignator())
2135 CGF.ErrorUnsupported(S: E, Type: "GNU array range designator extension");
2136
2137 llvm::VectorType *VType =
2138 dyn_cast<llvm::VectorType>(Val: ConvertType(T: E->getType()));
2139
2140 if (!VType) {
2141 if (NumInitElements == 0) {
2142 // C++11 value-initialization for the scalar.
2143 return EmitNullValue(Ty: E->getType());
2144 }
2145 // We have a scalar in braces. Just use the first element.
2146 return Visit(E: E->getInit(Init: 0));
2147 }
2148
2149 if (isa<llvm::ScalableVectorType>(Val: VType)) {
2150 if (NumInitElements == 0) {
2151 // C++11 value-initialization for the vector.
2152 return EmitNullValue(Ty: E->getType());
2153 }
2154
2155 if (NumInitElements == 1) {
2156 Expr *InitVector = E->getInit(Init: 0);
2157
2158 // Initialize from another scalable vector of the same type.
2159 if (InitVector->getType().getCanonicalType() ==
2160 E->getType().getCanonicalType())
2161 return Visit(E: InitVector);
2162 }
2163
2164 llvm_unreachable("Unexpected initialization of a scalable vector!");
2165 }
2166
2167 unsigned ResElts = cast<llvm::FixedVectorType>(Val: VType)->getNumElements();
2168
2169 // Loop over initializers collecting the Value for each, and remembering
2170 // whether the source was a swizzle (ExtVectorElementExpr). This will allow
2171 // us to fold the shuffle for the swizzle into the shuffle for the vector
2172 // initializer, since LLVM optimizers generally do not want to touch
2173 // shuffles.
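// For example (illustrative, ext_vector_type syntax): in
// 'float4 v = {w.xy, a, b};' the 'w.xy' swizzle is itself a shufflevector;
// when 'w' is as wide as 'v' we can shuffle 'w' directly into the result
// instead of extracting and re-inserting its elements.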
2174 unsigned CurIdx = 0;
2175 bool VIsPoisonShuffle = false;
2176 llvm::Value *V = llvm::PoisonValue::get(T: VType);
2177 for (unsigned i = 0; i != NumInitElements; ++i) {
2178 Expr *IE = E->getInit(Init: i);
2179 Value *Init = Visit(E: IE);
2180 SmallVector<int, 16> Args;
2181
2182 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Val: Init->getType());
2183
2184 // Handle scalar elements. If the scalar initializer is actually one
2185 // element of a different vector of the same width, use shuffle instead of
2186 // extract+insert.
2187 if (!VVT) {
2188 if (isa<ExtVectorElementExpr>(Val: IE)) {
2189 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Val: Init);
2190
2191 if (cast<llvm::FixedVectorType>(Val: EI->getVectorOperandType())
2192 ->getNumElements() == ResElts) {
2193 llvm::ConstantInt *C = cast<llvm::ConstantInt>(Val: EI->getIndexOperand());
2194 Value *LHS = nullptr, *RHS = nullptr;
2195 if (CurIdx == 0) {
2196 // insert into poison -> shuffle (src, poison)
2197 // shufflemask must use an i32
2198 Args.push_back(Elt: getAsInt32(C, I32Ty: CGF.Int32Ty));
2199 Args.resize(N: ResElts, NV: -1);
2200
2201 LHS = EI->getVectorOperand();
2202 RHS = V;
2203 VIsPoisonShuffle = true;
2204 } else if (VIsPoisonShuffle) {
2205 // insert into poison shuffle && size match -> shuffle (v, src)
2206 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(Val: V);
2207 for (unsigned j = 0; j != CurIdx; ++j)
2208 Args.push_back(Elt: getMaskElt(SVI: SVV, Idx: j, Off: 0));
2209 Args.push_back(Elt: ResElts + C->getZExtValue());
2210 Args.resize(N: ResElts, NV: -1);
2211
2212 LHS = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2213 RHS = EI->getVectorOperand();
2214 VIsPoisonShuffle = false;
2215 }
2216 if (!Args.empty()) {
2217 V = Builder.CreateShuffleVector(V1: LHS, V2: RHS, Mask: Args);
2218 ++CurIdx;
2219 continue;
2220 }
2221 }
2222 }
2223 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx: Builder.getInt32(C: CurIdx),
2224 Name: "vecinit");
2225 VIsPoisonShuffle = false;
2226 ++CurIdx;
2227 continue;
2228 }
2229
2230 unsigned InitElts = cast<llvm::FixedVectorType>(Val: VVT)->getNumElements();
2231
2232 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2233 // input is the same width as the vector being constructed, generate an
2234 // optimized shuffle of the swizzle input into the result.
2235 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2236 if (isa<ExtVectorElementExpr>(Val: IE)) {
2237 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Val: Init);
2238 Value *SVOp = SVI->getOperand(i_nocapture: 0);
2239 auto *OpTy = cast<llvm::FixedVectorType>(Val: SVOp->getType());
2240
2241 if (OpTy->getNumElements() == ResElts) {
2242 for (unsigned j = 0; j != CurIdx; ++j) {
2243 // If the current vector initializer is a shuffle with poison, merge
2244 // this shuffle directly into it.
2245 if (VIsPoisonShuffle) {
2246 Args.push_back(Elt: getMaskElt(SVI: cast<llvm::ShuffleVectorInst>(Val: V), Idx: j, Off: 0));
2247 } else {
2248 Args.push_back(Elt: j);
2249 }
2250 }
2251 for (unsigned j = 0, je = InitElts; j != je; ++j)
2252 Args.push_back(Elt: getMaskElt(SVI, Idx: j, Off: Offset));
2253 Args.resize(N: ResElts, NV: -1);
2254
2255 if (VIsPoisonShuffle)
2256 V = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2257
2258 Init = SVOp;
2259 }
2260 }
2261
2262 // Extend init to result vector length, and then shuffle its contribution
2263 // to the vector initializer into V.
2264 if (Args.empty()) {
2265 for (unsigned j = 0; j != InitElts; ++j)
2266 Args.push_back(Elt: j);
2267 Args.resize(N: ResElts, NV: -1);
2268 Init = Builder.CreateShuffleVector(V: Init, Mask: Args, Name: "vext");
2269
2270 Args.clear();
2271 for (unsigned j = 0; j != CurIdx; ++j)
2272 Args.push_back(Elt: j);
2273 for (unsigned j = 0; j != InitElts; ++j)
2274 Args.push_back(Elt: j + Offset);
2275 Args.resize(N: ResElts, NV: -1);
2276 }
2277
2278 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2279 // merging subsequent shuffles into this one.
2280 if (CurIdx == 0)
2281 std::swap(a&: V, b&: Init);
2282 V = Builder.CreateShuffleVector(V1: V, V2: Init, Mask: Args, Name: "vecinit");
2283 VIsPoisonShuffle = isa<llvm::PoisonValue>(Val: Init);
2284 CurIdx += InitElts;
2285 }
2286
2287 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2289 llvm::Type *EltTy = VType->getElementType();
2290
2291 // Emit remaining default initializers
2292 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2293 Value *Idx = Builder.getInt32(C: CurIdx);
2294 llvm::Value *Init = llvm::Constant::getNullValue(Ty: EltTy);
2295 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx, Name: "vecinit");
2296 }
2297 return V;
2298}
2299
2300static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D) {
2301 return !D->isWeak();
2302}
2303
2304static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2305 E = E->IgnoreParens();
2306
2307 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2308 if (UO->getOpcode() == UO_Deref)
2309 return CGF.isPointerKnownNonNull(E: UO->getSubExpr());
2310
2311 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E))
2312 return isDeclRefKnownNonNull(CGF, D: DRE->getDecl());
2313
2314 if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) {
2315 if (isa<FieldDecl>(Val: ME->getMemberDecl()))
2316 return true;
2317 return isDeclRefKnownNonNull(CGF, D: ME->getMemberDecl());
2318 }
2319
2320 // Array subscripts? Anything else?
2321
2322 return false;
2323}
2324
2325bool CodeGenFunction::isPointerKnownNonNull(const Expr *E) {
2326 assert(E->getType()->isSignableType(getContext()));
2327
2328 E = E->IgnoreParens();
2329
2330 if (isa<CXXThisExpr>(Val: E))
2331 return true;
2332
2333 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2334 if (UO->getOpcode() == UO_AddrOf)
2335 return isLValueKnownNonNull(CGF&: *this, E: UO->getSubExpr());
2336
2337 if (const auto *CE = dyn_cast<CastExpr>(Val: E))
2338 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2339 CE->getCastKind() == CK_ArrayToPointerDecay)
2340 return isLValueKnownNonNull(CGF&: *this, E: CE->getSubExpr());
2341
2342 // Maybe honor __nonnull?
2343
2344 return false;
2345}
2346
2347bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2348 const Expr *E = CE->getSubExpr();
2349
2350 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2351 return false;
2352
2353 if (isa<CXXThisExpr>(Val: E->IgnoreParens())) {
2354 // We always assume that 'this' is never null.
2355 return false;
2356 }
2357
2358 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2359 // And that glvalue casts are never null.
2360 if (ICE->isGLValue())
2361 return false;
2362 }
2363
2364 return true;
2365}
2366
2367 // Emit an HLSL elementwise cast: flatten the aggregate-typed RHS and convert its elements into the LHS, which must be a vector or a builtin type.
2368static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address RHSVal,
2369 QualType RHSTy, QualType LHSTy,
2370 SourceLocation Loc) {
2371 SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList;
2372 SmallVector<QualType, 16> SrcTypes; // Flattened type
2373 CGF.FlattenAccessAndType(Addr: RHSVal, AddrTy: RHSTy, AccessList&: LoadGEPList, FlatTypes&: SrcTypes);
2374 // The LHS is either a vector or a builtin type.
2375 // If it's a vector, create a temp alloca to store into and return that.
2376 if (auto *VecTy = LHSTy->getAs<VectorType>()) {
2377 assert(SrcTypes.size() >= VecTy->getNumElements() &&
2378 "Flattened type on RHS must have at least as many elements as the vector on LHS.");
2379 llvm::Value *V =
2380 CGF.Builder.CreateLoad(Addr: CGF.CreateIRTemp(T: LHSTy, Name: "flatcast.tmp"));
2381 // write to V.
2382 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2383 llvm::Value *Load = CGF.Builder.CreateLoad(Addr: LoadGEPList[I].first, Name: "load");
2384 llvm::Value *Idx = LoadGEPList[I].second;
2385 Load = Idx ? CGF.Builder.CreateExtractElement(Vec: Load, Idx, Name: "vec.extract")
2386 : Load;
2387 llvm::Value *Cast = CGF.EmitScalarConversion(
2388 Src: Load, SrcTy: SrcTypes[I], DstTy: VecTy->getElementType(), Loc);
2389 V = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx: I);
2390 }
2391 return V;
2392 }
2393 // If it's a builtin type, just do an extract element or load.
2394 assert(LHSTy->isBuiltinType() &&
2395 "Destination type must be a vector or builtin type.");
2396 llvm::Value *Load = CGF.Builder.CreateLoad(Addr: LoadGEPList[0].first, Name: "load");
2397 llvm::Value *Idx = LoadGEPList[0].second;
2398 Load =
2399 Idx ? CGF.Builder.CreateExtractElement(Vec: Load, Idx, Name: "vec.extract") : Load;
2400 return CGF.EmitScalarConversion(Src: Load, SrcTy: LHSTy, DstTy: SrcTypes[0], Loc);
2401}
2402
2403// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2404 // have to handle a broader range of conversions than explicit casts, as they
2405// handle things like function to ptr-to-function decay etc.
2406Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2407 Expr *E = CE->getSubExpr();
2408 QualType DestTy = CE->getType();
2409 CastKind Kind = CE->getCastKind();
2410 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2411
2412 // These cases are generally not written to ignore the result of
2413 // evaluating their sub-expressions, so we clear this now.
2414 bool Ignored = TestAndClearIgnoreResultAssign();
2415
2416 // Since almost all cast kinds apply to scalars, this switch doesn't have
2417 // a default case, so the compiler will warn on a missing case. The cases
2418 // are in the same order as in the CastKind enum.
2419 switch (Kind) {
2420 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2421 case CK_BuiltinFnToFnPtr:
2422 llvm_unreachable("builtin functions are handled elsewhere");
2423
2424 case CK_LValueBitCast:
2425 case CK_ObjCObjectLValueCast: {
2426 Address Addr = EmitLValue(E).getAddress();
2427 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2428 LValue LV = CGF.MakeAddrLValue(Addr, T: DestTy);
2429 return EmitLoadOfLValue(LV, Loc: CE->getExprLoc());
2430 }
2431
2432 case CK_LValueToRValueBitCast: {
2433 LValue SourceLVal = CGF.EmitLValue(E);
2434 Address Addr =
2435 SourceLVal.getAddress().withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2436 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2437 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2438 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2439 }
2440
2441 case CK_CPointerToObjCPointerCast:
2442 case CK_BlockPointerToObjCPointerCast:
2443 case CK_AnyPointerToBlockPointerCast:
2444 case CK_BitCast: {
2445 Value *Src = Visit(E);
2446 llvm::Type *SrcTy = Src->getType();
2447 llvm::Type *DstTy = ConvertType(T: DestTy);
2448
2449 // FIXME: this is a gross but seemingly necessary workaround for an issue
2450 // manifesting when a target uses a non-default AS for indirect sret args,
2451 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2452 // on the address of a local struct that gets returned by value yields an
2453 // invalid bitcast from a pointer to the IndirectAS to a pointer to the
2454 // DefaultAS. We can only do this subversive thing because sret args are
2455 // manufactured and their residing in the IndirectAS is a target-specific
2456 // detail, and doing an AS cast here still retains the semantics the user
2457 // expects. It is desirable to remove this iff a better solution is found.
2458 if (auto A = dyn_cast<llvm::Argument>(Val: Src); A && A->hasStructRetAttr())
2459 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2460 CGF, V: Src, SrcAddr: E->getType().getAddressSpace(), DestTy: DstTy);
2461
2462 assert(
2463 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2464 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2465 "Address-space cast must be used to convert address spaces");
2466
2467 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) {
2468 if (auto *PT = DestTy->getAs<PointerType>()) {
2469 CGF.EmitVTablePtrCheckForCast(
2470 T: PT->getPointeeType(),
2471 Derived: Address(Src,
2472 CGF.ConvertTypeForMem(
2473 T: E->getType()->castAs<PointerType>()->getPointeeType()),
2474 CGF.getPointerAlign()),
2475 /*MayBeNull=*/true, TCK: CodeGenFunction::CFITCK_UnrelatedCast,
2476 Loc: CE->getBeginLoc());
2477 }
2478 }
2479
2480 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2481 const QualType SrcType = E->getType();
2482
2483 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2484 // Casting to pointer that could carry dynamic information (provided by
2485 // invariant.group) requires launder.
2486 Src = Builder.CreateLaunderInvariantGroup(Ptr: Src);
2487 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2488 // Casting to pointer that does not carry dynamic information (provided
2489 // by invariant.group) requires stripping it. Note that we don't do it
2490 // if the source could not be dynamic type and destination could be
2491 // dynamic because dynamic information is already laundered. It is
2492 // because launder(strip(src)) == launder(src), so there is no need to
2493 // add extra strip before launder.
2494 Src = Builder.CreateStripInvariantGroup(Ptr: Src);
2495 }
2496 }
2497
2498 // Update heapallocsite metadata when there is an explicit pointer cast.
2499 if (auto *CI = dyn_cast<llvm::CallBase>(Val: Src)) {
2500 if (CI->getMetadata(Kind: "heapallocsite") && isa<ExplicitCastExpr>(Val: CE) &&
2501 !isa<CastExpr>(Val: E)) {
2502 QualType PointeeType = DestTy->getPointeeType();
2503 if (!PointeeType.isNull())
2504 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: PointeeType,
2505 Loc: CE->getExprLoc());
2506 }
2507 }
2508
2509 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2510 // same element type, use the llvm.vector.insert intrinsic to perform the
2511 // bitcast.
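// (e.g. casting a fixed-length vector declared with
// __attribute__((arm_sve_vector_bits(N))) to the corresponding scalable SVE
// type, or its RISC-V vector equivalent)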
2512 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2513 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Val: DstTy)) {
2514 // If we are casting a fixed i8 vector to a scalable i1 predicate
2515 // vector, use a vector insert and bitcast the result.
2516 if (ScalableDstTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2517 FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2518 ScalableDstTy = llvm::ScalableVectorType::get(
2519 ElementType: FixedSrcTy->getElementType(),
2520 MinNumElts: llvm::divideCeil(
2521 Numerator: ScalableDstTy->getElementCount().getKnownMinValue(), Denominator: 8));
2522 }
2523 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2524 llvm::Value *PoisonVec = llvm::PoisonValue::get(T: ScalableDstTy);
2525 llvm::Value *Result = Builder.CreateInsertVector(
2526 DstType: ScalableDstTy, SrcVec: PoisonVec, SubVec: Src, Idx: uint64_t(0), Name: "cast.scalable");
2527 ScalableDstTy = cast<llvm::ScalableVectorType>(
2528 Val: llvm::VectorType::getWithSizeAndScalar(SizeTy: ScalableDstTy, EltTy: DstTy));
2529 if (Result->getType() != ScalableDstTy)
2530 Result = Builder.CreateBitCast(V: Result, DestTy: ScalableDstTy);
2531 if (Result->getType() != DstTy)
2532 Result = Builder.CreateExtractVector(DstType: DstTy, SrcVec: Result, Idx: uint64_t(0));
2533 return Result;
2534 }
2535 }
2536 }
2537
2538 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2539 // same element type, use the llvm.vector.extract intrinsic to perform the
2540 // bitcast.
2541 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(Val: SrcTy)) {
2542 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(Val: DstTy)) {
2543 // If we are casting a scalable i1 predicate vector to a fixed i8
2544 // vector, bitcast the source and use a vector extract.
2545 if (ScalableSrcTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2546 FixedDstTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2547 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(RHS: 8)) {
2548 ScalableSrcTy = llvm::ScalableVectorType::get(
2549 ElementType: ScalableSrcTy->getElementType(),
2550 MinNumElts: llvm::alignTo<8>(
2551 Value: ScalableSrcTy->getElementCount().getKnownMinValue()));
2552 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: ScalableSrcTy);
2553 Src = Builder.CreateInsertVector(DstType: ScalableSrcTy, SrcVec: ZeroVec, SubVec: Src,
2554 Idx: uint64_t(0));
2555 }
2556
2557 ScalableSrcTy = llvm::ScalableVectorType::get(
2558 ElementType: FixedDstTy->getElementType(),
2559 MinNumElts: ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2560 Src = Builder.CreateBitCast(V: Src, DestTy: ScalableSrcTy);
2561 }
2562 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2563 return Builder.CreateExtractVector(DstType: DstTy, SrcVec: Src, Idx: uint64_t(0),
2564 Name: "cast.fixed");
2565 }
2566 }
2567
2568 // Perform VLAT <-> VLST bitcast through memory.
2569 // TODO: since the llvm.vector.{insert,extract} intrinsics
2570 // require the element types of the vectors to be the same, we
2571 // need to keep this around for bitcasts between VLAT <-> VLST where
2572 // the element types of the vectors are not the same, until we figure
2573 // out a better way of doing these casts.
2574 if ((isa<llvm::FixedVectorType>(Val: SrcTy) &&
2575 isa<llvm::ScalableVectorType>(Val: DstTy)) ||
2576 (isa<llvm::ScalableVectorType>(Val: SrcTy) &&
2577 isa<llvm::FixedVectorType>(Val: DstTy))) {
2578 Address Addr = CGF.CreateDefaultAlignTempAlloca(Ty: SrcTy, Name: "saved-value");
2579 LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType());
2580 CGF.EmitStoreOfScalar(value: Src, lvalue: LV);
2581 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2582 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2583 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2584 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2585 }
2586
2587 llvm::Value *Result = Builder.CreateBitCast(V: Src, DestTy: DstTy);
2588 return CGF.authPointerToPointerCast(ResultPtr: Result, SourceType: E->getType(), DestType: DestTy);
2589 }
2590 case CK_AddressSpaceConversion: {
2591 Expr::EvalResult Result;
2592 if (E->EvaluateAsRValue(Result, Ctx: CGF.getContext()) &&
2593 Result.Val.isNullPointer()) {
2594 // If E has side effects, it is emitted even if its final result is a
2595 // null pointer. In that case, a DCE pass should be able to
2596 // eliminate the useless instructions emitted during translating E.
2597 if (Result.HasSideEffects)
2598 Visit(E);
2599 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(
2600 Val: ConvertType(T: DestTy)), QT: DestTy);
2601 }
2602 // Since the target may map different address spaces in the AST to the same address
2603 // space, an address space conversion may end up as a bitcast.
2604 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2605 CGF, V: Visit(E), SrcAddr: E->getType()->getPointeeType().getAddressSpace(),
2606 DestTy: ConvertType(T: DestTy));
2607 }
2608 case CK_AtomicToNonAtomic:
2609 case CK_NonAtomicToAtomic:
2610 case CK_UserDefinedConversion:
2611 return Visit(E);
2612
2613 case CK_NoOp: {
2614 return CE->changesVolatileQualification() ? EmitLoadOfLValue(E: CE) : Visit(E);
2615 }
2616
2617 case CK_BaseToDerived: {
2618 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2619 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2620
2621 Address Base = CGF.EmitPointerWithAlignment(Addr: E);
2622 Address Derived =
2623 CGF.GetAddressOfDerivedClass(Value: Base, Derived: DerivedClassDecl,
2624 PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
2625 NullCheckValue: CGF.ShouldNullCheckClassCastValue(CE));
2626
2627 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2628 // performed and the object is not of the derived type.
2629 if (CGF.sanitizePerformTypeCheck())
2630 CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DowncastPointer, Loc: CE->getExprLoc(),
2631 Addr: Derived, Type: DestTy->getPointeeType());
2632
2633 if (CGF.SanOpts.has(K: SanitizerKind::CFIDerivedCast))
2634 CGF.EmitVTablePtrCheckForCast(T: DestTy->getPointeeType(), Derived,
2635 /*MayBeNull=*/true,
2636 TCK: CodeGenFunction::CFITCK_DerivedCast,
2637 Loc: CE->getBeginLoc());
2638
2639 return CGF.getAsNaturalPointerTo(Addr: Derived, PointeeType: CE->getType()->getPointeeType());
2640 }
2641 case CK_UncheckedDerivedToBase:
2642 case CK_DerivedToBase: {
2643 // The EmitPointerWithAlignment path does this fine; just discard
2644 // the alignment.
2645 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitPointerWithAlignment(Addr: CE),
2646 PointeeType: CE->getType()->getPointeeType());
2647 }
2648
2649 case CK_Dynamic: {
2650 Address V = CGF.EmitPointerWithAlignment(Addr: E);
2651 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(Val: CE);
2652 return CGF.EmitDynamicCast(V, DCE);
2653 }
2654
2655 case CK_ArrayToPointerDecay:
2656 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitArrayToPointerDecay(Array: E),
2657 PointeeType: CE->getType()->getPointeeType());
2658 case CK_FunctionToPointerDecay:
2659 return EmitLValue(E).getPointer(CGF);
2660
2661 case CK_NullToPointer:
2662 if (MustVisitNullValue(E))
2663 CGF.EmitIgnoredExpr(E);
2664
2665 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: ConvertType(T: DestTy)),
2666 QT: DestTy);
2667
2668 case CK_NullToMemberPointer: {
2669 if (MustVisitNullValue(E))
2670 CGF.EmitIgnoredExpr(E);
2671
2672 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2673 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2674 }
2675
2676 case CK_ReinterpretMemberPointer:
2677 case CK_BaseToDerivedMemberPointer:
2678 case CK_DerivedToBaseMemberPointer: {
2679 Value *Src = Visit(E);
2680
2681 // Note that the AST doesn't distinguish between checked and
2682 // unchecked member pointer conversions, so we always have to
2683 // implement checked conversions here. This is inefficient when
2684 // actual control flow may be required in order to perform the
2685 // check, which it is for data member pointers (but not member
2686 // function pointers on Itanium and ARM).
2687 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, E: CE, Src);
2688 }
2689
2690 case CK_ARCProduceObject:
2691 return CGF.EmitARCRetainScalarExpr(expr: E);
2692 case CK_ARCConsumeObject:
2693 return CGF.EmitObjCConsumeObject(T: E->getType(), Ptr: Visit(E));
2694 case CK_ARCReclaimReturnedObject:
2695 return CGF.EmitARCReclaimReturnedObject(e: E, /*allowUnsafe*/ allowUnsafeClaim: Ignored);
2696 case CK_ARCExtendBlockObject:
2697 return CGF.EmitARCExtendBlockObject(expr: E);
2698
2699 case CK_CopyAndAutoreleaseBlockObject:
2700 return CGF.EmitBlockCopyAndAutorelease(Block: Visit(E), Ty: E->getType());
2701
2702 case CK_FloatingRealToComplex:
2703 case CK_FloatingComplexCast:
2704 case CK_IntegralRealToComplex:
2705 case CK_IntegralComplexCast:
2706 case CK_IntegralComplexToFloatingComplex:
2707 case CK_FloatingComplexToIntegralComplex:
2708 case CK_ConstructorConversion:
2709 case CK_ToUnion:
2710 case CK_HLSLArrayRValue:
2711 llvm_unreachable("scalar cast to non-scalar value");
2712
2713 case CK_LValueToRValue:
2714 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2715 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2716 return Visit(E);
2717
2718 case CK_IntegralToPointer: {
2719 Value *Src = Visit(E);
2720
2721 // First, convert to the correct width so that we control the kind of
2722 // extension.
2723 auto DestLLVMTy = ConvertType(T: DestTy);
2724 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2725 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2726 llvm::Value* IntResult =
2727 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
2728
2729 auto *IntToPtr = Builder.CreateIntToPtr(V: IntResult, DestTy: DestLLVMTy);
2730
2731 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2732      // Going from an integer to a pointer that could be dynamic requires
2733      // reloading dynamic information from the invariant.group.
2734 if (DestTy.mayBeDynamicClass())
2735 IntToPtr = Builder.CreateLaunderInvariantGroup(Ptr: IntToPtr);
2736 }
2737
2738 IntToPtr = CGF.authPointerToPointerCast(ResultPtr: IntToPtr, SourceType: E->getType(), DestType: DestTy);
2739 return IntToPtr;
2740 }
2741 case CK_PointerToIntegral: {
2742 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2743 auto *PtrExpr = Visit(E);
2744
2745 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2746 const QualType SrcType = E->getType();
2747
2748      // Casting to integer requires stripping dynamic information as it does
2749      // not carry it.
2750 if (SrcType.mayBeDynamicClass())
2751 PtrExpr = Builder.CreateStripInvariantGroup(Ptr: PtrExpr);
2752 }
2753
2754 PtrExpr = CGF.authPointerToPointerCast(ResultPtr: PtrExpr, SourceType: E->getType(), DestType: DestTy);
2755 return Builder.CreatePtrToInt(V: PtrExpr, DestTy: ConvertType(T: DestTy));
2756 }
2757 case CK_ToVoid: {
2758 CGF.EmitIgnoredExpr(E);
2759 return nullptr;
2760 }
2761 case CK_MatrixCast: {
2762 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2763 Loc: CE->getExprLoc());
2764 }
2765  // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1.
2766  // Sema inserted casts to convert the source expression to a scalar and to
2767  // perform any necessary scalar conversion, so this cast can be handled by
2768  // the regular vector-splat code below.
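  // For example, assigning the scalar 1.0f to an ext_vector float4 goes
  // through CK_VectorSplat and produces a vector with every lane equal to the
  // converted scalar.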
2769 case CK_HLSLAggregateSplatCast:
2770 case CK_VectorSplat: {
2771 llvm::Type *DstTy = ConvertType(T: DestTy);
2772 Value *Elt = Visit(E);
2773 // Splat the element across to all elements
2774 llvm::ElementCount NumElements =
2775 cast<llvm::VectorType>(Val: DstTy)->getElementCount();
2776 return Builder.CreateVectorSplat(EC: NumElements, V: Elt, Name: "splat");
2777 }
2778
2779 case CK_FixedPointCast:
2780 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2781 Loc: CE->getExprLoc());
2782
2783 case CK_FixedPointToBoolean:
2784 assert(E->getType()->isFixedPointType() &&
2785 "Expected src type to be fixed point type");
2786 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2787 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2788 Loc: CE->getExprLoc());
2789
2790 case CK_FixedPointToIntegral:
2791 assert(E->getType()->isFixedPointType() &&
2792 "Expected src type to be fixed point type");
2793 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2794 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2795 Loc: CE->getExprLoc());
2796
2797 case CK_IntegralToFixedPoint:
2798 assert(E->getType()->isIntegerType() &&
2799 "Expected src type to be an integer");
2800 assert(DestTy->isFixedPointType() &&
2801 "Expected dest type to be fixed point type");
2802 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2803 Loc: CE->getExprLoc());
2804
2805 case CK_IntegralCast: {
2806 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2807 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2808 return Builder.CreateIntCast(V: Visit(E), DestTy: ConvertType(T: DestTy),
2809 isSigned: SrcElTy->isSignedIntegerOrEnumerationType(),
2810 Name: "conv");
2811 }
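    // Only arm the implicit-conversion sanitizer checks for genuinely implicit
    // casts; a cast written explicitly in the source (e.g. '(char)i') is
    // exempt.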
2812 ScalarConversionOpts Opts;
2813 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2814 if (!ICE->isPartOfExplicitCast())
2815 Opts = ScalarConversionOpts(CGF.SanOpts);
2816 }
2817 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2818 Loc: CE->getExprLoc(), Opts);
2819 }
2820 case CK_IntegralToFloating: {
2821 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2822 // TODO: Support constrained FP intrinsics.
2823 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2824 if (SrcElTy->isSignedIntegerOrEnumerationType())
2825 return Builder.CreateSIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2826 return Builder.CreateUIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2827 }
2828 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2829 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2830 Loc: CE->getExprLoc());
2831 }
2832 case CK_FloatingToIntegral: {
2833 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2834 // TODO: Support constrained FP intrinsics.
2835 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2836 if (DstElTy->isSignedIntegerOrEnumerationType())
2837 return Builder.CreateFPToSI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2838 return Builder.CreateFPToUI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2839 }
2840 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2841 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2842 Loc: CE->getExprLoc());
2843 }
2844 case CK_FloatingCast: {
2845 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2846 // TODO: Support constrained FP intrinsics.
2847 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2848 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2849 if (DstElTy->castAs<BuiltinType>()->getKind() <
2850 SrcElTy->castAs<BuiltinType>()->getKind())
2851 return Builder.CreateFPTrunc(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2852 return Builder.CreateFPExt(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2853 }
2854 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2855 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2856 Loc: CE->getExprLoc());
2857 }
2858 case CK_FixedPointToFloating:
2859 case CK_FloatingToFixedPoint: {
2860 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2861 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2862 Loc: CE->getExprLoc());
2863 }
2864 case CK_BooleanToSignedIntegral: {
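    // CK_BooleanToSignedIntegral maps true to -1 and false to 0 (the i1 is
    // sign-extended), hence TreatBooleanAsSigned below.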
2865 ScalarConversionOpts Opts;
2866 Opts.TreatBooleanAsSigned = true;
2867 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2868 Loc: CE->getExprLoc(), Opts);
2869 }
2870 case CK_IntegralToBoolean:
2871 return EmitIntToBoolConversion(V: Visit(E));
2872 case CK_PointerToBoolean:
2873 return EmitPointerToBoolConversion(V: Visit(E), QT: E->getType());
2874 case CK_FloatingToBoolean: {
2875 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2876 return EmitFloatToBoolConversion(V: Visit(E));
2877 }
2878 case CK_MemberPointerToBoolean: {
2879 llvm::Value *MemPtr = Visit(E);
2880 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2881 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2882 }
2883
2884 case CK_FloatingComplexToReal:
2885 case CK_IntegralComplexToReal:
2886 return CGF.EmitComplexExpr(E, IgnoreReal: false, IgnoreImag: true).first;
2887
2888 case CK_FloatingComplexToBoolean:
2889 case CK_IntegralComplexToBoolean: {
2890 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2891
2892 // TODO: kill this function off, inline appropriate case here
2893 return EmitComplexToScalarConversion(Src: V, SrcTy: E->getType(), DstTy: DestTy,
2894 Loc: CE->getExprLoc());
2895 }
2896
2897 case CK_ZeroToOCLOpaqueType: {
2898 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2899 DestTy->isOCLIntelSubgroupAVCType()) &&
2900            "CK_ZeroToOCLOpaqueType cast on non-OpenCL-opaque type");
2901 return llvm::Constant::getNullValue(Ty: ConvertType(T: DestTy));
2902 }
2903
2904 case CK_IntToOCLSampler:
2905 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2906
2907 case CK_HLSLVectorTruncation: {
2908 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
2909 "Destination type must be a vector or builtin type.");
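    // Truncation keeps the leading lanes: e.g. a float4 -> float2 truncation
    // keeps lanes {0, 1}, and truncation to a scalar extracts lane 0.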
2910 Value *Vec = Visit(E);
2911 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2912 SmallVector<int> Mask;
2913 unsigned NumElts = VecTy->getNumElements();
2914 for (unsigned I = 0; I != NumElts; ++I)
2915 Mask.push_back(Elt: I);
2916
2917 return Builder.CreateShuffleVector(V: Vec, Mask, Name: "trunc");
2918 }
2919 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.SizeTy);
2920 return Builder.CreateExtractElement(Vec, Idx: Zero, Name: "cast.vtrunc");
2921 }
2922 case CK_HLSLElementwiseCast: {
2923 RValue RV = CGF.EmitAnyExpr(E);
2924 SourceLocation Loc = CE->getExprLoc();
2925 QualType SrcTy = E->getType();
2926
2927 assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast.");
2928 // RHS is an aggregate
2929 Address SrcVal = RV.getAggregateAddress();
2930 return EmitHLSLElementwiseCast(CGF, RHSVal: SrcVal, RHSTy: SrcTy, LHSTy: DestTy, Loc);
2931 }
2932 } // end of switch
2933
2934 llvm_unreachable("unknown scalar cast");
2935}
2936
2937Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2938 CodeGenFunction::StmtExprEvaluation eval(CGF);
2939 Address RetAlloca = CGF.EmitCompoundStmt(S: *E->getSubStmt(),
2940 GetLast: !E->getType()->isVoidType());
2941 if (!RetAlloca.isValid())
2942 return nullptr;
2943 return CGF.EmitLoadOfScalar(lvalue: CGF.MakeAddrLValue(Addr: RetAlloca, T: E->getType()),
2944 Loc: E->getExprLoc());
2945}
2946
2947Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2948 CodeGenFunction::RunCleanupsScope Scope(CGF);
2949 Value *V = Visit(E: E->getSubExpr());
2950 // Defend against dominance problems caused by jumps out of expression
2951 // evaluation through the shared cleanup block.
2952 Scope.ForceCleanup(ValuesToReload: {&V});
2953 return V;
2954}
2955
2956//===----------------------------------------------------------------------===//
2957// Unary Operators
2958//===----------------------------------------------------------------------===//
2959
2960static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2961 llvm::Value *InVal, bool IsInc,
2962 FPOptions FPFeatures) {
2963 BinOpInfo BinOp;
2964 BinOp.LHS = InVal;
2965 BinOp.RHS = llvm::ConstantInt::get(Ty: InVal->getType(), V: 1, IsSigned: false);
2966 BinOp.Ty = E->getType();
2967 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2968 BinOp.FPFeatures = FPFeatures;
2969 BinOp.E = E;
2970 return BinOp;
2971}
2972
2973llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2974 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2975 llvm::Value *Amount =
2976 llvm::ConstantInt::get(Ty: InVal->getType(), V: IsInc ? 1 : -1, IsSigned: true);
2977 StringRef Name = IsInc ? "inc" : "dec";
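  // -fwrapv (SOB_Defined) uses a plain add, the default (SOB_Undefined) adds
  // the nsw flag, and -ftrapv (SOB_Trapping) emits an overflow-checked add.
  // When the signed-integer-overflow sanitizer is enabled, the first two cases
  // fall through so that the check is still emitted.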
2978 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2979 case LangOptions::SOB_Defined:
2980 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2981 return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
2982 [[fallthrough]];
2983 case LangOptions::SOB_Undefined:
2984 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2985 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2986 [[fallthrough]];
2987 case LangOptions::SOB_Trapping:
2988 BinOpInfo Info = createBinOpInfoFromIncDec(
2989 E, InVal, IsInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
2990 if (!E->canOverflow() || CanElideOverflowCheck(Ctx: CGF.getContext(), Op: Info))
2991 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2992 return EmitOverflowCheckedBinOp(Ops: Info);
2993 }
2994 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2995}
2996
2997/// For the purposes of overflow pattern exclusion, does this match the
2998/// "while(i--)" pattern?
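/// For example:
///   unsigned i = n;
///   while (i--) { /* ... */ }
/// The final 'i--' wraps from 0 to UINT_MAX, which would otherwise trip the
/// unsigned-integer-overflow check even though the idiom is intentional.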
2999static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
3000 bool isPre, ASTContext &Ctx) {
3001 if (isInc || isPre)
3002 return false;
3003
3004 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
3005 if (!Ctx.getLangOpts().isOverflowPatternExcluded(
3006 Kind: LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
3007 return false;
3008
3009  // All parents (usually just one) must be WhileStmts.
3010 for (const auto &Parent : Ctx.getParentMapContext().getParents(Node: *UO))
3011 if (!Parent.get<WhileStmt>())
3012 return false;
3013
3014 return true;
3015}
3016
3017namespace {
3018/// Handles check and update for lastprivate conditional variables.
3019class OMPLastprivateConditionalUpdateRAII {
3020private:
3021 CodeGenFunction &CGF;
3022 const UnaryOperator *E;
3023
3024public:
3025 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3026 const UnaryOperator *E)
3027 : CGF(CGF), E(E) {}
3028 ~OMPLastprivateConditionalUpdateRAII() {
3029 if (CGF.getLangOpts().OpenMP)
3030 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
3031 CGF, LHS: E->getSubExpr());
3032 }
3033};
3034} // namespace
3035
3036llvm::Value *
3037ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3038 bool isInc, bool isPre) {
3039 ApplyAtomGroup Grp(CGF.getDebugInfo());
3040 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3041 QualType type = E->getSubExpr()->getType();
3042 llvm::PHINode *atomicPHI = nullptr;
3043 llvm::Value *value;
3044 llvm::Value *input;
3045 llvm::Value *Previous = nullptr;
3046 QualType SrcType = E->getType();
3047
3048 int amount = (isInc ? 1 : -1);
3049 bool isSubtraction = !isInc;
3050
3051 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3052 type = atomicTy->getValueType();
3053 if (isInc && type->isBooleanType()) {
3054 llvm::Value *True = CGF.EmitToMemory(Value: Builder.getTrue(), Ty: type);
3055 if (isPre) {
3056 Builder.CreateStore(Val: True, Addr: LV.getAddress(), IsVolatile: LV.isVolatileQualified())
3057 ->setAtomic(Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3058 return Builder.getTrue();
3059 }
3060      // For an atomic bool increment, preincrement just stores true and
3061      // returns it (handled above); postincrement does an atomic swap with true.
3062 return Builder.CreateAtomicRMW(
3063 Op: llvm::AtomicRMWInst::Xchg, Addr: LV.getAddress(), Val: True,
3064 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3065 }
3066    // Special case for atomic increment / decrement on integers: emit
3067    // atomicrmw instructions. We skip this when overflow checking is
3068    // requested and fall into the slow path with the atomic cmpxchg loop.
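    // For example, '++x' on an _Atomic int becomes a single seq_cst
    // 'atomicrmw add' of 1; pre-increment then returns old + 1 while
    // post-increment returns the old value directly.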
3069 if (!type->isBooleanType() && type->isIntegerType() &&
3070 !(type->isUnsignedIntegerType() &&
3071 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3072 CGF.getLangOpts().getSignedOverflowBehavior() !=
3073 LangOptions::SOB_Trapping) {
3074 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3075 llvm::AtomicRMWInst::Sub;
3076 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3077 llvm::Instruction::Sub;
3078 llvm::Value *amt = CGF.EmitToMemory(
3079 Value: llvm::ConstantInt::get(Ty: ConvertType(T: type), V: 1, IsSigned: true), Ty: type);
3080 llvm::Value *old =
3081 Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(), Val: amt,
3082 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3083 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
3084 }
3085 // Special case for atomic increment/decrement on floats.
3086    // Bail out on non-power-of-2-sized floating point types (e.g., x86_fp80).
3087 if (type->isFloatingType()) {
3088 llvm::Type *Ty = ConvertType(T: type);
3089 if (llvm::has_single_bit(Value: Ty->getScalarSizeInBits())) {
3090 llvm::AtomicRMWInst::BinOp aop =
3091 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3092 llvm::Instruction::BinaryOps op =
3093 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3094 llvm::Value *amt = llvm::ConstantFP::get(Ty, V: 1.0);
3095 llvm::AtomicRMWInst *old =
3096 CGF.emitAtomicRMWInst(Op: aop, Addr: LV.getAddress(), Val: amt,
3097 Order: llvm::AtomicOrdering::SequentiallyConsistent);
3098
3099 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
3100 }
3101 }
3102 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
3103 input = value;
3104 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3105 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3106 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3107 value = CGF.EmitToMemory(Value: value, Ty: type);
3108 Builder.CreateBr(Dest: opBB);
3109 Builder.SetInsertPoint(opBB);
3110 atomicPHI = Builder.CreatePHI(Ty: value->getType(), NumReservedValues: 2);
3111 atomicPHI->addIncoming(V: value, BB: startBB);
3112 value = atomicPHI;
3113 } else {
3114 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
3115 input = value;
3116 }
3117
3118 // Special case of integer increment that we have to check first: bool++.
3119 // Due to promotion rules, we get:
3120 // bool++ -> bool = bool + 1
3121 // -> bool = (int)bool + 1
3122 // -> bool = ((int)bool + 1 != 0)
3123 // An interesting aspect of this is that increment is always true.
3124 // Decrement does not have this property.
3125 if (isInc && type->isBooleanType()) {
3126 value = Builder.getTrue();
3127
3128 // Most common case by far: integer increment.
3129 } else if (type->isIntegerType()) {
3130 QualType promotedType;
3131 bool canPerformLossyDemotionCheck = false;
3132
3133 bool excludeOverflowPattern =
3134 matchesPostDecrInWhile(UO: E, isInc, isPre, Ctx&: CGF.getContext());
3135
3136 if (CGF.getContext().isPromotableIntegerType(T: type)) {
3137 promotedType = CGF.getContext().getPromotedIntegerType(PromotableType: type);
3138 assert(promotedType != type && "Shouldn't promote to the same type.");
3139 canPerformLossyDemotionCheck = true;
3140 canPerformLossyDemotionCheck &=
3141 CGF.getContext().getCanonicalType(T: type) !=
3142 CGF.getContext().getCanonicalType(T: promotedType);
3143 canPerformLossyDemotionCheck &=
3144 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
3145 SrcType: type, DstType: promotedType);
3146 assert((!canPerformLossyDemotionCheck ||
3147 type->isSignedIntegerOrEnumerationType() ||
3148 promotedType->isSignedIntegerOrEnumerationType() ||
3149 ConvertType(type)->getScalarSizeInBits() ==
3150 ConvertType(promotedType)->getScalarSizeInBits()) &&
3151 "The following check expects that if we do promotion to different "
3152 "underlying canonical type, at least one of the types (either "
3153 "base or promoted) will be signed, or the bitwidths will match.");
3154 }
3155 if (CGF.SanOpts.hasOneOf(
3156 K: SanitizerKind::ImplicitIntegerArithmeticValueChange |
3157 SanitizerKind::ImplicitBitfieldConversion) &&
3158 canPerformLossyDemotionCheck) {
3159      // While `x += 1` (for `x` with width less than int) is modeled as
3160      // promotion+arithmetic+demotion, so lossy demotion is easy to catch,
3161      // inc/dec with width less than int can't overflow because of promotion
3162      // rules, so we normally omit the promotion+demotion, which means we
3163      // cannot catch lossy "demotion". Because we still want to catch these
3164      // cases when the sanitizer is enabled, we perform the promotion, then
3165      // the increment/decrement in the wider type, and finally the demotion.
3166      // This will catch lossy demotions.
3167
3168 // We have a special case for bitfields defined using all the bits of the
3169 // type. In this case we need to do the same trick as for the integer
3170 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
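      // For example, with -fsanitize=implicit-conversion:
      //   unsigned char c = 255;
      //   c++;   // computed as (int)c + 1 == 256, then demoted back to
      //          // unsigned char (0), which the truncation check reports.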
3171
3172 value = EmitScalarConversion(Src: value, SrcType: type, DstType: promotedType, Loc: E->getExprLoc());
3173 Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
3174 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3175 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3176 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3177 // checks will take care of the conversion.
3178 ScalarConversionOpts Opts;
3179 if (!LV.isBitField())
3180 Opts = ScalarConversionOpts(CGF.SanOpts);
3181 else if (CGF.SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) {
3182 Previous = value;
3183 SrcType = promotedType;
3184 }
3185
3186 value = EmitScalarConversion(Src: value, SrcType: promotedType, DstType: type, Loc: E->getExprLoc(),
3187 Opts);
3188
3189 // Note that signed integer inc/dec with width less than int can't
3190 // overflow because of promotion rules; we're just eliding a few steps
3191 // here.
3192 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3193 value = EmitIncDecConsiderOverflowBehavior(E, InVal: value, IsInc: isInc);
3194 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3195 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
3196 !excludeOverflowPattern &&
3197 !CGF.getContext().isTypeIgnoredBySanitizer(
3198 Mask: SanitizerKind::UnsignedIntegerOverflow, Ty: E->getType())) {
3199 value = EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
3200 E, InVal: value, IsInc: isInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
3201 } else {
3202 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
3203 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3204 }
3205
3206 // Next most common: pointer increment.
3207 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3208 QualType type = ptr->getPointeeType();
3209
3210 // VLA types don't have constant size.
3211 if (const VariableArrayType *vla
3212 = CGF.getContext().getAsVariableArrayType(T: type)) {
3213 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3214 if (!isInc) numElts = Builder.CreateNSWNeg(V: numElts, Name: "vla.negsize");
3215 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
3216 if (CGF.getLangOpts().PointerOverflowDefined)
3217 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: numElts, Name: "vla.inc");
3218 else
3219 value = CGF.EmitCheckedInBoundsGEP(
3220 ElemTy: elemTy, Ptr: value, IdxList: numElts, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3221 Loc: E->getExprLoc(), Name: "vla.inc");
3222
3223 // Arithmetic on function pointers (!) is just +-1.
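    // (GNU extension: the function type is treated as having size 1, which is
    // why the GEP below is over i8.)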
3224 } else if (type->isFunctionType()) {
3225 llvm::Value *amt = Builder.getInt32(C: amount);
3226
3227 if (CGF.getLangOpts().PointerOverflowDefined)
3228 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: amt, Name: "incdec.funcptr");
3229 else
3230 value =
3231 CGF.EmitCheckedInBoundsGEP(ElemTy: CGF.Int8Ty, Ptr: value, IdxList: amt,
3232 /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3233 Loc: E->getExprLoc(), Name: "incdec.funcptr");
3234
3235 // For everything else, we can just do a simple increment.
3236 } else {
3237 llvm::Value *amt = Builder.getInt32(C: amount);
3238 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
3239 if (CGF.getLangOpts().PointerOverflowDefined)
3240 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: amt, Name: "incdec.ptr");
3241 else
3242 value = CGF.EmitCheckedInBoundsGEP(
3243 ElemTy: elemTy, Ptr: value, IdxList: amt, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3244 Loc: E->getExprLoc(), Name: "incdec.ptr");
3245 }
3246
3247 // Vector increment/decrement.
3248 } else if (type->isVectorType()) {
3249 if (type->hasIntegerRepresentation()) {
3250 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount);
3251
3252 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3253 } else {
3254 value = Builder.CreateFAdd(
3255 L: value,
3256 R: llvm::ConstantFP::get(Ty: value->getType(), V: amount),
3257 Name: isInc ? "inc" : "dec");
3258 }
3259
3260 // Floating point.
3261 } else if (type->isRealFloatingType()) {
3262 // Add the inc/dec to the real part.
3263 llvm::Value *amt;
3264 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3265
3266 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3267 // Another special case: half FP increment should be done via float
3268 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3269 value = Builder.CreateCall(
3270 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16,
3271 Tys: CGF.CGM.FloatTy),
3272 Args: input, Name: "incdec.conv");
3273 } else {
3274 value = Builder.CreateFPExt(V: input, DestTy: CGF.CGM.FloatTy, Name: "incdec.conv");
3275 }
3276 }
3277
3278 if (value->getType()->isFloatTy())
3279 amt = llvm::ConstantFP::get(Context&: VMContext,
3280 V: llvm::APFloat(static_cast<float>(amount)));
3281 else if (value->getType()->isDoubleTy())
3282 amt = llvm::ConstantFP::get(Context&: VMContext,
3283 V: llvm::APFloat(static_cast<double>(amount)));
3284 else {
3285 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3286 // Convert from float.
3287 llvm::APFloat F(static_cast<float>(amount));
3288 bool ignored;
3289 const llvm::fltSemantics *FS;
3290 // Don't use getFloatTypeSemantics because Half isn't
3291 // necessarily represented using the "half" LLVM type.
3292 if (value->getType()->isFP128Ty())
3293 FS = &CGF.getTarget().getFloat128Format();
3294 else if (value->getType()->isHalfTy())
3295 FS = &CGF.getTarget().getHalfFormat();
3296 else if (value->getType()->isBFloatTy())
3297 FS = &CGF.getTarget().getBFloat16Format();
3298 else if (value->getType()->isPPC_FP128Ty())
3299 FS = &CGF.getTarget().getIbm128Format();
3300 else
3301 FS = &CGF.getTarget().getLongDoubleFormat();
3302 F.convert(ToSemantics: *FS, RM: llvm::APFloat::rmTowardZero, losesInfo: &ignored);
3303 amt = llvm::ConstantFP::get(Context&: VMContext, V: F);
3304 }
3305 value = Builder.CreateFAdd(L: value, R: amt, Name: isInc ? "inc" : "dec");
3306
3307 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3308 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3309 value = Builder.CreateCall(
3310 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16,
3311 Tys: CGF.CGM.FloatTy),
3312 Args: value, Name: "incdec.conv");
3313 } else {
3314 value = Builder.CreateFPTrunc(V: value, DestTy: input->getType(), Name: "incdec.conv");
3315 }
3316 }
3317
3318 // Fixed-point types.
3319 } else if (type->isFixedPointType()) {
3320 // Fixed-point types are tricky. In some cases, it isn't possible to
3321 // represent a 1 or a -1 in the type at all. Piggyback off of
3322 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3323 BinOpInfo Info;
3324 Info.E = E;
3325 Info.Ty = E->getType();
3326 Info.Opcode = isInc ? BO_Add : BO_Sub;
3327 Info.LHS = value;
3328 Info.RHS = llvm::ConstantInt::get(Ty: value->getType(), V: 1, IsSigned: false);
3329 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3330 // since -1 is guaranteed to be representable.
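    // For example, for a signed _Fract (range [-1, 1)) the value 1 is not
    // representable, so '++x' is emitted as 'x - (-1)' rather than 'x + 1'.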
3331 if (type->isSignedFixedPointType()) {
3332 Info.Opcode = isInc ? BO_Sub : BO_Add;
3333 Info.RHS = Builder.CreateNeg(V: Info.RHS);
3334 }
3335 // Now, convert from our invented integer literal to the type of the unary
3336 // op. This will upscale and saturate if necessary. This value can become
3337 // undef in some cases.
3338 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3339 auto DstSema = CGF.getContext().getFixedPointSemantics(Ty: Info.Ty);
3340 Info.RHS = FPBuilder.CreateIntegerToFixed(Src: Info.RHS, SrcIsSigned: true, DstSema);
3341 value = EmitFixedPointBinOp(Ops: Info);
3342
3343 // Objective-C pointer types.
3344 } else {
3345 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3346
3347 CharUnits size = CGF.getContext().getTypeSizeInChars(T: OPT->getObjectType());
3348 if (!isInc) size = -size;
3349 llvm::Value *sizeValue =
3350 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: size.getQuantity());
3351
3352 if (CGF.getLangOpts().PointerOverflowDefined)
3353 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, Name: "incdec.objptr");
3354 else
3355 value = CGF.EmitCheckedInBoundsGEP(
3356 ElemTy: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3357 Loc: E->getExprLoc(), Name: "incdec.objptr");
3358 value = Builder.CreateBitCast(V: value, DestTy: input->getType());
3359 }
3360
3361 if (atomicPHI) {
3362 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3363 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3364 auto Pair = CGF.EmitAtomicCompareExchange(
3365 Obj: LV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: value), Loc: E->getExprLoc());
3366 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: type);
3367 llvm::Value *success = Pair.second;
3368 atomicPHI->addIncoming(V: old, BB: curBlock);
3369 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3370 Builder.SetInsertPoint(contBB);
3371 return isPre ? value : input;
3372 }
3373
3374 // Store the updated result through the lvalue.
3375 if (LV.isBitField()) {
3376 Value *Src = Previous ? Previous : value;
3377 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: value), Dst: LV, Result: &value);
3378 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: value, DstType: E->getType(),
3379 Info: LV.getBitFieldInfo(), Loc: E->getExprLoc());
3380 } else
3381 CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV);
3382
3383 // If this is a postinc, return the value read from memory, otherwise use the
3384 // updated value.
3385 return isPre ? value : input;
3386}
3387
3388
3389Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3390 QualType PromotionType) {
3391 QualType promotionTy = PromotionType.isNull()
3392 ? getPromotionType(Ty: E->getSubExpr()->getType())
3393 : PromotionType;
3394 Value *result = VisitPlus(E, PromotionType: promotionTy);
3395 if (result && !promotionTy.isNull())
3396 result = EmitUnPromotedValue(result, ExprType: E->getType());
3397 return result;
3398}
3399
3400Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3401 QualType PromotionType) {
3402 // This differs from gcc, though, most likely due to a bug in gcc.
3403 TestAndClearIgnoreResultAssign();
3404 if (!PromotionType.isNull())
3405 return CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3406 return Visit(E: E->getSubExpr());
3407}
3408
3409Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3410 QualType PromotionType) {
3411 QualType promotionTy = PromotionType.isNull()
3412 ? getPromotionType(Ty: E->getSubExpr()->getType())
3413 : PromotionType;
3414 Value *result = VisitMinus(E, PromotionType: promotionTy);
3415 if (result && !promotionTy.isNull())
3416 result = EmitUnPromotedValue(result, ExprType: E->getType());
3417 return result;
3418}
3419
3420Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3421 QualType PromotionType) {
3422 TestAndClearIgnoreResultAssign();
3423 Value *Op;
3424 if (!PromotionType.isNull())
3425 Op = CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3426 else
3427 Op = Visit(E: E->getSubExpr());
3428
3429 // Generate a unary FNeg for FP ops.
3430 if (Op->getType()->isFPOrFPVectorTy())
3431 return Builder.CreateFNeg(V: Op, Name: "fneg");
3432
3433 // Emit unary minus with EmitSub so we handle overflow cases etc.
3434 BinOpInfo BinOp;
3435 BinOp.RHS = Op;
3436 BinOp.LHS = llvm::Constant::getNullValue(Ty: BinOp.RHS->getType());
3437 BinOp.Ty = E->getType();
3438 BinOp.Opcode = BO_Sub;
3439 BinOp.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3440 BinOp.E = E;
3441 return EmitSub(Ops: BinOp);
3442}
3443
3444Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3445 TestAndClearIgnoreResultAssign();
3446 Value *Op = Visit(E: E->getSubExpr());
3447 return Builder.CreateNot(V: Op, Name: "not");
3448}
3449
3450Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3451 // Perform vector logical not on comparison with zero vector.
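  // e.g. for a 4 x i32 vector, '!v' yields (v == 0) sign-extended back to
  // 4 x i32, i.e. -1 in the lanes that were zero and 0 elsewhere.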
3452 if (E->getType()->isVectorType() &&
3453 E->getType()->castAs<VectorType>()->getVectorKind() ==
3454 VectorKind::Generic) {
3455 Value *Oper = Visit(E: E->getSubExpr());
3456 Value *Zero = llvm::Constant::getNullValue(Ty: Oper->getType());
3457 Value *Result;
3458 if (Oper->getType()->isFPOrFPVectorTy()) {
3459 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3460 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
3461 Result = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_OEQ, LHS: Oper, RHS: Zero, Name: "cmp");
3462 } else
3463 Result = Builder.CreateICmp(P: llvm::CmpInst::ICMP_EQ, LHS: Oper, RHS: Zero, Name: "cmp");
3464 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
3465 }
3466
3467 // Compare operand to zero.
3468 Value *BoolVal = CGF.EvaluateExprAsBool(E: E->getSubExpr());
3469
3470 // Invert value.
3471 // TODO: Could dynamically modify easy computations here. For example, if
3472 // the operand is an icmp ne, turn into icmp eq.
3473 BoolVal = Builder.CreateNot(V: BoolVal, Name: "lnot");
3474
3475 // ZExt result to the expr type.
3476 return Builder.CreateZExt(V: BoolVal, DestTy: ConvertType(T: E->getType()), Name: "lnot.ext");
3477}
3478
3479Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3480 // Try folding the offsetof to a constant.
3481 Expr::EvalResult EVResult;
3482 if (E->EvaluateAsInt(Result&: EVResult, Ctx: CGF.getContext())) {
3483 llvm::APSInt Value = EVResult.Val.getInt();
3484 return Builder.getInt(AI: Value);
3485 }
3486
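  // A non-constant offsetof, e.g. __builtin_offsetof(struct S, arr[idx]) with
  // a runtime 'idx', cannot be folded and is computed component by component
  // below.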
3487 // Loop over the components of the offsetof to compute the value.
3488 unsigned n = E->getNumComponents();
3489 llvm::Type* ResultType = ConvertType(T: E->getType());
3490 llvm::Value* Result = llvm::Constant::getNullValue(Ty: ResultType);
3491 QualType CurrentType = E->getTypeSourceInfo()->getType();
3492 for (unsigned i = 0; i != n; ++i) {
3493 OffsetOfNode ON = E->getComponent(Idx: i);
3494 llvm::Value *Offset = nullptr;
3495 switch (ON.getKind()) {
3496 case OffsetOfNode::Array: {
3497 // Compute the index
3498 Expr *IdxExpr = E->getIndexExpr(Idx: ON.getArrayExprIndex());
3499 llvm::Value* Idx = CGF.EmitScalarExpr(E: IdxExpr);
3500 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3501 Idx = Builder.CreateIntCast(V: Idx, DestTy: ResultType, isSigned: IdxSigned, Name: "conv");
3502
3503 // Save the element type
3504 CurrentType =
3505 CGF.getContext().getAsArrayType(T: CurrentType)->getElementType();
3506
3507 // Compute the element size
3508 llvm::Value* ElemSize = llvm::ConstantInt::get(Ty: ResultType,
3509 V: CGF.getContext().getTypeSizeInChars(T: CurrentType).getQuantity());
3510
3511 // Multiply out to compute the result
3512 Offset = Builder.CreateMul(LHS: Idx, RHS: ElemSize);
3513 break;
3514 }
3515
3516 case OffsetOfNode::Field: {
3517 FieldDecl *MemberDecl = ON.getField();
3518 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3519 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3520
3521 // Compute the index of the field in its parent.
3522 unsigned i = 0;
3523 // FIXME: It would be nice if we didn't have to loop here!
3524 for (RecordDecl::field_iterator Field = RD->field_begin(),
3525 FieldEnd = RD->field_end();
3526 Field != FieldEnd; ++Field, ++i) {
3527 if (*Field == MemberDecl)
3528 break;
3529 }
3530 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3531
3532 // Compute the offset to the field
3533 int64_t OffsetInt = RL.getFieldOffset(FieldNo: i) /
3534 CGF.getContext().getCharWidth();
3535 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt);
3536
3537 // Save the element type.
3538 CurrentType = MemberDecl->getType();
3539 break;
3540 }
3541
3542 case OffsetOfNode::Identifier:
3543 llvm_unreachable("dependent __builtin_offsetof");
3544
3545 case OffsetOfNode::Base: {
3546 if (ON.getBase()->isVirtual()) {
3547 CGF.ErrorUnsupported(S: E, Type: "virtual base in offsetof");
3548 continue;
3549 }
3550
3551 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3552 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3553
3554 // Save the element type.
3555 CurrentType = ON.getBase()->getType();
3556
3557 // Compute the offset to the base.
3558 auto *BaseRT = CurrentType->castAs<RecordType>();
3559 auto *BaseRD = cast<CXXRecordDecl>(Val: BaseRT->getDecl());
3560 CharUnits OffsetInt = RL.getBaseClassOffset(Base: BaseRD);
3561 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt.getQuantity());
3562 break;
3563 }
3564 }
3565 Result = Builder.CreateAdd(LHS: Result, RHS: Offset);
3566 }
3567 return Result;
3568}
3569
3570/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3571/// the argument of the sizeof expression as an integer.
3572Value *
3573ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3574 const UnaryExprOrTypeTraitExpr *E) {
3575 QualType TypeToSize = E->getTypeOfArgument();
3576 if (auto Kind = E->getKind();
3577 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3578 if (const VariableArrayType *VAT =
3579 CGF.getContext().getAsVariableArrayType(T: TypeToSize)) {
3580 // For _Countof, we only want to evaluate if the extent is actually
3581 // variable as opposed to a multi-dimensional array whose extent is
3582 // constant but whose element type is variable.
3583 bool EvaluateExtent = true;
3584 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3585 EvaluateExtent =
3586 !VAT->getSizeExpr()->isIntegerConstantExpr(Ctx: CGF.getContext());
3587 }
3588 if (EvaluateExtent) {
3589 if (E->isArgumentType()) {
3590 // sizeof(type) - make sure to emit the VLA size.
3591 CGF.EmitVariablyModifiedType(Ty: TypeToSize);
3592 } else {
3593 // C99 6.5.3.4p2: If the argument is an expression of type
3594 // VLA, it is evaluated.
3595 CGF.EmitIgnoredExpr(E: E->getArgumentExpr());
3596 }
3597
3598 // For _Countof, we just want to return the size of a single dimension.
3599 if (Kind == UETT_CountOf)
3600 return CGF.getVLAElements1D(vla: VAT).NumElts;
3601
3602 // For sizeof and __datasizeof, we need to scale the number of elements
3603 // by the size of the array element type.
3604 auto VlaSize = CGF.getVLASize(vla: VAT);
3605
3606 // Scale the number of non-VLA elements by the non-VLA element size.
3607 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: VlaSize.Type);
3608 if (!eltSize.isOne())
3609 return CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize),
3610 RHS: VlaSize.NumElts);
3611 return VlaSize.NumElts;
3612 }
3613 }
3614 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3615 auto Alignment =
3616 CGF.getContext()
3617 .toCharUnitsFromBits(BitSize: CGF.getContext().getOpenMPDefaultSimdAlign(
3618 T: E->getTypeOfArgument()->getPointeeType()))
3619 .getQuantity();
3620 return llvm::ConstantInt::get(Ty: CGF.SizeTy, V: Alignment);
3621 } else if (E->getKind() == UETT_VectorElements) {
3622 auto *VecTy = cast<llvm::VectorType>(Val: ConvertType(T: E->getTypeOfArgument()));
3623 return Builder.CreateElementCount(Ty: CGF.SizeTy, EC: VecTy->getElementCount());
3624 }
3625
3626 // If this isn't sizeof(vla), the result must be constant; use the constant
3627 // folding logic so we don't have to duplicate it here.
3628 return Builder.getInt(AI: E->EvaluateKnownConstInt(Ctx: CGF.getContext()));
3629}
3630
3631Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3632 QualType PromotionType) {
3633 QualType promotionTy = PromotionType.isNull()
3634 ? getPromotionType(Ty: E->getSubExpr()->getType())
3635 : PromotionType;
3636 Value *result = VisitReal(E, PromotionType: promotionTy);
3637 if (result && !promotionTy.isNull())
3638 result = EmitUnPromotedValue(result, ExprType: E->getType());
3639 return result;
3640}
3641
3642Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3643 QualType PromotionType) {
3644 Expr *Op = E->getSubExpr();
3645 if (Op->getType()->isAnyComplexType()) {
3646 // If it's an l-value, load through the appropriate subobject l-value.
3647 // Note that we have to ask E because Op might be an l-value that
3648 // this won't work for, e.g. an Obj-C property.
3649 if (E->isGLValue()) {
3650 if (!PromotionType.isNull()) {
3651 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3652 E: Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3653 if (result.first)
3654 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3655 return result.first;
3656 } else {
3657 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3658 .getScalarVal();
3659 }
3660 }
3661 // Otherwise, calculate and project.
3662 return CGF.EmitComplexExpr(E: Op, IgnoreReal: false, IgnoreImag: true).first;
3663 }
3664
3665 if (!PromotionType.isNull())
3666 return CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3667 return Visit(E: Op);
3668}
3669
3670Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3671 QualType PromotionType) {
3672 QualType promotionTy = PromotionType.isNull()
3673 ? getPromotionType(Ty: E->getSubExpr()->getType())
3674 : PromotionType;
3675 Value *result = VisitImag(E, PromotionType: promotionTy);
3676 if (result && !promotionTy.isNull())
3677 result = EmitUnPromotedValue(result, ExprType: E->getType());
3678 return result;
3679}
3680
3681Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3682 QualType PromotionType) {
3683 Expr *Op = E->getSubExpr();
3684 if (Op->getType()->isAnyComplexType()) {
3685 // If it's an l-value, load through the appropriate subobject l-value.
3686 // Note that we have to ask E because Op might be an l-value that
3687 // this won't work for, e.g. an Obj-C property.
3688 if (Op->isGLValue()) {
3689 if (!PromotionType.isNull()) {
3690 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3691 E: Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3692 if (result.second)
3693 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3694 return result.second;
3695 } else {
3696 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3697 .getScalarVal();
3698 }
3699 }
3700 // Otherwise, calculate and project.
3701 return CGF.EmitComplexExpr(E: Op, IgnoreReal: true, IgnoreImag: false).second;
3702 }
3703
3704 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3705 // effects are evaluated, but not the actual value.
3706 if (Op->isGLValue())
3707 CGF.EmitLValue(E: Op);
3708 else if (!PromotionType.isNull())
3709 CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3710 else
3711 CGF.EmitScalarExpr(E: Op, IgnoreResultAssign: true);
3712 if (!PromotionType.isNull())
3713 return llvm::Constant::getNullValue(Ty: ConvertType(T: PromotionType));
3714 return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType()));
3715}
3716
3717//===----------------------------------------------------------------------===//
3718// Binary Operators
3719//===----------------------------------------------------------------------===//
3720
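// Promotion here is a floating-point widening (e.g. _Float16 arithmetic
// carried out in float under excess-precision rules): promoted values are
// produced with fpext and un-promoted with fptrunc back to the expression
// type.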
3721Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3722 QualType PromotionType) {
3723 return CGF.Builder.CreateFPExt(V: result, DestTy: ConvertType(T: PromotionType), Name: "ext");
3724}
3725
3726Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3727 QualType ExprType) {
3728 return CGF.Builder.CreateFPTrunc(V: result, DestTy: ConvertType(T: ExprType), Name: "unpromotion");
3729}
3730
3731Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3732 E = E->IgnoreParens();
3733 if (auto BO = dyn_cast<BinaryOperator>(Val: E)) {
3734 switch (BO->getOpcode()) {
3735#define HANDLE_BINOP(OP) \
3736 case BO_##OP: \
3737 return Emit##OP(EmitBinOps(BO, PromotionType));
3738 HANDLE_BINOP(Add)
3739 HANDLE_BINOP(Sub)
3740 HANDLE_BINOP(Mul)
3741 HANDLE_BINOP(Div)
3742#undef HANDLE_BINOP
3743 default:
3744 break;
3745 }
3746 } else if (auto UO = dyn_cast<UnaryOperator>(Val: E)) {
3747 switch (UO->getOpcode()) {
3748 case UO_Imag:
3749 return VisitImag(E: UO, PromotionType);
3750 case UO_Real:
3751 return VisitReal(E: UO, PromotionType);
3752 case UO_Minus:
3753 return VisitMinus(E: UO, PromotionType);
3754 case UO_Plus:
3755 return VisitPlus(E: UO, PromotionType);
3756 default:
3757 break;
3758 }
3759 }
3760 auto result = Visit(E: const_cast<Expr *>(E));
3761 if (result) {
3762 if (!PromotionType.isNull())
3763 return EmitPromotedValue(result, PromotionType);
3764 else
3765 return EmitUnPromotedValue(result, ExprType: E->getType());
3766 }
3767 return result;
3768}
3769
3770BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3771 QualType PromotionType) {
3772 TestAndClearIgnoreResultAssign();
3773 BinOpInfo Result;
3774 Result.LHS = CGF.EmitPromotedScalarExpr(E: E->getLHS(), PromotionType);
3775 Result.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType);
3776 if (!PromotionType.isNull())
3777 Result.Ty = PromotionType;
3778 else
3779 Result.Ty = E->getType();
3780 Result.Opcode = E->getOpcode();
3781 Result.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3782 Result.E = E;
3783 return Result;
3784}
3785
3786LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3787 const CompoundAssignOperator *E,
3788 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3789 Value *&Result) {
3790 QualType LHSTy = E->getLHS()->getType();
3791 BinOpInfo OpInfo;
3792
3793 if (E->getComputationResultType()->isAnyComplexType())
3794 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3795
3796 // Emit the RHS first. __block variables need to have the rhs evaluated
3797 // first, plus this should improve codegen a little.
3798
3799 QualType PromotionTypeCR;
3800 PromotionTypeCR = getPromotionType(Ty: E->getComputationResultType());
3801 if (PromotionTypeCR.isNull())
3802 PromotionTypeCR = E->getComputationResultType();
3803 QualType PromotionTypeLHS = getPromotionType(Ty: E->getComputationLHSType());
3804 QualType PromotionTypeRHS = getPromotionType(Ty: E->getRHS()->getType());
3805 if (!PromotionTypeRHS.isNull())
3806 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType: PromotionTypeRHS);
3807 else
3808 OpInfo.RHS = Visit(E: E->getRHS());
3809 OpInfo.Ty = PromotionTypeCR;
3810 OpInfo.Opcode = E->getOpcode();
3811 OpInfo.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3812 OpInfo.E = E;
3813 // Load/convert the LHS.
3814 LValue LHSLV = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
3815
3816 llvm::PHINode *atomicPHI = nullptr;
3817 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3818 QualType type = atomicTy->getValueType();
3819 if (!type->isBooleanType() && type->isIntegerType() &&
3820 !(type->isUnsignedIntegerType() &&
3821 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3822 CGF.getLangOpts().getSignedOverflowBehavior() !=
3823 LangOptions::SOB_Trapping) {
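      // Map simple atomic compound assignments (e.g. 'x += n' on an
      // _Atomic int) onto a single atomicrmw instead of a cmpxchg loop.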
3824 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3825 llvm::Instruction::BinaryOps Op;
3826 switch (OpInfo.Opcode) {
3827      // We don't have atomicrmw operations for *, %, /, <<, >>.
3828 case BO_MulAssign: case BO_DivAssign:
3829 case BO_RemAssign:
3830 case BO_ShlAssign:
3831 case BO_ShrAssign:
3832 break;
3833 case BO_AddAssign:
3834 AtomicOp = llvm::AtomicRMWInst::Add;
3835 Op = llvm::Instruction::Add;
3836 break;
3837 case BO_SubAssign:
3838 AtomicOp = llvm::AtomicRMWInst::Sub;
3839 Op = llvm::Instruction::Sub;
3840 break;
3841 case BO_AndAssign:
3842 AtomicOp = llvm::AtomicRMWInst::And;
3843 Op = llvm::Instruction::And;
3844 break;
3845 case BO_XorAssign:
3846 AtomicOp = llvm::AtomicRMWInst::Xor;
3847 Op = llvm::Instruction::Xor;
3848 break;
3849 case BO_OrAssign:
3850 AtomicOp = llvm::AtomicRMWInst::Or;
3851 Op = llvm::Instruction::Or;
3852 break;
3853 default:
3854 llvm_unreachable("Invalid compound assignment type");
3855 }
3856 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3857 llvm::Value *Amt = CGF.EmitToMemory(
3858 Value: EmitScalarConversion(Src: OpInfo.RHS, SrcType: E->getRHS()->getType(), DstType: LHSTy,
3859 Loc: E->getExprLoc()),
3860 Ty: LHSTy);
3861
3862 llvm::AtomicRMWInst *OldVal =
3863 CGF.emitAtomicRMWInst(Op: AtomicOp, Addr: LHSLV.getAddress(), Val: Amt);
3864
3865          // Since the operation is atomic, the result type is guaranteed to be
3866          // the same as the input in LLVM terms.
3867 Result = Builder.CreateBinOp(Opc: Op, LHS: OldVal, RHS: Amt);
3868 return LHSLV;
3869 }
3870 }
3871 // FIXME: For floating point types, we should be saving and restoring the
3872 // floating point environment in the loop.
3873 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3874 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3875 OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());
3876 OpInfo.LHS = CGF.EmitToMemory(Value: OpInfo.LHS, Ty: type);
3877 Builder.CreateBr(Dest: opBB);
3878 Builder.SetInsertPoint(opBB);
3879 atomicPHI = Builder.CreatePHI(Ty: OpInfo.LHS->getType(), NumReservedValues: 2);
3880 atomicPHI->addIncoming(V: OpInfo.LHS, BB: startBB);
3881 OpInfo.LHS = atomicPHI;
3882 }
3883 else
3884 OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());
3885
3886 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3887 SourceLocation Loc = E->getExprLoc();
3888 if (!PromotionTypeLHS.isNull())
3889 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy, DstType: PromotionTypeLHS,
3890 Loc: E->getExprLoc());
3891 else
3892 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy,
3893 DstType: E->getComputationLHSType(), Loc);
3894
3895 // Expand the binary operator.
3896 Result = (this->*Func)(OpInfo);
3897
3898  // Convert the result back to the LHS type,
3899  // potentially with an implicit-conversion sanitizer check.
3900  // If LHSLV is a bitfield, use default ScalarConversionOpts
3901  // to avoid emitting any implicit integer checks.
3902 Value *Previous = nullptr;
3903 if (LHSLV.isBitField()) {
3904 Previous = Result;
3905 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc);
3906 } else
3907 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc,
3908 Opts: ScalarConversionOpts(CGF.SanOpts));
3909
3910 if (atomicPHI) {
3911 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3912 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3913 auto Pair = CGF.EmitAtomicCompareExchange(
3914 Obj: LHSLV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: Result), Loc: E->getExprLoc());
3915 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: LHSTy);
3916 llvm::Value *success = Pair.second;
3917 atomicPHI->addIncoming(V: old, BB: curBlock);
3918 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3919 Builder.SetInsertPoint(contBB);
3920 return LHSLV;
3921 }
3922
3923 // Store the result value into the LHS lvalue. Bit-fields are handled
3924 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3925 // 'An assignment expression has the value of the left operand after the
3926 // assignment...'.
3927 if (LHSLV.isBitField()) {
3928 Value *Src = Previous ? Previous : Result;
3929 QualType SrcType = E->getRHS()->getType();
3930 QualType DstType = E->getLHS()->getType();
3931 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: Result), Dst: LHSLV, Result: &Result);
3932 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType,
3933 Info: LHSLV.getBitFieldInfo(), Loc: E->getExprLoc());
3934 } else
3935 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Result), Dst: LHSLV);
3936
3937 if (CGF.getLangOpts().OpenMP)
3938 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3939 LHS: E->getLHS());
3940 return LHSLV;
3941}
3942
3943Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3944 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3945 bool Ignore = TestAndClearIgnoreResultAssign();
3946 Value *RHS = nullptr;
3947 LValue LHS = EmitCompoundAssignLValue(E, Func, Result&: RHS);
3948
3949 // If the result is clearly ignored, return now.
3950 if (Ignore)
3951 return nullptr;
3952
3953 // The result of an assignment in C is the assigned r-value.
3954 if (!CGF.getLangOpts().CPlusPlus)
3955 return RHS;
3956
3957 // If the lvalue is non-volatile, return the computed value of the assignment.
3958 if (!LHS.isVolatileQualified())
3959 return RHS;
3960
3961 // Otherwise, reload the value.
3962 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
3963}
3964
3965void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3966 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3967 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
3968 Checks;
3969
3970 if (CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero)) {
3971 Checks.push_back(Elt: std::make_pair(x: Builder.CreateICmpNE(LHS: Ops.RHS, RHS: Zero),
3972 y: SanitizerKind::SO_IntegerDivideByZero));
3973 }
3974
3975 const auto *BO = cast<BinaryOperator>(Val: Ops.E);
3976 if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow) &&
3977 Ops.Ty->hasSignedIntegerRepresentation() &&
3978 !IsWidenedIntegerOp(Ctx: CGF.getContext(), E: BO->getLHS()) &&
3979 Ops.mayHaveIntegerOverflow()) {
3980 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Val: Zero->getType());
3981
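    // The only signed division that overflows is INT_MIN / -1 (and the
    // corresponding INT_MIN % -1), so checking LHS != INT_MIN || RHS != -1 is
    // sufficient.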
3982 llvm::Value *IntMin =
3983 Builder.getInt(AI: llvm::APInt::getSignedMinValue(numBits: Ty->getBitWidth()));
3984 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3985
3986 llvm::Value *LHSCmp = Builder.CreateICmpNE(LHS: Ops.LHS, RHS: IntMin);
3987 llvm::Value *RHSCmp = Builder.CreateICmpNE(LHS: Ops.RHS, RHS: NegOne);
3988 llvm::Value *NotOverflow = Builder.CreateOr(LHS: LHSCmp, RHS: RHSCmp, Name: "or");
3989 Checks.push_back(
3990 Elt: std::make_pair(x&: NotOverflow, y: SanitizerKind::SO_SignedIntegerOverflow));
3991 }
3992
3993 if (Checks.size() > 0)
3994 EmitBinOpCheck(Checks, Info: Ops);
3995}
3996
3997Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3998 {
3999 SanitizerDebugLocation SanScope(&CGF,
4000 {SanitizerKind::SO_IntegerDivideByZero,
4001 SanitizerKind::SO_SignedIntegerOverflow,
4002 SanitizerKind::SO_FloatDivideByZero},
4003 SanitizerHandler::DivremOverflow);
4004 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
4005 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
4006 Ops.Ty->isIntegerType() &&
4007 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4008 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
4009 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: true);
4010 } else if (CGF.SanOpts.has(K: SanitizerKind::FloatDivideByZero) &&
4011 Ops.Ty->isRealFloatingType() &&
4012 Ops.mayHaveFloatDivisionByZero()) {
4013 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
4014 llvm::Value *NonZero = Builder.CreateFCmpUNE(LHS: Ops.RHS, RHS: Zero);
4015 EmitBinOpCheck(
4016 Checks: std::make_pair(x&: NonZero, y: SanitizerKind::SO_FloatDivideByZero), Info: Ops);
4017 }
4018 }
4019
4020 if (Ops.Ty->isConstantMatrixType()) {
4021 llvm::MatrixBuilder MB(Builder);
4022 // We need to check the types of the operands of the operator to get the
4023 // correct matrix dimensions.
4024 auto *BO = cast<BinaryOperator>(Val: Ops.E);
4025 (void)BO;
4026 assert(
4027 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
4028 "first operand must be a matrix");
4029 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4030 "second operand must be an arithmetic type");
4031 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4032 return MB.CreateScalarDiv(LHS: Ops.LHS, RHS: Ops.RHS,
4033 IsUnsigned: Ops.Ty->hasUnsignedIntegerRepresentation());
4034 }
4035
4036 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4037 llvm::Value *Val;
4038 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4039 Val = Builder.CreateFDiv(L: Ops.LHS, R: Ops.RHS, Name: "div");
4040 CGF.SetDivFPAccuracy(Val);
4041 return Val;
4042 }
4043 else if (Ops.isFixedPointOp())
4044 return EmitFixedPointBinOp(Ops);
4045 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4046 return Builder.CreateUDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
4047 else
4048 return Builder.CreateSDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
4049}
4050
4051Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4052 // Rem in C can't be a floating point type: C99 6.5.5p2.
4053 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
4054 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
4055 Ops.Ty->isIntegerType() &&
4056 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4057 SanitizerDebugLocation SanScope(&CGF,
4058 {SanitizerKind::SO_IntegerDivideByZero,
4059 SanitizerKind::SO_SignedIntegerOverflow},
4060 SanitizerHandler::DivremOverflow);
4061 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
4062 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: false);
4063 }
4064
4065 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4066 return Builder.CreateURem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
4067
4068 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4069 return Builder.CreateFRem(L: Ops.LHS, R: Ops.RHS, Name: "rem");
4070
4071 return Builder.CreateSRem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
4072}
4073
4074Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
4075 unsigned IID;
4076 unsigned OpID = 0;
4077 SanitizerHandler OverflowKind;
4078
4079 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
4080 switch (Ops.Opcode) {
4081 case BO_Add:
4082 case BO_AddAssign:
4083 OpID = 1;
4084 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
4085 llvm::Intrinsic::uadd_with_overflow;
4086 OverflowKind = SanitizerHandler::AddOverflow;
4087 break;
4088 case BO_Sub:
4089 case BO_SubAssign:
4090 OpID = 2;
4091 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
4092 llvm::Intrinsic::usub_with_overflow;
4093 OverflowKind = SanitizerHandler::SubOverflow;
4094 break;
4095 case BO_Mul:
4096 case BO_MulAssign:
4097 OpID = 3;
4098 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
4099 llvm::Intrinsic::umul_with_overflow;
4100 OverflowKind = SanitizerHandler::MulOverflow;
4101 break;
4102 default:
4103 llvm_unreachable("Unsupported operation for overflow detection");
4104 }
4105 OpID <<= 1;
4106 if (isSigned)
4107 OpID |= 1;
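  // OpID now encodes (operation << 1) | signedness, e.g. 3 (0b011) for a
  // signed add and 4 (0b100) for an unsigned sub; it is passed to the
  // overflow handler below so it can tell which operation overflowed.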
4108
4109 SanitizerDebugLocation SanScope(&CGF,
4110 {SanitizerKind::SO_SignedIntegerOverflow,
4111 SanitizerKind::SO_UnsignedIntegerOverflow},
4112 OverflowKind);
4113 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(T: Ops.Ty);
4114
4115 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, Tys: opTy);
4116
4117 Value *resultAndOverflow = Builder.CreateCall(Callee: intrinsic, Args: {Ops.LHS, Ops.RHS});
4118 Value *result = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 0);
4119 Value *overflow = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 1);
4120
4121 // Handle overflow with llvm.trap if no custom handler has been specified.
4122 const std::string *handlerName =
4123 &CGF.getLangOpts().OverflowHandler;
4124 if (handlerName->empty()) {
4125 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
4126 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
4127 if (!isSigned || CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) {
4128 llvm::Value *NotOverflow = Builder.CreateNot(V: overflow);
4129 SanitizerKind::SanitizerOrdinal Ordinal =
4130 isSigned ? SanitizerKind::SO_SignedIntegerOverflow
4131 : SanitizerKind::SO_UnsignedIntegerOverflow;
4132 EmitBinOpCheck(Checks: std::make_pair(x&: NotOverflow, y&: Ordinal), Info: Ops);
4133 } else
4134 CGF.EmitTrapCheck(Checked: Builder.CreateNot(V: overflow), CheckHandlerID: OverflowKind);
4135 return result;
4136 }
4137
4138 // Branch in case of overflow.
4139 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
4140 llvm::BasicBlock *continueBB =
4141 CGF.createBasicBlock(name: "nooverflow", parent: CGF.CurFn, before: initialBB->getNextNode());
4142 llvm::BasicBlock *overflowBB = CGF.createBasicBlock(name: "overflow", parent: CGF.CurFn);
4143
4144 Builder.CreateCondBr(Cond: overflow, True: overflowBB, False: continueBB);
4145
4146 // If an overflow handler is set, then we want to call it and then use its
4147 // result, if it returns.
4148 Builder.SetInsertPoint(overflowBB);
4149
4150 // Get the overflow handler.
4151 llvm::Type *Int8Ty = CGF.Int8Ty;
4152 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
4153 llvm::FunctionType *handlerTy =
4154 llvm::FunctionType::get(Result: CGF.Int64Ty, Params: argTypes, isVarArg: true);
4155 llvm::FunctionCallee handler =
4156 CGF.CGM.CreateRuntimeFunction(Ty: handlerTy, Name: *handlerName);
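  // Read illustratively (this is just how handlerTy is built above, not a
  // documented contract), the handler has a C signature along the lines of:
  //
  //   long long handler(long long lhs, long long rhs,
  //                     char opcode, char width, ...);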
4157
4158 // Sign extend the args to 64-bit, so that we can use the same handler for
4159 // all types of overflow.
4160 llvm::Value *lhs = Builder.CreateSExt(V: Ops.LHS, DestTy: CGF.Int64Ty);
4161 llvm::Value *rhs = Builder.CreateSExt(V: Ops.RHS, DestTy: CGF.Int64Ty);
4162
4163 // Call the handler with the two arguments, the operation, and the size of
4164 // the result.
4165 llvm::Value *handlerArgs[] = {
4166 lhs,
4167 rhs,
4168 Builder.getInt8(C: OpID),
4169 Builder.getInt8(C: cast<llvm::IntegerType>(Val: opTy)->getBitWidth())
4170 };
4171 llvm::Value *handlerResult =
4172 CGF.EmitNounwindRuntimeCall(callee: handler, args: handlerArgs);
4173
4174 // Truncate the result back to the desired size.
4175 handlerResult = Builder.CreateTrunc(V: handlerResult, DestTy: opTy);
4176 Builder.CreateBr(Dest: continueBB);
4177
4178 Builder.SetInsertPoint(continueBB);
4179 llvm::PHINode *phi = Builder.CreatePHI(Ty: opTy, NumReservedValues: 2);
4180 phi->addIncoming(V: result, BB: initialBB);
4181 phi->addIncoming(V: handlerResult, BB: overflowBB);
4182
4183 return phi;
4184}
4185
4186/// Emit pointer + index arithmetic.
4187static Value *emitPointerArithmetic(CodeGenFunction &CGF,
4188 const BinOpInfo &op,
4189 bool isSubtraction) {
4190 // Must have binary (not unary) expr here. Unary pointer
4191 // increment/decrement doesn't use this path.
4192 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
4193
4194 Value *pointer = op.LHS;
4195 Expr *pointerOperand = expr->getLHS();
4196 Value *index = op.RHS;
4197 Expr *indexOperand = expr->getRHS();
4198
4199 // In a subtraction, the LHS is always the pointer.
4200 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4201 std::swap(a&: pointer, b&: index);
4202 std::swap(a&: pointerOperand, b&: indexOperand);
4203 }
4204
4205 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4206
4207 unsigned width = cast<llvm::IntegerType>(Val: index->getType())->getBitWidth();
4208 auto &DL = CGF.CGM.getDataLayout();
4209 auto PtrTy = cast<llvm::PointerType>(Val: pointer->getType());
4210
4211 // Some versions of glibc and gcc use idioms (particularly in their malloc
4212 // routines) that add a pointer-sized integer (known to be a pointer value)
4213 // to a null pointer in order to cast the value back to an integer or as
4214 // part of a pointer alignment algorithm. This is undefined behavior, but
4215 // we'd like to be able to compile programs that use it.
4216 //
4217 // Normally, we'd generate a GEP with a null-pointer base here in response
4218 // to that code, but it's also UB to dereference a pointer created that
4219 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4220 // generate a direct cast of the integer value to a pointer.
4221 //
4222 // The idiom (p = nullptr + N) is not met if any of the following are true:
4223 //
4224 // The operation is subtraction.
4225 // The index is not pointer-sized.
4226 // The pointer type is not byte-sized.
4227 //
4228 // Note that we do not suppress the pointer overflow check in this case.
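  // For example (illustrative), code such as
  //
  //   void *p = (char *)0 + (uintptr_t)q;   // "cast" an integer via nullptr
  //
  // is emitted as a plain inttoptr instead of a GEP with a null base.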
4229 if (BinaryOperator::isNullPointerArithmeticExtension(
4230 Ctx&: CGF.getContext(), Opc: op.Opcode, LHS: expr->getLHS(), RHS: expr->getRHS())) {
4231 Value *Ptr = CGF.Builder.CreateIntToPtr(V: index, DestTy: pointer->getType());
4232 if (CGF.getLangOpts().PointerOverflowDefined ||
4233 !CGF.SanOpts.has(K: SanitizerKind::PointerOverflow) ||
4234 NullPointerIsDefined(F: CGF.Builder.GetInsertBlock()->getParent(),
4235 AS: PtrTy->getPointerAddressSpace()))
4236 return Ptr;
4237 // The inbounds GEP of null is valid iff the index is zero.
4238 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4239 auto CheckHandler = SanitizerHandler::PointerOverflow;
4240 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
4241 Value *IsZeroIndex = CGF.Builder.CreateIsNull(Arg: index);
4242 llvm::Constant *StaticArgs[] = {
4243 CGF.EmitCheckSourceLocation(Loc: op.E->getExprLoc())};
4244 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4245 Value *IntPtr = llvm::Constant::getNullValue(Ty: IntPtrTy);
4246 Value *ComputedGEP = CGF.Builder.CreateZExtOrTrunc(V: index, DestTy: IntPtrTy);
4247 Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4248 CGF.EmitCheck(Checked: {{IsZeroIndex, CheckOrdinal}}, Check: CheckHandler, StaticArgs,
4249 DynamicArgs);
4250 return Ptr;
4251 }
4252
4253 if (width != DL.getIndexTypeSizeInBits(Ty: PtrTy)) {
4254    // Zero-extend or sign-extend the index value according to
4255    // whether the index is signed or not.
4256 index = CGF.Builder.CreateIntCast(V: index, DestTy: DL.getIndexType(PtrTy), isSigned,
4257 Name: "idx.ext");
4258 }
4259
4260 // If this is subtraction, negate the index.
4261 if (isSubtraction)
4262 index = CGF.Builder.CreateNeg(V: index, Name: "idx.neg");
4263
4264 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
4265 CGF.EmitBoundsCheck(E: op.E, Base: pointerOperand, Index: index, IndexType: indexOperand->getType(),
4266 /*Accessed*/ false);
4267
4268 const PointerType *pointerType
4269 = pointerOperand->getType()->getAs<PointerType>();
4270 if (!pointerType) {
4271 QualType objectType = pointerOperand->getType()
4272 ->castAs<ObjCObjectPointerType>()
4273 ->getPointeeType();
4274 llvm::Value *objectSize
4275 = CGF.CGM.getSize(numChars: CGF.getContext().getTypeSizeInChars(T: objectType));
4276
4277 index = CGF.Builder.CreateMul(LHS: index, RHS: objectSize);
4278
4279 Value *result =
4280 CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: pointer, IdxList: index, Name: "add.ptr");
4281 return CGF.Builder.CreateBitCast(V: result, DestTy: pointer->getType());
4282 }
4283
4284 QualType elementType = pointerType->getPointeeType();
4285 if (const VariableArrayType *vla
4286 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
4287 // The element count here is the total number of non-VLA elements.
4288 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
4289
4290 // Effectively, the multiply by the VLA size is part of the GEP.
4291 // GEP indexes are signed, and scaling an index isn't permitted to
4292 // signed-overflow, so we use the same semantics for our explicit
4293 // multiply. We suppress this if overflow is not undefined behavior.
4294 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
4295 if (CGF.getLangOpts().PointerOverflowDefined) {
4296 index = CGF.Builder.CreateMul(LHS: index, RHS: numElements, Name: "vla.index");
4297 pointer = CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
4298 } else {
4299 index = CGF.Builder.CreateNSWMul(LHS: index, RHS: numElements, Name: "vla.index");
4300 pointer = CGF.EmitCheckedInBoundsGEP(
4301 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
4302 Name: "add.ptr");
4303 }
4304 return pointer;
4305 }
4306
4307 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4308 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4309 // future proof.
4310 llvm::Type *elemTy;
4311 if (elementType->isVoidType() || elementType->isFunctionType())
4312 elemTy = CGF.Int8Ty;
4313 else
4314 elemTy = CGF.ConvertTypeForMem(T: elementType);
4315
4316 if (CGF.getLangOpts().PointerOverflowDefined)
4317 return CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
4318
4319 return CGF.EmitCheckedInBoundsGEP(
4320 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
4321 Name: "add.ptr");
4322}
4323
4324// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4325// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4326// the add operand respectively. This allows fmuladd to represent a*b-c, or
4327// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4328// efficient operations.
4329static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4330 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4331 bool negMul, bool negAdd) {
4332 Value *MulOp0 = MulOp->getOperand(i: 0);
4333 Value *MulOp1 = MulOp->getOperand(i: 1);
4334 if (negMul)
4335 MulOp0 = Builder.CreateFNeg(V: MulOp0, Name: "neg");
4336 if (negAdd)
4337 Addend = Builder.CreateFNeg(V: Addend, Name: "neg");
4338
4339 Value *FMulAdd = nullptr;
4340 if (Builder.getIsFPConstrained()) {
4341 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4342 "Only constrained operation should be created when Builder is in FP "
4343 "constrained mode");
4344 FMulAdd = Builder.CreateConstrainedFPCall(
4345 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::experimental_constrained_fmuladd,
4346 Tys: Addend->getType()),
4347 Args: {MulOp0, MulOp1, Addend});
4348 } else {
4349 FMulAdd = Builder.CreateCall(
4350 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::fmuladd, Tys: Addend->getType()),
4351 Args: {MulOp0, MulOp1, Addend});
4352 }
4353 MulOp->eraseFromParent();
4354
4355 return FMulAdd;
4356}
4357
4358// Check whether it would be legal to emit an fmuladd intrinsic call to
4359// represent op and if so, build the fmuladd.
4360//
4361// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4362// Does NOT check the type of the operation - it's assumed that this function
4363// will be called from contexts where it's known that the type is contractable.
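//
// Illustrative example (assuming -ffp-contract=on): for
//
//   float f(float a, float b, float c) { return a * b + c; }
//
// the fadd would normally consume the result of an fmul; this routine instead
// erases the fmul and emits a single call to llvm.fmuladd(a, b, c).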
4364static Value* tryEmitFMulAdd(const BinOpInfo &op,
4365 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4366 bool isSub=false) {
4367
4368 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4369 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4370 "Only fadd/fsub can be the root of an fmuladd.");
4371
4372 // Check whether this op is marked as fusable.
4373 if (!op.FPFeatures.allowFPContractWithinStatement())
4374 return nullptr;
4375
4376 Value *LHS = op.LHS;
4377 Value *RHS = op.RHS;
4378
4379 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4380 // it is the only use of its operand.
4381 bool NegLHS = false;
4382 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: LHS)) {
4383 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4384 LHSUnOp->use_empty() && LHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4385 LHS = LHSUnOp->getOperand(i_nocapture: 0);
4386 NegLHS = true;
4387 }
4388 }
4389
4390 bool NegRHS = false;
4391 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: RHS)) {
4392 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4393 RHSUnOp->use_empty() && RHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4394 RHS = RHSUnOp->getOperand(i_nocapture: 0);
4395 NegRHS = true;
4396 }
4397 }
4398
4399 // We have a potentially fusable op. Look for a mul on one of the operands.
4400 // Also, make sure that the mul result isn't used directly. In that case,
4401 // there's no point creating a muladd operation.
4402 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: LHS)) {
4403 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4404 (LHSBinOp->use_empty() || NegLHS)) {
4405 // If we looked through fneg, erase it.
4406 if (NegLHS)
4407 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4408 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4409 }
4410 }
4411 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: RHS)) {
4412 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4413 (RHSBinOp->use_empty() || NegRHS)) {
4414 // If we looked through fneg, erase it.
4415 if (NegRHS)
4416 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4417 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4418 }
4419 }
4420
4421 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(Val: LHS)) {
4422 if (LHSBinOp->getIntrinsicID() ==
4423 llvm::Intrinsic::experimental_constrained_fmul &&
4424 (LHSBinOp->use_empty() || NegLHS)) {
4425 // If we looked through fneg, erase it.
4426 if (NegLHS)
4427 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4428 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4429 }
4430 }
4431 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(Val: RHS)) {
4432 if (RHSBinOp->getIntrinsicID() ==
4433 llvm::Intrinsic::experimental_constrained_fmul &&
4434 (RHSBinOp->use_empty() || NegRHS)) {
4435 // If we looked through fneg, erase it.
4436 if (NegRHS)
4437 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4438 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4439 }
4440 }
4441
4442 return nullptr;
4443}
4444
4445Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4446 if (op.LHS->getType()->isPointerTy() ||
4447 op.RHS->getType()->isPointerTy())
4448 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::NotSubtraction);
4449
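  // Signed integer addition: which IR we emit depends on the signed-overflow
  // model, e.g. (illustrative) -fwrapv gives a plain 'add', the default
  // undefined-behavior model gives 'add nsw', and -ftrapv or
  // -fsanitize=signed-integer-overflow routes through the checked intrinsic.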
4450 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4451 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4452 case LangOptions::SOB_Defined:
4453 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4454 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4455 [[fallthrough]];
4456 case LangOptions::SOB_Undefined:
4457 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4458 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4459 [[fallthrough]];
4460 case LangOptions::SOB_Trapping:
4461 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4462 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4463 return EmitOverflowCheckedBinOp(Ops: op);
4464 }
4465 }
4466
4467 // For vector and matrix adds, try to fold into a fmuladd.
4468 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4469 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4470 // Try to form an fmuladd.
4471 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4472 return FMulAdd;
4473 }
4474
4475 if (op.Ty->isConstantMatrixType()) {
4476 llvm::MatrixBuilder MB(Builder);
4477 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4478 return MB.CreateAdd(LHS: op.LHS, RHS: op.RHS);
4479 }
4480
4481 if (op.Ty->isUnsignedIntegerType() &&
4482 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
4483 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4484 return EmitOverflowCheckedBinOp(Ops: op);
4485
4486 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4487 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4488 return Builder.CreateFAdd(L: op.LHS, R: op.RHS, Name: "add");
4489 }
4490
4491 if (op.isFixedPointOp())
4492 return EmitFixedPointBinOp(Ops: op);
4493
4494 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4495}
4496
4497/// The resulting value must be calculated with exact precision, so the operands
4498/// might not have the same type.
4499Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4500 using llvm::APSInt;
4501 using llvm::ConstantInt;
4502
4503 // This is either a binary operation where at least one of the operands is
4504 // a fixed-point type, or a unary operation where the operand is a fixed-point
4505 // type. The result type of a binary operation is determined by
4506 // Sema::handleFixedPointConversions().
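  // For instance (illustrative): adding a 'short _Accum' and a '_Fract'
  // computes the sum in the common semantics of the two operands and then
  // converts the result to the result type's semantics at the end of this
  // function.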
4507 QualType ResultTy = op.Ty;
4508 QualType LHSTy, RHSTy;
4509 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: op.E)) {
4510 RHSTy = BinOp->getRHS()->getType();
4511 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(Val: BinOp)) {
4512 // For compound assignment, the effective type of the LHS at this point
4513 // is the computation LHS type, not the actual LHS type, and the final
4514 // result type is not the type of the expression but rather the
4515 // computation result type.
4516 LHSTy = CAO->getComputationLHSType();
4517 ResultTy = CAO->getComputationResultType();
4518 } else
4519 LHSTy = BinOp->getLHS()->getType();
4520 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(Val: op.E)) {
4521 LHSTy = UnOp->getSubExpr()->getType();
4522 RHSTy = UnOp->getSubExpr()->getType();
4523 }
4524 ASTContext &Ctx = CGF.getContext();
4525 Value *LHS = op.LHS;
4526 Value *RHS = op.RHS;
4527
4528 auto LHSFixedSema = Ctx.getFixedPointSemantics(Ty: LHSTy);
4529 auto RHSFixedSema = Ctx.getFixedPointSemantics(Ty: RHSTy);
4530 auto ResultFixedSema = Ctx.getFixedPointSemantics(Ty: ResultTy);
4531 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(Other: RHSFixedSema);
4532
4533 // Perform the actual operation.
4534 Value *Result;
4535 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4536 switch (op.Opcode) {
4537 case BO_AddAssign:
4538 case BO_Add:
4539 Result = FPBuilder.CreateAdd(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4540 break;
4541 case BO_SubAssign:
4542 case BO_Sub:
4543 Result = FPBuilder.CreateSub(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4544 break;
4545 case BO_MulAssign:
4546 case BO_Mul:
4547 Result = FPBuilder.CreateMul(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4548 break;
4549 case BO_DivAssign:
4550 case BO_Div:
4551 Result = FPBuilder.CreateDiv(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4552 break;
4553 case BO_ShlAssign:
4554 case BO_Shl:
4555 Result = FPBuilder.CreateShl(LHS, LHSSema: LHSFixedSema, RHS);
4556 break;
4557 case BO_ShrAssign:
4558 case BO_Shr:
4559 Result = FPBuilder.CreateShr(LHS, LHSSema: LHSFixedSema, RHS);
4560 break;
4561 case BO_LT:
4562 return FPBuilder.CreateLT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4563 case BO_GT:
4564 return FPBuilder.CreateGT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4565 case BO_LE:
4566 return FPBuilder.CreateLE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4567 case BO_GE:
4568 return FPBuilder.CreateGE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4569 case BO_EQ:
4570 // For equality operations, we assume any padding bits on unsigned types are
4571 // zero'd out. They could be overwritten through non-saturating operations
4572 // that cause overflow, but this leads to undefined behavior.
4573 return FPBuilder.CreateEQ(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4574 case BO_NE:
4575 return FPBuilder.CreateNE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4576 case BO_Cmp:
4577 case BO_LAnd:
4578 case BO_LOr:
4579 llvm_unreachable("Found unimplemented fixed point binary operation");
4580 case BO_PtrMemD:
4581 case BO_PtrMemI:
4582 case BO_Rem:
4583 case BO_Xor:
4584 case BO_And:
4585 case BO_Or:
4586 case BO_Assign:
4587 case BO_RemAssign:
4588 case BO_AndAssign:
4589 case BO_XorAssign:
4590 case BO_OrAssign:
4591 case BO_Comma:
4592 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4593 }
4594
4595 bool IsShift = BinaryOperator::isShiftOp(Opc: op.Opcode) ||
4596 BinaryOperator::isShiftAssignOp(Opc: op.Opcode);
4597 // Convert to the result type.
4598 return FPBuilder.CreateFixedToFixed(Src: Result, SrcSema: IsShift ? LHSFixedSema
4599 : CommonFixedSema,
4600 DstSema: ResultFixedSema);
4601}
4602
4603Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4604 // The LHS is always a pointer if either side is.
4605 if (!op.LHS->getType()->isPointerTy()) {
4606 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4607 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4608 case LangOptions::SOB_Defined:
4609 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4610 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4611 [[fallthrough]];
4612 case LangOptions::SOB_Undefined:
4613 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4614 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4615 [[fallthrough]];
4616 case LangOptions::SOB_Trapping:
4617 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4618 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4619 return EmitOverflowCheckedBinOp(Ops: op);
4620 }
4621 }
4622
4623 // For vector and matrix subs, try to fold into a fmuladd.
4624 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4625 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4626 // Try to form an fmuladd.
4627 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, isSub: true))
4628 return FMulAdd;
4629 }
4630
4631 if (op.Ty->isConstantMatrixType()) {
4632 llvm::MatrixBuilder MB(Builder);
4633 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4634 return MB.CreateSub(LHS: op.LHS, RHS: op.RHS);
4635 }
4636
4637 if (op.Ty->isUnsignedIntegerType() &&
4638 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
4639 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4640 return EmitOverflowCheckedBinOp(Ops: op);
4641
4642 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4643 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4644 return Builder.CreateFSub(L: op.LHS, R: op.RHS, Name: "sub");
4645 }
4646
4647 if (op.isFixedPointOp())
4648 return EmitFixedPointBinOp(op);
4649
4650 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4651 }
4652
4653 // If the RHS is not a pointer, then we have normal pointer
4654 // arithmetic.
4655 if (!op.RHS->getType()->isPointerTy())
4656 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::IsSubtraction);
4657
4658 // Otherwise, this is a pointer subtraction.
4659
4660 // Do the raw subtraction part.
4661 llvm::Value *LHS
4662 = Builder.CreatePtrToInt(V: op.LHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.lhs.cast");
4663 llvm::Value *RHS
4664 = Builder.CreatePtrToInt(V: op.RHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.rhs.cast");
4665 Value *diffInChars = Builder.CreateSub(LHS, RHS, Name: "sub.ptr.sub");
4666
4667 // Okay, figure out the element size.
4668 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
4669 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4670
4671 llvm::Value *divisor = nullptr;
4672
4673 // For a variable-length array, this is going to be non-constant.
4674 if (const VariableArrayType *vla
4675 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
4676 auto VlaSize = CGF.getVLASize(vla);
4677 elementType = VlaSize.Type;
4678 divisor = VlaSize.NumElts;
4679
4680 // Scale the number of non-VLA elements by the non-VLA element size.
4681 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4682 if (!eltSize.isOne())
4683 divisor = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: divisor);
4684
4685    // For everything else, we can just compute it, safe in the
4686 // assumption that Sema won't let anything through that we can't
4687 // safely compute the size of.
4688 } else {
4689 CharUnits elementSize;
4690 // Handle GCC extension for pointer arithmetic on void* and
4691 // function pointer types.
4692 if (elementType->isVoidType() || elementType->isFunctionType())
4693 elementSize = CharUnits::One();
4694 else
4695 elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4696
4697 // Don't even emit the divide for element size of 1.
4698 if (elementSize.isOne())
4699 return diffInChars;
4700
4701 divisor = CGF.CGM.getSize(numChars: elementSize);
4702 }
4703
4704 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4705 // pointer difference in C is only defined in the case where both operands
4706 // are pointing to elements of an array.
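  // E.g. (illustrative, on a typical 64-bit target) 'int *p, *q; p - q'
  // yields roughly:
  //   %l = ptrtoint ptr %p to i64
  //   %r = ptrtoint ptr %q to i64
  //   %d = sub i64 %l, %r
  //   %n = sdiv exact i64 %d, 4   ; divide by sizeof(int)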
4707 return Builder.CreateExactSDiv(LHS: diffInChars, RHS: divisor, Name: "sub.ptr.div");
4708}
4709
4710Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4711 bool RHSIsSigned) {
4712 llvm::IntegerType *Ty;
4713 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4714 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4715 else
4716 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4717  // For a given LHS type the maximum shift amount is width(LHS)-1; however,
4718  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4719  // this in ConstantInt::get, the value would get silently truncated.
4720  // Constrain the return value to max(RHS) in this case.
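  // E.g. (illustrative) for an i256 LHS shifted by a signed i8 RHS, the
  // natural mask value 255 does not fit in the RHS range, so max(RHS) = 127
  // is returned instead of a silently truncated constant.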
4721 llvm::Type *RHSTy = RHS->getType();
4722 llvm::APInt RHSMax =
4723 RHSIsSigned ? llvm::APInt::getSignedMaxValue(numBits: RHSTy->getScalarSizeInBits())
4724 : llvm::APInt::getMaxValue(numBits: RHSTy->getScalarSizeInBits());
4725 if (RHSMax.ult(RHS: Ty->getBitWidth()))
4726 return llvm::ConstantInt::get(Ty: RHSTy, V: RHSMax);
4727 return llvm::ConstantInt::get(Ty: RHSTy, V: Ty->getBitWidth() - 1);
4728}
4729
4730Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4731 const Twine &Name) {
4732 llvm::IntegerType *Ty;
4733 if (auto *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4734 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4735 else
4736 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4737
4738 if (llvm::isPowerOf2_64(Value: Ty->getBitWidth()))
4739 return Builder.CreateAnd(LHS: RHS, RHS: GetMaximumShiftAmount(LHS, RHS, RHSIsSigned: false), Name);
4740
4741 return Builder.CreateURem(
4742 LHS: RHS, RHS: llvm::ConstantInt::get(Ty: RHS->getType(), V: Ty->getBitWidth()), Name);
4743}
4744
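// For OpenCL/HLSL the shift amount is masked rather than checked, so e.g.
// (illustrative) a 32-bit 'x << s' becomes 'shl i32 %x, (and i32 %s, 31)';
// for non-power-of-two bit widths a urem by the bit width is used instead.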
4745Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4746 // TODO: This misses out on the sanitizer check below.
4747 if (Ops.isFixedPointOp())
4748 return EmitFixedPointBinOp(op: Ops);
4749
4750 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4751 // RHS to the same size as the LHS.
4752 Value *RHS = Ops.RHS;
4753 if (Ops.LHS->getType() != RHS->getType())
4754 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4755
4756 bool SanitizeSignedBase = CGF.SanOpts.has(K: SanitizerKind::ShiftBase) &&
4757 Ops.Ty->hasSignedIntegerRepresentation() &&
4758 !CGF.getLangOpts().isSignedOverflowDefined() &&
4759 !CGF.getLangOpts().CPlusPlus20;
4760 bool SanitizeUnsignedBase =
4761 CGF.SanOpts.has(K: SanitizerKind::UnsignedShiftBase) &&
4762 Ops.Ty->hasUnsignedIntegerRepresentation();
4763 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4764 bool SanitizeExponent = CGF.SanOpts.has(K: SanitizerKind::ShiftExponent);
4765 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4766 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4767 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shl.mask");
4768 else if ((SanitizeBase || SanitizeExponent) &&
4769 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4770 SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
4771 if (SanitizeSignedBase)
4772 Ordinals.push_back(Elt: SanitizerKind::SO_ShiftBase);
4773 if (SanitizeUnsignedBase)
4774 Ordinals.push_back(Elt: SanitizerKind::SO_UnsignedShiftBase);
4775 if (SanitizeExponent)
4776 Ordinals.push_back(Elt: SanitizerKind::SO_ShiftExponent);
4777
4778 SanitizerDebugLocation SanScope(&CGF, Ordinals,
4779 SanitizerHandler::ShiftOutOfBounds);
4780 SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
4781 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4782 llvm::Value *WidthMinusOne =
4783 GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned);
4784 llvm::Value *ValidExponent = Builder.CreateICmpULE(LHS: Ops.RHS, RHS: WidthMinusOne);
4785
4786 if (SanitizeExponent) {
4787 Checks.push_back(
4788 Elt: std::make_pair(x&: ValidExponent, y: SanitizerKind::SO_ShiftExponent));
4789 }
4790
4791 if (SanitizeBase) {
4792      // Check whether we are shifting any non-zero bits off the top of the
4793      // integer. We only emit this check if the exponent is valid - otherwise
4794      // the instructions below would have undefined behavior themselves.
4795 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4796 llvm::BasicBlock *Cont = CGF.createBasicBlock(name: "cont");
4797 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock(name: "check");
4798 Builder.CreateCondBr(Cond: ValidExponent, True: CheckShiftBase, False: Cont);
4799 llvm::Value *PromotedWidthMinusOne =
4800 (RHS == Ops.RHS) ? WidthMinusOne
4801 : GetMaximumShiftAmount(LHS: Ops.LHS, RHS, RHSIsSigned);
4802 CGF.EmitBlock(BB: CheckShiftBase);
4803 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4804 LHS: Ops.LHS, RHS: Builder.CreateSub(LHS: PromotedWidthMinusOne, RHS, Name: "shl.zeros",
4805 /*NUW*/ HasNUW: true, /*NSW*/ HasNSW: true),
4806 Name: "shl.check");
4807 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4808 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4809 // Under C++11's rules, shifting a 1 bit into the sign bit is
4810 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4811 // define signed left shifts, so we use the C99 and C++11 rules there).
4812 // Unsigned shifts can always shift into the top bit.
4813 llvm::Value *One = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 1);
4814 BitsShiftedOff = Builder.CreateLShr(LHS: BitsShiftedOff, RHS: One);
4815 }
4816 llvm::Value *Zero = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 0);
4817 llvm::Value *ValidBase = Builder.CreateICmpEQ(LHS: BitsShiftedOff, RHS: Zero);
4818 CGF.EmitBlock(BB: Cont);
4819 llvm::PHINode *BaseCheck = Builder.CreatePHI(Ty: ValidBase->getType(), NumReservedValues: 2);
4820 BaseCheck->addIncoming(V: Builder.getTrue(), BB: Orig);
4821 BaseCheck->addIncoming(V: ValidBase, BB: CheckShiftBase);
4822 Checks.push_back(Elt: std::make_pair(
4823 x&: BaseCheck, y: SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
4824 : SanitizerKind::SO_UnsignedShiftBase));
4825 }
4826
4827 assert(!Checks.empty());
4828 EmitBinOpCheck(Checks, Info: Ops);
4829 }
4830
4831 return Builder.CreateShl(LHS: Ops.LHS, RHS, Name: "shl");
4832}
4833
4834Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4835 // TODO: This misses out on the sanitizer check below.
4836 if (Ops.isFixedPointOp())
4837 return EmitFixedPointBinOp(op: Ops);
4838
4839 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4840 // RHS to the same size as the LHS.
4841 Value *RHS = Ops.RHS;
4842 if (Ops.LHS->getType() != RHS->getType())
4843 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4844
4845 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4846 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4847 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shr.mask");
4848 else if (CGF.SanOpts.has(K: SanitizerKind::ShiftExponent) &&
4849 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4850 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4851 SanitizerHandler::ShiftOutOfBounds);
4852 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4853 llvm::Value *Valid = Builder.CreateICmpULE(
4854 LHS: Ops.RHS, RHS: GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned));
4855 EmitBinOpCheck(Checks: std::make_pair(x&: Valid, y: SanitizerKind::SO_ShiftExponent), Info: Ops);
4856 }
4857
4858 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4859 return Builder.CreateLShr(LHS: Ops.LHS, RHS, Name: "shr");
4860 return Builder.CreateAShr(LHS: Ops.LHS, RHS, Name: "shr");
4861}
4862
4863enum IntrinsicType { VCMPEQ, VCMPGT };
4864// Return the corresponding comparison intrinsic for the given vector element type.
4865static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4866 BuiltinType::Kind ElemKind) {
4867 switch (ElemKind) {
4868 default: llvm_unreachable("unexpected element type");
4869 case BuiltinType::Char_U:
4870 case BuiltinType::UChar:
4871 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4872 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4873 case BuiltinType::Char_S:
4874 case BuiltinType::SChar:
4875 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4876 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4877 case BuiltinType::UShort:
4878 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4879 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4880 case BuiltinType::Short:
4881 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4882 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4883 case BuiltinType::UInt:
4884 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4885 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4886 case BuiltinType::Int:
4887 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4888 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4889 case BuiltinType::ULong:
4890 case BuiltinType::ULongLong:
4891 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4892 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4893 case BuiltinType::Long:
4894 case BuiltinType::LongLong:
4895 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4896 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4897 case BuiltinType::Float:
4898 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4899 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4900 case BuiltinType::Double:
4901 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4902 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4903 case BuiltinType::UInt128:
4904 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4905 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4906 case BuiltinType::Int128:
4907 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4908 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4909 }
4910}
4911
4912Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4913 llvm::CmpInst::Predicate UICmpOpc,
4914 llvm::CmpInst::Predicate SICmpOpc,
4915 llvm::CmpInst::Predicate FCmpOpc,
4916 bool IsSignaling) {
4917 TestAndClearIgnoreResultAssign();
4918 Value *Result;
4919 QualType LHSTy = E->getLHS()->getType();
4920 QualType RHSTy = E->getRHS()->getType();
4921 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4922 assert(E->getOpcode() == BO_EQ ||
4923 E->getOpcode() == BO_NE);
4924 Value *LHS = CGF.EmitScalarExpr(E: E->getLHS());
4925 Value *RHS = CGF.EmitScalarExpr(E: E->getRHS());
4926 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4927 CGF, L: LHS, R: RHS, MPT, Inequality: E->getOpcode() == BO_NE);
4928 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4929 BinOpInfo BOInfo = EmitBinOps(E);
4930 Value *LHS = BOInfo.LHS;
4931 Value *RHS = BOInfo.RHS;
4932
4933    // For AltiVec, the comparison results in a numeric type, so we use
4934    // intrinsics that compare whole vectors and give 0 or 1 as a result.
4935 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4936      // Constants for mapping CR6 register bits to the predicate result.
4937 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4938
4939 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4940
4941      // In several cases the order of the vector arguments is reversed.
4942 Value *FirstVecArg = LHS,
4943 *SecondVecArg = RHS;
4944
4945 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4946 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4947
4948 switch(E->getOpcode()) {
4949 default: llvm_unreachable("is not a comparison operation");
4950 case BO_EQ:
4951 CR6 = CR6_LT;
4952 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4953 break;
4954 case BO_NE:
4955 CR6 = CR6_EQ;
4956 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4957 break;
4958 case BO_LT:
4959 CR6 = CR6_LT;
4960 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4961 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4962 break;
4963 case BO_GT:
4964 CR6 = CR6_LT;
4965 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4966 break;
4967 case BO_LE:
4968 if (ElementKind == BuiltinType::Float) {
4969 CR6 = CR6_LT;
4970 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4971 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4972 }
4973 else {
4974 CR6 = CR6_EQ;
4975 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4976 }
4977 break;
4978 case BO_GE:
4979 if (ElementKind == BuiltinType::Float) {
4980 CR6 = CR6_LT;
4981 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4982 }
4983 else {
4984 CR6 = CR6_EQ;
4985 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4986 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4987 }
4988 break;
4989 }
4990
4991 Value *CR6Param = Builder.getInt32(C: CR6);
4992 llvm::Function *F = CGF.CGM.getIntrinsic(IID: ID);
4993 Result = Builder.CreateCall(Callee: F, Args: {CR6Param, FirstVecArg, SecondVecArg});
4994
4995      // The result type of the intrinsic may not be the same as E->getType().
4996      // If E->getType() is not BoolTy, EmitScalarConversion will do the
4997      // conversion work. If E->getType() is BoolTy, EmitScalarConversion does
4998      // nothing, so if ResultTy is not i1 at that point, it would cause a
4999      // crash later.
5000 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Val: Result->getType());
5001 if (ResultTy->getBitWidth() > 1 &&
5002 E->getType() == CGF.getContext().BoolTy)
5003 Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt1Ty());
5004 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
5005 Loc: E->getExprLoc());
5006 }
5007
5008 if (BOInfo.isFixedPointOp()) {
5009 Result = EmitFixedPointBinOp(op: BOInfo);
5010 } else if (LHS->getType()->isFPOrFPVectorTy()) {
5011 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
5012 if (!IsSignaling)
5013 Result = Builder.CreateFCmp(P: FCmpOpc, LHS, RHS, Name: "cmp");
5014 else
5015 Result = Builder.CreateFCmpS(P: FCmpOpc, LHS, RHS, Name: "cmp");
5016 } else if (LHSTy->hasSignedIntegerRepresentation()) {
5017 Result = Builder.CreateICmp(P: SICmpOpc, LHS, RHS, Name: "cmp");
5018 } else {
5019 // Unsigned integers and pointers.
5020
5021 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
5022 !isa<llvm::ConstantPointerNull>(Val: LHS) &&
5023 !isa<llvm::ConstantPointerNull>(Val: RHS)) {
5024
5025        // Dynamic information must be stripped for comparisons, because it
5026        // could leak the dynamic information. Based on comparisons of pointers
5027        // to dynamic objects, the optimizer can replace one pointer with
5028        // another, which might be incorrect in the presence of invariant
5029        // groups. Comparison with null is safe because null does not carry
5030        // any dynamic information.
5031 if (LHSTy.mayBeDynamicClass())
5032 LHS = Builder.CreateStripInvariantGroup(Ptr: LHS);
5033 if (RHSTy.mayBeDynamicClass())
5034 RHS = Builder.CreateStripInvariantGroup(Ptr: RHS);
5035 }
5036
5037 Result = Builder.CreateICmp(P: UICmpOpc, LHS, RHS, Name: "cmp");
5038 }
5039
5040 // If this is a vector comparison, sign extend the result to the appropriate
5041 // vector integer type and return it (don't convert to bool).
5042 if (LHSTy->isVectorType())
5043 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
5044
5045 } else {
5046 // Complex Comparison: can only be an equality comparison.
5047 CodeGenFunction::ComplexPairTy LHS, RHS;
5048 QualType CETy;
5049 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
5050 LHS = CGF.EmitComplexExpr(E: E->getLHS());
5051 CETy = CTy->getElementType();
5052 } else {
5053 LHS.first = Visit(E: E->getLHS());
5054 LHS.second = llvm::Constant::getNullValue(Ty: LHS.first->getType());
5055 CETy = LHSTy;
5056 }
5057 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
5058 RHS = CGF.EmitComplexExpr(E: E->getRHS());
5059 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
5060 CTy->getElementType()) &&
5061 "The element types must always match.");
5062 (void)CTy;
5063 } else {
5064 RHS.first = Visit(E: E->getRHS());
5065 RHS.second = llvm::Constant::getNullValue(Ty: RHS.first->getType());
5066 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
5067 "The element types must always match.");
5068 }
5069
5070 Value *ResultR, *ResultI;
5071 if (CETy->isRealFloatingType()) {
5072 // As complex comparisons can only be equality comparisons, they
5073 // are never signaling comparisons.
5074 ResultR = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
5075 ResultI = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
5076 } else {
5077 // Complex comparisons can only be equality comparisons. As such, signed
5078 // and unsigned opcodes are the same.
5079 ResultR = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
5080 ResultI = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
5081 }
5082
5083 if (E->getOpcode() == BO_EQ) {
5084 Result = Builder.CreateAnd(LHS: ResultR, RHS: ResultI, Name: "and.ri");
5085 } else {
5086 assert(E->getOpcode() == BO_NE &&
5087 "Complex comparison other than == or != ?");
5088 Result = Builder.CreateOr(LHS: ResultR, RHS: ResultI, Name: "or.ri");
5089 }
5090 }
5091
5092 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
5093 Loc: E->getExprLoc());
5094}
5095
5096llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
5097 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
5098  // In case the integer or bitfield sanitizer checks are enabled,
5099  // we want to get the expression before the scalar conversion.
5100 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: E->getRHS())) {
5101 CastKind Kind = ICE->getCastKind();
5102 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
5103 *SrcType = ICE->getSubExpr()->getType();
5104 *Previous = EmitScalarExpr(E: ICE->getSubExpr());
5105      // Pass default ScalarConversionOpts to avoid emitting
5106      // integer sanitizer checks, as E refers to a bitfield.
5107 return EmitScalarConversion(Src: *Previous, SrcTy: *SrcType, DstTy: ICE->getType(),
5108 Loc: ICE->getExprLoc());
5109 }
5110 }
5111 return EmitScalarExpr(E: E->getRHS());
5112}
5113
5114Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
5115 ApplyAtomGroup Grp(CGF.getDebugInfo());
5116 bool Ignore = TestAndClearIgnoreResultAssign();
5117
5118 Value *RHS;
5119 LValue LHS;
5120
5121 if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
5122 LValue LV = CGF.EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
5123 LV.getQuals().removePointerAuth();
5124 llvm::Value *RV =
5125 CGF.EmitPointerAuthQualify(Qualifier: PtrAuth, PointerExpr: E->getRHS(), StorageAddress: LV.getAddress());
5126 CGF.EmitNullabilityCheck(LHS: LV, RHS: RV, Loc: E->getExprLoc());
5127 CGF.EmitStoreThroughLValue(Src: RValue::get(V: RV), Dst: LV);
5128
5129 if (Ignore)
5130 return nullptr;
5131 RV = CGF.EmitPointerAuthUnqualify(Qualifier: PtrAuth, Pointer: RV, PointerType: LV.getType(),
5132 StorageAddress: LV.getAddress(), /*nonnull*/ IsKnownNonNull: false);
5133 return RV;
5134 }
5135
5136 switch (E->getLHS()->getType().getObjCLifetime()) {
5137 case Qualifiers::OCL_Strong:
5138 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreStrong(e: E, ignored: Ignore);
5139 break;
5140
5141 case Qualifiers::OCL_Autoreleasing:
5142 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreAutoreleasing(e: E);
5143 break;
5144
5145 case Qualifiers::OCL_ExplicitNone:
5146 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreUnsafeUnretained(e: E, ignored: Ignore);
5147 break;
5148
5149 case Qualifiers::OCL_Weak:
5150 RHS = Visit(E: E->getRHS());
5151 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
5152 RHS = CGF.EmitARCStoreWeak(addr: LHS.getAddress(), value: RHS, ignored: Ignore);
5153 break;
5154
5155 case Qualifiers::OCL_None:
5156 // __block variables need to have the rhs evaluated first, plus
5157 // this should improve codegen just a little.
5158 Value *Previous = nullptr;
5159 QualType SrcType = E->getRHS()->getType();
5160    // If the LHS is a bitfield and the RHS contains an implicit cast
5161    // expression, extract that value and potentially (if the bitfield
5162    // sanitizer is enabled) use it to check for an implicit conversion.
5163 if (E->getLHS()->refersToBitField())
5164 RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, Previous: &Previous, SrcType: &SrcType);
5165 else
5166 RHS = Visit(E: E->getRHS());
5167
5168 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
5169
5170 // Store the value into the LHS. Bit-fields are handled specially
5171 // because the result is altered by the store, i.e., [C99 6.5.16p1]
5172 // 'An assignment expression has the value of the left operand after
5173 // the assignment...'.
5174 if (LHS.isBitField()) {
5175 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: RHS), Dst: LHS, Result: &RHS);
5176 // If the expression contained an implicit conversion, make sure
5177 // to use the value before the scalar conversion.
5178 Value *Src = Previous ? Previous : RHS;
5179 QualType DstType = E->getLHS()->getType();
5180 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: RHS, DstType,
5181 Info: LHS.getBitFieldInfo(), Loc: E->getExprLoc());
5182 } else {
5183 CGF.EmitNullabilityCheck(LHS, RHS, Loc: E->getExprLoc());
5184 CGF.EmitStoreThroughLValue(Src: RValue::get(V: RHS), Dst: LHS);
5185 }
5186 }
5187
5188 // If the result is clearly ignored, return now.
5189 if (Ignore)
5190 return nullptr;
5191
5192 // The result of an assignment in C is the assigned r-value.
5193 if (!CGF.getLangOpts().CPlusPlus)
5194 return RHS;
5195
5196 // If the lvalue is non-volatile, return the computed value of the assignment.
5197 if (!LHS.isVolatileQualified())
5198 return RHS;
5199
5200 // Otherwise, reload the value.
5201 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
5202}
5203
5204Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
5205 // Perform vector logical and on comparisons with zero vectors.
5206 if (E->getType()->isVectorType()) {
5207 CGF.incrementProfileCounter(S: E);
5208
5209 Value *LHS = Visit(E: E->getLHS());
5210 Value *RHS = Visit(E: E->getRHS());
5211 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
5212 if (LHS->getType()->isFPOrFPVectorTy()) {
5213 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5214 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
5215 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
5216 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
5217 } else {
5218 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
5219 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
5220 }
5221 Value *And = Builder.CreateAnd(LHS, RHS);
5222 return Builder.CreateSExt(V: And, DestTy: ConvertType(T: E->getType()), Name: "sext");
5223 }
5224
5225 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5226 llvm::Type *ResTy = ConvertType(T: E->getType());
5227
5228 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
5229 // If we have 1 && X, just emit X without inserting the control flow.
5230 bool LHSCondVal;
5231 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
5232 if (LHSCondVal) { // If we have 1 && X, just emit X.
5233 CGF.incrementProfileCounter(S: E);
5234
5235 // If the top of the logical operator nest, reset the MCDC temp to 0.
5236 if (CGF.MCDCLogOpStack.empty())
5237 CGF.maybeResetMCDCCondBitmap(E);
5238
5239 CGF.MCDCLogOpStack.push_back(Elt: E);
5240
5241 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5242
5243 // If we're generating for profiling or coverage, generate a branch to a
5244 // block that increments the RHS counter needed to track branch condition
5245 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5246 // "FalseBlock" after the increment is done.
5247 if (InstrumentRegions &&
5248 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5249 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5250 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "land.end");
5251 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
5252 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: FBlock);
5253 CGF.EmitBlock(BB: RHSBlockCnt);
5254 CGF.incrementProfileCounter(S: E->getRHS());
5255 CGF.EmitBranch(Block: FBlock);
5256 CGF.EmitBlock(BB: FBlock);
5257 } else
5258 CGF.markStmtMaybeUsed(S: E->getRHS());
5259
5260 CGF.MCDCLogOpStack.pop_back();
5261 // If the top of the logical operator nest, update the MCDC bitmap.
5262 if (CGF.MCDCLogOpStack.empty())
5263 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5264
5265 // ZExt result to int or bool.
5266 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "land.ext");
5267 }
5268
5269 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
5270 if (!CGF.ContainsLabel(S: E->getRHS())) {
5271 CGF.markStmtMaybeUsed(S: E->getRHS());
5272 return llvm::Constant::getNullValue(Ty: ResTy);
5273 }
5274 }
5275
5276 // If the top of the logical operator nest, reset the MCDC temp to 0.
5277 if (CGF.MCDCLogOpStack.empty())
5278 CGF.maybeResetMCDCCondBitmap(E);
5279
5280 CGF.MCDCLogOpStack.push_back(Elt: E);
5281
5282 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "land.end");
5283 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "land.rhs");
5284
5285 CodeGenFunction::ConditionalEvaluation eval(CGF);
5286
5287 // Branch on the LHS first. If it is false, go to the failure (cont) block.
5288 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: RHSBlock, FalseBlock: ContBlock,
5289 TrueCount: CGF.getProfileCount(S: E->getRHS()));
5290
5291  // Any edges into the ContBlock now come from the (indeterminate number of)
5292  // edges out of this first condition. All of these values will be false. Start
5293  // setting up the PHI node in the Cont Block for this.
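  // The emitted CFG for 'a && b' then looks roughly like (illustrative):
  //
  //   entry:     br i1 %a, label %land.rhs, label %land.end
  //   land.rhs:  %b.cond = ...
  //              br label %land.end
  //   land.end:  %r = phi i1 [ false, %entry ], [ %b.cond, %land.rhs ]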
5294 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
5295 NameStr: "", InsertBefore: ContBlock);
5296 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
5297 PI != PE; ++PI)
5298 PN->addIncoming(V: llvm::ConstantInt::getFalse(Context&: VMContext), BB: *PI);
5299
5300 eval.begin(CGF);
5301 CGF.EmitBlock(BB: RHSBlock);
5302 CGF.incrementProfileCounter(S: E);
5303 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5304 eval.end(CGF);
5305
5306  // Reacquire the RHS block, as there may be subblocks inserted.
5307 RHSBlock = Builder.GetInsertBlock();
5308
5309 // If we're generating for profiling or coverage, generate a branch on the
5310 // RHS to a block that increments the RHS true counter needed to track branch
5311 // condition coverage.
5312 if (InstrumentRegions &&
5313 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5314 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5315 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
5316 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: ContBlock);
5317 CGF.EmitBlock(BB: RHSBlockCnt);
5318 CGF.incrementProfileCounter(S: E->getRHS());
5319 CGF.EmitBranch(Block: ContBlock);
5320 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
5321 }
5322
5323 // Emit an unconditional branch from this block to ContBlock.
5324 {
5325    // There is no need to emit a line number for the unconditional branch.
5326 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
5327 CGF.EmitBlock(BB: ContBlock);
5328 }
5329 // Insert an entry into the phi node for the edge with the value of RHSCond.
5330 PN->addIncoming(V: RHSCond, BB: RHSBlock);
5331
5332 CGF.MCDCLogOpStack.pop_back();
5333 // If the top of the logical operator nest, update the MCDC bitmap.
5334 if (CGF.MCDCLogOpStack.empty())
5335 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5336
5337 // Artificial location to preserve the scope information
5338 {
5339 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
5340 PN->setDebugLoc(Builder.getCurrentDebugLocation());
5341 }
5342
5343 // ZExt result to int.
5344 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "land.ext");
5345}
5346
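// Emit a logical OR. For vector operands this lowers to an element-wise
// bitwise OR of comparisons against zero; for scalars it implements the usual
// C/C++ short-circuit semantics, constant-folding the LHS when possible.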
5347Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
5348 // Perform vector logical or on comparisons with zero vectors.
5349 if (E->getType()->isVectorType()) {
5350 CGF.incrementProfileCounter(S: E);
5351
5352 Value *LHS = Visit(E: E->getLHS());
5353 Value *RHS = Visit(E: E->getRHS());
5354 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
5355 if (LHS->getType()->isFPOrFPVectorTy()) {
5356 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5357 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
5358 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
5359 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
5360 } else {
5361 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
5362 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
5363 }
5364 Value *Or = Builder.CreateOr(LHS, RHS);
5365 return Builder.CreateSExt(V: Or, DestTy: ConvertType(T: E->getType()), Name: "sext");
5366 }
5367
5368 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5369 llvm::Type *ResTy = ConvertType(T: E->getType());
5370
5371 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
5372 // If we have 0 || X, just emit X without inserting the control flow.
5373 bool LHSCondVal;
5374 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
5375 if (!LHSCondVal) { // If we have 0 || X, just emit X.
5376 CGF.incrementProfileCounter(S: E);
5377
5378 // If the top of the logical operator nest, reset the MCDC temp to 0.
5379 if (CGF.MCDCLogOpStack.empty())
5380 CGF.maybeResetMCDCCondBitmap(E);
5381
5382 CGF.MCDCLogOpStack.push_back(Elt: E);
5383
5384 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5385
5386 // If we're generating for profiling or coverage, generate a branch to a
5387      // block that increments the RHS counter needed to track branch condition
5388 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5389 // "FalseBlock" after the increment is done.
5390 if (InstrumentRegions &&
5391 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5392 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5393 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "lor.end");
5394 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5395 Builder.CreateCondBr(Cond: RHSCond, True: FBlock, False: RHSBlockCnt);
5396 CGF.EmitBlock(BB: RHSBlockCnt);
5397 CGF.incrementProfileCounter(S: E->getRHS());
5398 CGF.EmitBranch(Block: FBlock);
5399 CGF.EmitBlock(BB: FBlock);
5400 } else
5401 CGF.markStmtMaybeUsed(S: E->getRHS());
5402
5403 CGF.MCDCLogOpStack.pop_back();
5404 // If the top of the logical operator nest, update the MCDC bitmap.
5405 if (CGF.MCDCLogOpStack.empty())
5406 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5407
5408 // ZExt result to int or bool.
5409 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "lor.ext");
5410 }
5411
5412 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5413 if (!CGF.ContainsLabel(S: E->getRHS())) {
5414 CGF.markStmtMaybeUsed(S: E->getRHS());
5415 return llvm::ConstantInt::get(Ty: ResTy, V: 1);
5416 }
5417 }
5418
5419 // If the top of the logical operator nest, reset the MCDC temp to 0.
5420 if (CGF.MCDCLogOpStack.empty())
5421 CGF.maybeResetMCDCCondBitmap(E);
5422
5423 CGF.MCDCLogOpStack.push_back(Elt: E);
5424
5425 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "lor.end");
5426 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "lor.rhs");
5427
5428 CodeGenFunction::ConditionalEvaluation eval(CGF);
5429
5430 // Branch on the LHS first. If it is true, go to the success (cont) block.
5431 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: ContBlock, FalseBlock: RHSBlock,
5432 TrueCount: CGF.getCurrentProfileCount() -
5433 CGF.getProfileCount(S: E->getRHS()));
5434
5435  // Any edges into the ContBlock now come from an (indeterminate number of)
5436  // branches out of this first condition. All of these values will be true.
5437  // Start setting up the PHI node in the Cont Block for this.
5438 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
5439 NameStr: "", InsertBefore: ContBlock);
5440 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
5441 PI != PE; ++PI)
5442 PN->addIncoming(V: llvm::ConstantInt::getTrue(Context&: VMContext), BB: *PI);
5443
5444 eval.begin(CGF);
5445
5446 // Emit the RHS condition as a bool value.
5447 CGF.EmitBlock(BB: RHSBlock);
5448 CGF.incrementProfileCounter(S: E);
5449 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5450
5451 eval.end(CGF);
5452
5453  // Reacquire the RHS block, as there may be subblocks inserted.
5454 RHSBlock = Builder.GetInsertBlock();
5455
5456 // If we're generating for profiling or coverage, generate a branch on the
5457 // RHS to a block that increments the RHS true counter needed to track branch
5458 // condition coverage.
5459 if (InstrumentRegions &&
5460 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5461 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5462 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5463 Builder.CreateCondBr(Cond: RHSCond, True: ContBlock, False: RHSBlockCnt);
5464 CGF.EmitBlock(BB: RHSBlockCnt);
5465 CGF.incrementProfileCounter(S: E->getRHS());
5466 CGF.EmitBranch(Block: ContBlock);
5467 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
5468 }
5469
5470 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5471 // into the phi node for the edge with the value of RHSCond.
5472 CGF.EmitBlock(BB: ContBlock);
5473 PN->addIncoming(V: RHSCond, BB: RHSBlock);
5474
5475 CGF.MCDCLogOpStack.pop_back();
5476 // If the top of the logical operator nest, update the MCDC bitmap.
5477 if (CGF.MCDCLogOpStack.empty())
5478 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5479
5480 // ZExt result to int.
5481 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "lor.ext");
5482}
5483
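// The comma operator: evaluate the LHS purely for its side effects, then emit
// and return the value of the RHS.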
5484Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5485 CGF.EmitIgnoredExpr(E: E->getLHS());
5486 CGF.EnsureInsertPoint();
5487 return Visit(E: E->getRHS());
5488}
5489
5490//===----------------------------------------------------------------------===//
5491// Other Operators
5492//===----------------------------------------------------------------------===//
5493
5494/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5495/// expression is cheap enough and side-effect-free enough to evaluate
5496/// unconditionally instead of conditionally. This is used to convert control
5497/// flow into selects in some cases.
5498static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
5499 CodeGenFunction &CGF) {
5500 // Anything that is an integer or floating point constant is fine.
5501 return E->IgnoreParens()->isEvaluatable(Ctx: CGF.getContext());
5502
5503 // Even non-volatile automatic variables can't be evaluated unconditionally.
5504 // Referencing a thread_local may cause non-trivial initialization work to
5505 // occur. If we're inside a lambda and one of the variables is from the scope
5506 // outside the lambda, that function may have returned already. Reading its
5507  // locals is a bad idea. Also, these reads may introduce races that didn't
5508 // exist in the source-level program.
5509}
5510
5511
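// Emit a conditional operator (?:). Depending on the operands this either
// folds away the dead arm, lowers a vector condition to a select, emits a
// select for cheap side-effect-free arms, or falls back to real control flow
// with a PHI node.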
5512Value *ScalarExprEmitter::
5513VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5514 TestAndClearIgnoreResultAssign();
5515
5516 // Bind the common expression if necessary.
5517 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5518
5519 Expr *condExpr = E->getCond();
5520 Expr *lhsExpr = E->getTrueExpr();
5521 Expr *rhsExpr = E->getFalseExpr();
5522
5523 // If the condition constant folds and can be elided, try to avoid emitting
5524 // the condition and the dead arm.
5525 bool CondExprBool;
5526 if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) {
5527 Expr *live = lhsExpr, *dead = rhsExpr;
5528 if (!CondExprBool) std::swap(a&: live, b&: dead);
5529
5530    // If the dead side doesn't have labels we need, just emit the live part.
5531 if (!CGF.ContainsLabel(S: dead)) {
5532 if (CondExprBool) {
5533 if (llvm::EnableSingleByteCoverage) {
5534 CGF.incrementProfileCounter(S: lhsExpr);
5535 CGF.incrementProfileCounter(S: rhsExpr);
5536 }
5537 CGF.incrementProfileCounter(S: E);
5538 }
5539 Value *Result = Visit(E: live);
5540 CGF.markStmtMaybeUsed(S: dead);
5541
5542 // If the live part is a throw expression, it acts like it has a void
5543 // type, so evaluating it returns a null Value*. However, a conditional
5544 // with non-void type must return a non-null Value*.
5545 if (!Result && !E->getType()->isVoidType())
5546 Result = llvm::UndefValue::get(T: CGF.ConvertType(T: E->getType()));
5547
5548 return Result;
5549 }
5550 }
5551
5552 // OpenCL: If the condition is a vector, we can treat this condition like
5553 // the select function.
5554 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
5555 condExpr->getType()->isExtVectorType()) {
5556 CGF.incrementProfileCounter(S: E);
5557
5558 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
5559 llvm::Value *LHS = Visit(E: lhsExpr);
5560 llvm::Value *RHS = Visit(E: rhsExpr);
5561
5562 llvm::Type *condType = ConvertType(T: condExpr->getType());
5563 auto *vecTy = cast<llvm::FixedVectorType>(Val: condType);
5564
5565 unsigned numElem = vecTy->getNumElements();
5566 llvm::Type *elemType = vecTy->getElementType();
5567
5568 llvm::Value *zeroVec = llvm::Constant::getNullValue(Ty: vecTy);
5569 llvm::Value *TestMSB = Builder.CreateICmpSLT(LHS: CondV, RHS: zeroVec);
5570 llvm::Value *tmp = Builder.CreateSExt(
5571 V: TestMSB, DestTy: llvm::FixedVectorType::get(ElementType: elemType, NumElts: numElem), Name: "sext");
5572 llvm::Value *tmp2 = Builder.CreateNot(V: tmp);
5573
5574 // Cast float to int to perform ANDs if necessary.
5575 llvm::Value *RHSTmp = RHS;
5576 llvm::Value *LHSTmp = LHS;
5577 bool wasCast = false;
5578 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(Val: RHS->getType());
5579 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5580 RHSTmp = Builder.CreateBitCast(V: RHS, DestTy: tmp2->getType());
5581 LHSTmp = Builder.CreateBitCast(V: LHS, DestTy: tmp->getType());
5582 wasCast = true;
5583 }
5584
5585 llvm::Value *tmp3 = Builder.CreateAnd(LHS: RHSTmp, RHS: tmp2);
5586 llvm::Value *tmp4 = Builder.CreateAnd(LHS: LHSTmp, RHS: tmp);
5587 llvm::Value *tmp5 = Builder.CreateOr(LHS: tmp3, RHS: tmp4, Name: "cond");
5588 if (wasCast)
5589 tmp5 = Builder.CreateBitCast(V: tmp5, DestTy: RHS->getType());
5590
5591 return tmp5;
5592 }
5593
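// Otherwise, for vector or scalable-vector conditions, compare the condition
// against zero element-wise and lower the conditional to a vector select.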
5594 if (condExpr->getType()->isVectorType() ||
5595 condExpr->getType()->isSveVLSBuiltinType()) {
5596 CGF.incrementProfileCounter(S: E);
5597
5598 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
5599 llvm::Value *LHS = Visit(E: lhsExpr);
5600 llvm::Value *RHS = Visit(E: rhsExpr);
5601
5602 llvm::Type *CondType = ConvertType(T: condExpr->getType());
5603 auto *VecTy = cast<llvm::VectorType>(Val: CondType);
5604 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: VecTy);
5605
5606 CondV = Builder.CreateICmpNE(LHS: CondV, RHS: ZeroVec, Name: "vector_cond");
5607 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "vector_select");
5608 }
5609
5610 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5611 // select instead of as control flow. We can only do this if it is cheap and
5612 // safe to evaluate the LHS and RHS unconditionally.
5613 if (isCheapEnoughToEvaluateUnconditionally(E: lhsExpr, CGF) &&
5614 isCheapEnoughToEvaluateUnconditionally(E: rhsExpr, CGF)) {
5615 llvm::Value *CondV = CGF.EvaluateExprAsBool(E: condExpr);
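    // Zero-extend the boolean condition so it can be used as the step value
    // for the profile counter below.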
5616 llvm::Value *StepV = Builder.CreateZExtOrBitCast(V: CondV, DestTy: CGF.Int64Ty);
5617
5618 if (llvm::EnableSingleByteCoverage) {
5619 CGF.incrementProfileCounter(S: lhsExpr);
5620 CGF.incrementProfileCounter(S: rhsExpr);
5621 CGF.incrementProfileCounter(S: E);
5622 } else
5623 CGF.incrementProfileCounter(S: E, StepV);
5624
5625 llvm::Value *LHS = Visit(E: lhsExpr);
5626 llvm::Value *RHS = Visit(E: rhsExpr);
5627 if (!LHS) {
5628 // If the conditional has void type, make sure we return a null Value*.
5629 assert(!RHS && "LHS and RHS types must match");
5630 return nullptr;
5631 }
5632 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "cond");
5633 }
5634
5635 // If the top of the logical operator nest, reset the MCDC temp to 0.
5636 if (CGF.MCDCLogOpStack.empty())
5637 CGF.maybeResetMCDCCondBitmap(E: condExpr);
5638
5639 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock(name: "cond.true");
5640 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "cond.false");
5641 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "cond.end");
5642
5643 CodeGenFunction::ConditionalEvaluation eval(CGF);
5644 CGF.EmitBranchOnBoolExpr(Cond: condExpr, TrueBlock: LHSBlock, FalseBlock: RHSBlock,
5645 TrueCount: CGF.getProfileCount(S: lhsExpr));
5646
5647 CGF.EmitBlock(BB: LHSBlock);
5648
5649 // If the top of the logical operator nest, update the MCDC bitmap for the
5650 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5651 // may also contain a boolean expression.
5652 if (CGF.MCDCLogOpStack.empty())
5653 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
5654
5655 if (llvm::EnableSingleByteCoverage)
5656 CGF.incrementProfileCounter(S: lhsExpr);
5657 else
5658 CGF.incrementProfileCounter(S: E);
5659
5660 eval.begin(CGF);
5661 Value *LHS = Visit(E: lhsExpr);
5662 eval.end(CGF);
5663
5664 LHSBlock = Builder.GetInsertBlock();
5665 Builder.CreateBr(Dest: ContBlock);
5666
5667 CGF.EmitBlock(BB: RHSBlock);
5668
5669 // If the top of the logical operator nest, update the MCDC bitmap for the
5670 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5671 // may also contain a boolean expression.
5672 if (CGF.MCDCLogOpStack.empty())
5673 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
5674
5675 if (llvm::EnableSingleByteCoverage)
5676 CGF.incrementProfileCounter(S: rhsExpr);
5677
5678 eval.begin(CGF);
5679 Value *RHS = Visit(E: rhsExpr);
5680 eval.end(CGF);
5681
5682 RHSBlock = Builder.GetInsertBlock();
5683 CGF.EmitBlock(BB: ContBlock);
5684
5685 // If the LHS or RHS is a throw expression, it will be legitimately null.
5686 if (!LHS)
5687 return RHS;
5688 if (!RHS)
5689 return LHS;
5690
5691  // Create a PHI node for the result.
5692 llvm::PHINode *PN = Builder.CreatePHI(Ty: LHS->getType(), NumReservedValues: 2, Name: "cond");
5693 PN->addIncoming(V: LHS, BB: LHSBlock);
5694 PN->addIncoming(V: RHS, BB: RHSBlock);
5695
5696  // When single byte coverage mode is enabled, add a counter to the
5697  // continuation block.
5698 if (llvm::EnableSingleByteCoverage)
5699 CGF.incrementProfileCounter(S: E);
5700
5701 return PN;
5702}
5703
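// __builtin_choose_expr: the choice is made at compile time, so simply emit
// the chosen sub-expression.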
5704Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5705 return Visit(E: E->getChosenSubExpr());
5706}
5707
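// Emit a va_arg expression and return its scalar result.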
5708Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5709 Address ArgValue = Address::invalid();
5710 RValue ArgPtr = CGF.EmitVAArg(VE, VAListAddr&: ArgValue);
5711
5712 return ArgPtr.getScalarVal();
5713}
5714
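// Emit a block literal; the scalar result is the block pointer.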
5715Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5716 return CGF.EmitBlockLiteral(block);
5717}
5718
5719// Convert a vec3 to vec4, or vice versa.
5720static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
5721 Value *Src, unsigned NumElementsDst) {
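  // Only the first NumElementsDst entries of the mask are used; the trailing
  // -1 denotes an undefined lane, which only matters when widening to vec4.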
5722 static constexpr int Mask[] = {0, 1, 2, -1};
5723 return Builder.CreateShuffleVector(V: Src, Mask: llvm::ArrayRef(Mask, NumElementsDst));
5724}
5725
5726// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5727// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5728// but could be scalars or vectors of different lengths, and either can be a
5729// pointer.
5730// There are 4 cases:
5731// 1. non-pointer -> non-pointer : needs 1 bitcast
5732// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5733// 3. pointer -> non-pointer
5734// a) pointer -> intptr_t : needs 1 ptrtoint
5735// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5736// 4. non-pointer -> pointer
5737// a) intptr_t -> pointer : needs 1 inttoptr
5738// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5739// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5740// allow casting directly between pointer types and non-integer non-pointer
5741// types.
5742static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
5743 const llvm::DataLayout &DL,
5744 Value *Src, llvm::Type *DstTy,
5745 StringRef Name = "") {
5746 auto SrcTy = Src->getType();
5747
5748 // Case 1.
5749 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5750 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name);
5751
5752 // Case 2.
5753 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5754 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: Src, DestTy: DstTy, Name);
5755
5756 // Case 3.
5757 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5758 // Case 3b.
5759 if (!DstTy->isIntegerTy())
5760 Src = Builder.CreatePtrToInt(V: Src, DestTy: DL.getIntPtrType(SrcTy));
5761 // Cases 3a and 3b.
5762 return Builder.CreateBitOrPointerCast(V: Src, DestTy: DstTy, Name);
5763 }
5764
5765 // Case 4b.
5766 if (!SrcTy->isIntegerTy())
5767 Src = Builder.CreateBitCast(V: Src, DestTy: DL.getIntPtrType(DstTy));
5768 // Cases 4a and 4b.
5769 return Builder.CreateIntToPtr(V: Src, DestTy: DstTy, Name);
5770}
5771
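// __builtin_astype (OpenCL as_type): reinterpret the bits of the source value
// as the destination type, with special handling for 3-element vectors, which
// occupy the storage of a 4-element vector.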
5772Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5773 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
5774 llvm::Type *DstTy = ConvertType(T: E->getType());
5775
5776 llvm::Type *SrcTy = Src->getType();
5777 unsigned NumElementsSrc =
5778 isa<llvm::VectorType>(Val: SrcTy)
5779 ? cast<llvm::FixedVectorType>(Val: SrcTy)->getNumElements()
5780 : 0;
5781 unsigned NumElementsDst =
5782 isa<llvm::VectorType>(Val: DstTy)
5783 ? cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements()
5784 : 0;
5785
5786 // Use bit vector expansion for ext_vector_type boolean vectors.
5787 if (E->getType()->isExtVectorBoolType())
5788 return CGF.emitBoolVecConversion(SrcVec: Src, NumElementsDst, Name: "astype");
5789
5790 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5791 // vector to get a vec4, then a bitcast if the target type is different.
5792 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5793 Src = ConvertVec3AndVec4(Builder, CGF, Src, NumElementsDst: 4);
5794 Src = createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(), Src,
5795 DstTy);
5796
5797 Src->setName("astype");
5798 return Src;
5799 }
5800
5801 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5802 // to vec4 if the original type is not vec4, then a shuffle vector to
5803 // get a vec3.
5804 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5805 auto *Vec4Ty = llvm::FixedVectorType::get(
5806 ElementType: cast<llvm::VectorType>(Val: DstTy)->getElementType(), NumElts: 4);
5807 Src = createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(), Src,
5808 DstTy: Vec4Ty);
5809
5810 Src = ConvertVec3AndVec4(Builder, CGF, Src, NumElementsDst: 3);
5811 Src->setName("astype");
5812 return Src;
5813 }
5814
5815 return createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(),
5816 Src, DstTy, Name: "astype");
5817}
5818
5819Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5820 return CGF.EmitAtomicExpr(E).getScalarVal();
5821}
5822
5823//===----------------------------------------------------------------------===//
5824// Entry Point into this File
5825//===----------------------------------------------------------------------===//
5826
5827/// Emit the computation of the specified expression of scalar type, ignoring
5828/// the result.
5829Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5830 assert(E && hasScalarEvaluationKind(E->getType()) &&
5831 "Invalid scalar expression to emit");
5832
5833 return ScalarExprEmitter(*this, IgnoreResultAssign)
5834 .Visit(E: const_cast<Expr *>(E));
5835}
5836
5837/// Emit a conversion from the specified type to the specified destination type,
5838/// both of which are LLVM scalar types.
5839Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
5840 QualType DstTy,
5841 SourceLocation Loc) {
5842 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5843 "Invalid scalar expression to emit");
5844 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcType: SrcTy, DstType: DstTy, Loc);
5845}
5846
5847/// Emit a conversion from the specified complex type to the specified
5848/// destination type, where the destination type is an LLVM scalar type.
5849Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
5850 QualType SrcTy,
5851 QualType DstTy,
5852 SourceLocation Loc) {
5853 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5854 "Invalid complex -> scalar conversion");
5855 return ScalarExprEmitter(*this)
5856 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5857}
5858
5859
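// Emit E, promoting the result to PromotionType when one is provided;
// otherwise emit it as a normal scalar expression.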
5860Value *
5861CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
5862 QualType PromotionType) {
5863 if (!PromotionType.isNull())
5864 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5865 else
5866 return ScalarExprEmitter(*this).Visit(E: const_cast<Expr *>(E));
5867}
5868
5869
5870llvm::Value *CodeGenFunction::
5871EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
5872 bool isInc, bool isPre) {
5873 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5874}
5875
5876LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
5877 // object->isa or (*object).isa
5878 // Generate code as for: *(Class*)object
5879
5880 Expr *BaseExpr = E->getBase();
5881 Address Addr = Address::invalid();
5882 if (BaseExpr->isPRValue()) {
5883 llvm::Type *BaseTy =
5884 ConvertTypeForMem(T: BaseExpr->getType()->getPointeeType());
5885 Addr = Address(EmitScalarExpr(E: BaseExpr), BaseTy, getPointerAlign());
5886 } else {
5887 Addr = EmitLValue(E: BaseExpr).getAddress();
5888 }
5889
5890 // Cast the address to Class*.
5891 Addr = Addr.withElementType(ElemTy: ConvertType(T: E->getType()));
5892 return MakeAddrLValue(Addr, T: E->getType());
5893}
5894
5895
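// Dispatch a compound assignment (e.g. +=, <<=) to the matching
// EmitCompoundAssignLValue helper; plain binary and assignment opcodes are
// never compound assignments and are unreachable here.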
5896LValue CodeGenFunction::EmitCompoundAssignmentLValue(
5897 const CompoundAssignOperator *E) {
5898 ApplyAtomGroup Grp(getDebugInfo());
5899 ScalarExprEmitter Scalar(*this);
5900 Value *Result = nullptr;
5901 switch (E->getOpcode()) {
5902#define COMPOUND_OP(Op) \
5903 case BO_##Op##Assign: \
5904 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5905 Result)
5906 COMPOUND_OP(Mul);
5907 COMPOUND_OP(Div);
5908 COMPOUND_OP(Rem);
5909 COMPOUND_OP(Add);
5910 COMPOUND_OP(Sub);
5911 COMPOUND_OP(Shl);
5912 COMPOUND_OP(Shr);
5913 COMPOUND_OP(And);
5914 COMPOUND_OP(Xor);
5915 COMPOUND_OP(Or);
5916#undef COMPOUND_OP
5917
5918 case BO_PtrMemD:
5919 case BO_PtrMemI:
5920 case BO_Mul:
5921 case BO_Div:
5922 case BO_Rem:
5923 case BO_Add:
5924 case BO_Sub:
5925 case BO_Shl:
5926 case BO_Shr:
5927 case BO_LT:
5928 case BO_GT:
5929 case BO_LE:
5930 case BO_GE:
5931 case BO_EQ:
5932 case BO_NE:
5933 case BO_Cmp:
5934 case BO_And:
5935 case BO_Xor:
5936 case BO_Or:
5937 case BO_LAnd:
5938 case BO_LOr:
5939 case BO_Assign:
5940 case BO_Comma:
5941 llvm_unreachable("Not valid compound assignment operators");
5942 }
5943
5944 llvm_unreachable("Unhandled compound assignment operator");
5945}
5946
5947struct GEPOffsetAndOverflow {
5948 // The total (signed) byte offset for the GEP.
5949 llvm::Value *TotalOffset;
5950 // The offset overflow flag - true if the total offset overflows.
5951 llvm::Value *OffsetOverflows;
5952};
5953
5954/// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
5955/// and compute the total offset it applies from its base pointer BasePtr.
5956/// Returns the offset in bytes and a boolean flag indicating whether an
5957/// overflow happened during evaluation.
5958static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
5959 llvm::LLVMContext &VMContext,
5960 CodeGenModule &CGM,
5961 CGBuilderTy &Builder) {
5962 const auto &DL = CGM.getDataLayout();
5963
5964 // The total (signed) byte offset for the GEP.
5965 llvm::Value *TotalOffset = nullptr;
5966
5967 // Was the GEP already reduced to a constant?
5968 if (isa<llvm::Constant>(Val: GEPVal)) {
5969 // Compute the offset by casting both pointers to integers and subtracting:
5970 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
5971 Value *BasePtr_int =
5972 Builder.CreatePtrToInt(V: BasePtr, DestTy: DL.getIntPtrType(BasePtr->getType()));
5973 Value *GEPVal_int =
5974 Builder.CreatePtrToInt(V: GEPVal, DestTy: DL.getIntPtrType(GEPVal->getType()));
5975 TotalOffset = Builder.CreateSub(LHS: GEPVal_int, RHS: BasePtr_int);
5976 return {.TotalOffset: TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
5977 }
5978
5979 auto *GEP = cast<llvm::GEPOperator>(Val: GEPVal);
5980 assert(GEP->getPointerOperand() == BasePtr &&
5981 "BasePtr must be the base of the GEP.");
5982 assert(GEP->isInBounds() && "Expected inbounds GEP");
5983
5984 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
5985
5986 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
5987 auto *Zero = llvm::ConstantInt::getNullValue(Ty: IntPtrTy);
5988 auto *SAddIntrinsic =
5989 CGM.getIntrinsic(IID: llvm::Intrinsic::sadd_with_overflow, Tys: IntPtrTy);
5990 auto *SMulIntrinsic =
5991 CGM.getIntrinsic(IID: llvm::Intrinsic::smul_with_overflow, Tys: IntPtrTy);
5992
5993 // The offset overflow flag - true if the total offset overflows.
5994 llvm::Value *OffsetOverflows = Builder.getFalse();
5995
5996 /// Return the result of the given binary operation.
5997 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
5998 llvm::Value *RHS) -> llvm::Value * {
5999 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6000
6001 // If the operands are constants, return a constant result.
6002 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(Val: LHS)) {
6003 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(Val: RHS)) {
6004 llvm::APInt N;
6005 bool HasOverflow = mayHaveIntegerOverflow(LHS: LHSCI, RHS: RHSCI, Opcode,
6006 /*Signed=*/true, Result&: N);
6007 if (HasOverflow)
6008 OffsetOverflows = Builder.getTrue();
6009 return llvm::ConstantInt::get(Context&: VMContext, V: N);
6010 }
6011 }
6012
6013 // Otherwise, compute the result with checked arithmetic.
6014 auto *ResultAndOverflow = Builder.CreateCall(
6015 Callee: (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, Args: {LHS, RHS});
6016 OffsetOverflows = Builder.CreateOr(
6017 LHS: Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 1), RHS: OffsetOverflows);
6018 return Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 0);
6019 };
6020
6021 // Determine the total byte offset by looking at each GEP operand.
6022 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6023 GTI != GTE; ++GTI) {
6024 llvm::Value *LocalOffset;
6025 auto *Index = GTI.getOperand();
6026 // Compute the local offset contributed by this indexing step:
6027 if (auto *STy = GTI.getStructTypeOrNull()) {
6028 // For struct indexing, the local offset is the byte position of the
6029 // specified field.
6030 unsigned FieldNo = cast<llvm::ConstantInt>(Val: Index)->getZExtValue();
6031 LocalOffset = llvm::ConstantInt::get(
6032 Ty: IntPtrTy, V: DL.getStructLayout(Ty: STy)->getElementOffset(Idx: FieldNo));
6033 } else {
6034 // Otherwise this is array-like indexing. The local offset is the index
6035 // multiplied by the element size.
6036 auto *ElementSize =
6037 llvm::ConstantInt::get(Ty: IntPtrTy, V: GTI.getSequentialElementStride(DL));
6038 auto *IndexS = Builder.CreateIntCast(V: Index, DestTy: IntPtrTy, /*isSigned=*/true);
6039 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6040 }
6041
6042 // If this is the first offset, set it as the total offset. Otherwise, add
6043 // the local offset into the running total.
6044 if (!TotalOffset || TotalOffset == Zero)
6045 TotalOffset = LocalOffset;
6046 else
6047 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6048 }
6049
6050 return {.TotalOffset: TotalOffset, .OffsetOverflows: OffsetOverflows};
6051}
6052
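// Emit an inbounds GEP and, if the pointer-overflow sanitizer is enabled, also
// emit checks that the offset arithmetic does not overflow and does not cross
// the null pointer.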
6053Value *
6054CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6055 ArrayRef<Value *> IdxList,
6056 bool SignedIndices, bool IsSubtraction,
6057 SourceLocation Loc, const Twine &Name) {
6058 llvm::Type *PtrTy = Ptr->getType();
6059
6060 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6061 if (!SignedIndices && !IsSubtraction)
6062 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6063
6064 Value *GEPVal = Builder.CreateGEP(Ty: ElemTy, Ptr, IdxList, Name, NW: NWFlags);
6065
6066 // If the pointer overflow sanitizer isn't enabled, do nothing.
6067 if (!SanOpts.has(K: SanitizerKind::PointerOverflow))
6068 return GEPVal;
6069
6070 // Perform nullptr-and-offset check unless the nullptr is defined.
6071 bool PerformNullCheck = !NullPointerIsDefined(
6072 F: Builder.GetInsertBlock()->getParent(), AS: PtrTy->getPointerAddressSpace());
6073 // Check for overflows unless the GEP got constant-folded,
6074  // and only in the default address space.
6075 bool PerformOverflowCheck =
6076 !isa<llvm::Constant>(Val: GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6077
6078 if (!(PerformNullCheck || PerformOverflowCheck))
6079 return GEPVal;
6080
6081 const auto &DL = CGM.getDataLayout();
6082
6083 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6084 auto CheckHandler = SanitizerHandler::PointerOverflow;
6085 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6086 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6087
6088 GEPOffsetAndOverflow EvaluatedGEP =
6089 EmitGEPOffsetInBytes(BasePtr: Ptr, GEPVal, VMContext&: getLLVMContext(), CGM, Builder);
6090
6091 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6092 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6093 "If the offset got constant-folded, we don't expect that there was an "
6094 "overflow.");
6095
6096 auto *Zero = llvm::ConstantInt::getNullValue(Ty: IntPtrTy);
6097
6098 // Common case: if the total offset is zero, don't emit a check.
6099 if (EvaluatedGEP.TotalOffset == Zero)
6100 return GEPVal;
6101
6102 // Now that we've computed the total offset, add it to the base pointer (with
6103 // wrapping semantics).
6104 auto *IntPtr = Builder.CreatePtrToInt(V: Ptr, DestTy: IntPtrTy);
6105 auto *ComputedGEP = Builder.CreateAdd(LHS: IntPtr, RHS: EvaluatedGEP.TotalOffset);
6106
6107 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
6108 2>
6109 Checks;
6110
6111 if (PerformNullCheck) {
6112 // If the base pointer evaluates to a null pointer value,
6113 // the only valid pointer this inbounds GEP can produce is also
6114 // a null pointer, so the offset must also evaluate to zero.
6115    // Likewise, if we have a non-zero base pointer, we can not get a null
6116    // pointer as a result, so the offset can not be -intptr_t(BasePtr).
6117 // In other words, both pointers are either null, or both are non-null,
6118 // or the behaviour is undefined.
6119 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Arg: Ptr);
6120 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(Arg: ComputedGEP);
6121 auto *Valid = Builder.CreateICmpEQ(LHS: BaseIsNotNullptr, RHS: ResultIsNotNullptr);
6122 Checks.emplace_back(Args&: Valid, Args&: CheckOrdinal);
6123 }
6124
6125 if (PerformOverflowCheck) {
6126 // The GEP is valid if:
6127 // 1) The total offset doesn't overflow, and
6128 // 2) The sign of the difference between the computed address and the base
6129 // pointer matches the sign of the total offset.
6130 llvm::Value *ValidGEP;
6131 auto *NoOffsetOverflow = Builder.CreateNot(V: EvaluatedGEP.OffsetOverflows);
6132 if (SignedIndices) {
6133 // GEP is computed as `unsigned base + signed offset`, therefore:
6134 // * If offset was positive, then the computed pointer can not be
6135 // [unsigned] less than the base pointer, unless it overflowed.
6136 // * If offset was negative, then the computed pointer can not be
6137      //   [unsigned] greater than the base pointer, unless it overflowed.
6138 auto *PosOrZeroValid = Builder.CreateICmpUGE(LHS: ComputedGEP, RHS: IntPtr);
6139 auto *PosOrZeroOffset =
6140 Builder.CreateICmpSGE(LHS: EvaluatedGEP.TotalOffset, RHS: Zero);
6141 llvm::Value *NegValid = Builder.CreateICmpULT(LHS: ComputedGEP, RHS: IntPtr);
6142 ValidGEP =
6143 Builder.CreateSelect(C: PosOrZeroOffset, True: PosOrZeroValid, False: NegValid);
6144 } else if (!IsSubtraction) {
6145 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6146 // computed pointer can not be [unsigned] less than base pointer,
6147 // unless there was an overflow.
6148 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6149 ValidGEP = Builder.CreateICmpUGE(LHS: ComputedGEP, RHS: IntPtr);
6150 } else {
6151 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6152 // computed pointer can not be [unsigned] greater than base pointer,
6153 // unless there was an overflow.
6154 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6155 ValidGEP = Builder.CreateICmpULE(LHS: ComputedGEP, RHS: IntPtr);
6156 }
6157 ValidGEP = Builder.CreateAnd(LHS: ValidGEP, RHS: NoOffsetOverflow);
6158 Checks.emplace_back(Args&: ValidGEP, Args&: CheckOrdinal);
6159 }
6160
6161 assert(!Checks.empty() && "Should have produced some checks.");
6162
6163 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6164 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6165 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6166 EmitCheck(Checked: Checks, Check: CheckHandler, StaticArgs, DynamicArgs);
6167
6168 return GEPVal;
6169}
6170
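// Address-based overload: without the pointer-overflow sanitizer this is a
// plain inbounds GEP on the Address; otherwise defer to the raw-pointer
// variant above so the sanitizer checks are emitted.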
6171Address CodeGenFunction::EmitCheckedInBoundsGEP(
6172 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6173 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6174 const Twine &Name) {
6175 if (!SanOpts.has(K: SanitizerKind::PointerOverflow)) {
6176 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6177 if (!SignedIndices && !IsSubtraction)
6178 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6179
6180 return Builder.CreateGEP(Addr, IdxList, ElementType: elementType, Align, Name, NW: NWFlags);
6181 }
6182
6183 return RawAddress(
6184 EmitCheckedInBoundsGEP(ElemTy: Addr.getElementType(), Ptr: Addr.emitRawPointer(CGF&: *this),
6185 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6186 elementType, Align);
6187}
6188
