//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
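/// For example, with 8-bit signed operands, 100 + 100 wraps to -56 and
/// reports overflow, while 20 + 30 computes 50 exactly and reports none.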
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, used for error reporting. May not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow the usual arithmetic conversions and both
  /// operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++11 nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
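/// For example, in 'short a, b; ... a + b ...' each operand is implicitly
/// promoted to 'int'; the unwidened type of either operand is 'short'.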
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  if (Op.Ty->isSignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  if (Op.Ty->isUnsignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);

  if (UO && UO->getOpcode() == UO_Minus &&
      Ctx.getLangOpts().isOverflowPatternExcluded(
          LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
      UO->isIntegerConstantExpr(Ctx))
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (UO)
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  if (BO->hasExcludedOverflowPattern())
    return true;

  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either of
  // the unpromoted types is less than half the size of the promoted type.
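  // For example, two 'unsigned char' operands promoted to 32-bit 'int': each
  // factor fits in 8 bits, so the product fits in 16 bits and cannot wrap.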
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here, unless the
        // alignment-assumption sanitizer is enabled; in that case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// an l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after
  /// conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and another fixed point type, or a
  /// fixed point and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
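    // Note that an unordered compare is used, so NaN converts to true: in C,
    // NaN != 0 holds.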
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()),
                                         QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
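    // For example, in 'int c = (a == b); if (c) ...', the comparison already
    // produced an i1; peel off the zext instead of emitting a fresh compare
    // against zero.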
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot(const UnaryOperator *E);
  Value *VisitUnaryLNot(const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    if (E->isStoredAsBoolean())
      return llvm::ConstantInt::get(ConvertType(E->getType()),
                                    E->getBoolValue());
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getAPValue().getInt());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS,
                                       LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
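  /// The operation is emitted with the llvm.*.with.overflow intrinsics; the
  /// overflow flag then feeds either a trap or a UBSan runtime handler call.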
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide the LHS of a shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shift constraints in OpenCL: mask the shift amount for
  // power-of-two widths, use urem for non-power-of-two widths.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr(const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

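  // Compute the type to which values of type Ty should be promoted when
  // excess precision is in use (e.g. _Float16 arithmetic performed in float),
  // or a null QualType if no promotion is needed.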
  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements,
                                 VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    ApplyAtomGroup Grp(CGF.getDebugInfo());                                    \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                       \
  Value *VisitBin##CODE(const BinaryOperator *E) {                             \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,              \
                       llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign(const BinaryOperator *E);

  Value *VisitBinLAnd(const BinaryOperator *E);
  Value *VisitBinLOr(const BinaryOperator *E);
  Value *VisitBinComma(const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
  auto CheckHandler = SanitizerHandler::FloatCastOverflow;
  SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
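  // For example, for float -> signed i8, every in-range source value lies in
  // the open interval (-129.0, 128.0); the bounds computed below make the
  // check exactly 'Src > -129.0 && Src < 128.0'.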
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
                OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned
  // truncation. Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerKind::SanitizerOrdinal Ordinal;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Ordinal));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  {
    // We don't know the check kind until we call
    // EmitIntegerTruncationCheckHelper, but we want to annotate
    // EmitIntegerTruncationCheckHelper's instructions too.
    SanitizerDebugLocation SanScope(
        &CGF,
        {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
         SanitizerKind::SO_ImplicitSignedIntegerTruncation},
        CheckHandler);
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);

  // Does some SSCL ignore this type?
  if (CGF.getContext().isTypeIgnoredBySanitizer(
          SanitizerMask::bitPosToMask(Check.second.second), DstType))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
}

static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign.)
  // So we should just equality-compare the negativity statuses.
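  // For example, converting the i32 value -1 to u32 yields 4294967295: the
  // source is negative but the destination is not, so the statuses differ.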
1227 llvm::Value *Check = nullptr;
1228 Check = Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "signchangecheck");
1229 // If the comparison result is 'false', then the conversion changed the sign.
1230 return std::make_pair(
1231 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1232 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitIntegerSignChange));
1233}
1234
1235void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1236 Value *Dst, QualType DstType,
1237 SourceLocation Loc) {
1238 if (!CGF.SanOpts.has(O: SanitizerKind::SO_ImplicitIntegerSignChange))
1239 return;
1240
1241 llvm::Type *SrcTy = Src->getType();
1242 llvm::Type *DstTy = Dst->getType();
1243
1244 // We only care about int->int conversions here.
1245 // We ignore conversions to/from pointer and/or bool.
1246 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1247 DstType))
1248 return;
1249
1250 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1251 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1252 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1253 unsigned DstBits = DstTy->getScalarSizeInBits();
1254
1255 // Now, we do not need to emit the check in *all* of the cases.
1256 // We can avoid emitting it in some obvious cases where it would have been
1257 // dropped by the opt passes (instcombine) always anyways.
1258 // If it's a cast between effectively the same type, no check.
1259 // NOTE: this is *not* equivalent to checking the canonical types.
1260 if (SrcSigned == DstSigned && SrcBits == DstBits)
1261 return;
1262 // At least one of the values needs to have signed type.
1263 // If both are unsigned, then obviously, neither of them can be negative.
1264 if (!SrcSigned && !DstSigned)
1265 return;
1266 // If the conversion is to *larger* *signed* type, then no check is needed.
1267 // Because either sign-extension happens (so the sign will remain),
1268 // or zero-extension will happen (the sign bit will be zero.)
1269 if ((DstBits > SrcBits) && DstSigned)
1270 return;
1271 if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
1272 (SrcBits > DstBits) && SrcSigned) {
1273 // If the signed integer truncation sanitizer is enabled,
1274 // and this is a truncation from signed type, then no check is needed.
1275 // Because here sign change check is interchangeable with truncation check.
1276 return;
1277 }
1278 // Does an SSCL have an entry for the DstType under its respective sanitizer
1279 // section?
1280 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1281 Mask: SanitizerKind::ImplicitSignedIntegerTruncation, Ty: DstType))
1282 return;
1283 if (!DstSigned &&
1284 CGF.getContext().isTypeIgnoredBySanitizer(
1285 Mask: SanitizerKind::ImplicitUnsignedIntegerTruncation, Ty: DstType))
1286 return;
1287 // That's it. We can't rule out any more cases with the data we have.
1288
1289 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1290 SanitizerDebugLocation SanScope(
1291 &CGF,
1292 {SanitizerKind::SO_ImplicitIntegerSignChange,
1293 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1294 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1295 CheckHandler);
1296
1297 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1298 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1299 Check;
1300
1301 // Each of these checks needs to return 'false' when an issue was detected.
1302 ImplicitConversionCheckKind CheckKind;
1303 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1304 2>
1305 Checks;
1306 // So we can 'and' all the checks together, and still get 'false',
1307 // if at least one of the checks detected an issue.
1308
1309 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1310 CheckKind = Check.first;
1311 Checks.emplace_back(Args&: Check.second);
1312
1313 if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
1314 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1315 // If the signed integer truncation sanitizer was enabled,
1316 // and we are truncating from larger unsigned type to smaller signed type,
1317 // let's handle the case we skipped in that check.
1318 Check =
1319 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1320 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1321 Checks.emplace_back(Args&: Check.second);
1322 // If the comparison result is 'i1 false', then the truncation was lossy.
1323 }
1324
1325 llvm::Constant *StaticArgs[] = {
1326 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(T: SrcType),
1327 CGF.EmitCheckTypeDescriptor(T: DstType),
1328 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
1329 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: 0)};
1330 // EmitCheck() will 'and' all the checks together.
1331 CGF.EmitCheck(Checked: Checks, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
1332}
1333
1334// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1335// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1336static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1337 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1338EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1339 QualType DstType, CGBuilderTy &Builder) {
1340 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1341 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1342
1343 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1344 if (!SrcSigned && !DstSigned)
1345 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1346 else
1347 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1348
1349 llvm::Value *Check = nullptr;
1350 // 1. Extend the truncated value back to the same width as the Src.
1351 Check = Builder.CreateIntCast(V: Dst, DestTy: Src->getType(), isSigned: DstSigned, Name: "bf.anyext");
1352 // 2. Equality-compare with the original source value
1353 Check = Builder.CreateICmpEQ(LHS: Check, RHS: Src, Name: "bf.truncheck");
1354 // If the comparison result is 'i1 false', then the truncation was lossy.
1355
1356 return std::make_pair(
1357 x&: Kind,
1358 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1359}
1360
1361// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1362// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1363static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1364 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1365EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1366 QualType DstType, CGBuilderTy &Builder) {
1367 // 1. Was the old Value negative?
1368 llvm::Value *SrcIsNegative =
1369 EmitIsNegativeTestHelper(V: Src, VType: SrcType, Name: "bf.src", Builder);
1370 // 2. Is the new Value negative?
1371 llvm::Value *DstIsNegative =
1372 EmitIsNegativeTestHelper(V: Dst, VType: DstType, Name: "bf.dst", Builder);
1373 // 3. Now, was the 'negativity status' preserved during the conversion?
1374 // NOTE: conversion from negative to zero is considered to change the sign.
1375 // (We want to get 'false' when the conversion changed the sign)
1376 // So we should just equality-compare the negativity statuses.
1377 llvm::Value *Check = nullptr;
1378 Check =
1379 Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "bf.signchangecheck");
1380 // If the comparison result is 'false', then the conversion changed the sign.
1381 return std::make_pair(
1382 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1383 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1384}
1385
1386void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
1387 Value *Dst, QualType DstType,
1388 const CGBitFieldInfo &Info,
1389 SourceLocation Loc) {
1390
1391 if (!SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion))
1392 return;
1393
1394 // We only care about int->int conversions here.
1395 // We ignore conversions to/from pointer and/or bool.
1396 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1397 DstType))
1398 return;
1399
1400 if (DstType->isBooleanType() || SrcType->isBooleanType())
1401 return;
1402
1403 // This should be truncation of integral types.
1404 assert(isa<llvm::IntegerType>(Src->getType()) &&
1405 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1406
1407 // TODO: Calculate src width to avoid emitting code
1408 // for unnecessary cases.
1409 unsigned SrcBits = ConvertType(T: SrcType)->getScalarSizeInBits();
1410 unsigned DstBits = Info.Size;
1411
1412 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1413 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1414
1415 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1416 SanitizerDebugLocation SanScope(
1417 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1418
1419 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1420 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1421 Check;
1422
1423 // Truncation
1424 bool EmitTruncation = DstBits < SrcBits;
1425 // If Dst is signed and Src unsigned, we want to be more specific
1426 // about the CheckKind we emit; in this case we want to emit
1427 // ICCK_SignedIntegerTruncationOrSignChange.
1428 bool EmitTruncationFromUnsignedToSigned =
1429 EmitTruncation && DstSigned && !SrcSigned;
1430 // Sign change
1431 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1432 bool BothUnsigned = !SrcSigned && !DstSigned;
1433 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1434 // We can avoid emitting sign change checks in some obvious cases:
1435 // 1. If Src and Dst have the same signedness and size.
1436 // 2. If both are unsigned, a sign check is unnecessary.
1437 // 3. If Dst is signed and wider than Src, either
1438 // sign-extension or zero-extension will make sure
1439 // the sign remains.
1440 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1441
1442 if (EmitTruncation)
1443 Check =
1444 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1445 else if (EmitSignChange) {
1446 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1447 "either the widths should be different, or the signednesses.");
1448 Check =
1449 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1450 } else
1451 return;
1452
1453 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1454 if (EmitTruncationFromUnsignedToSigned)
1455 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1456
1457 llvm::Constant *StaticArgs[] = {
1458 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(T: SrcType),
1459 EmitCheckTypeDescriptor(T: DstType),
1460 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
1461 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Info.Size)};
1462
1463 EmitCheck(Checked: Check.second, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
1464}
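
// A hypothetical example of when this fires (not taken from the test suite):
// with -fsanitize=implicit-bitfield-conversion and
//   struct S { unsigned u : 4; int s : 4; };
//   void f(struct S *p, int x) { p->u = x; p->s = x; }
// both stores emit a truncation check, because Info.Size (4) is smaller than
// the 32-bit source width, and both are classified as
// ICCK_SignedIntegerTruncation since the source is signed.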
1465
1466Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1467 QualType DstType, llvm::Type *SrcTy,
1468 llvm::Type *DstTy,
1469 ScalarConversionOpts Opts) {
1470 // The Element types determine the type of cast to perform.
1471 llvm::Type *SrcElementTy;
1472 llvm::Type *DstElementTy;
1473 QualType SrcElementType;
1474 QualType DstElementType;
1475 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1476 SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1477 DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1478 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1479 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1480 } else {
1481 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1482 "cannot cast between matrix and non-matrix types");
1483 SrcElementTy = SrcTy;
1484 DstElementTy = DstTy;
1485 SrcElementType = SrcType;
1486 DstElementType = DstType;
1487 }
1488
1489 if (isa<llvm::IntegerType>(Val: SrcElementTy)) {
1490 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1491 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1492 InputSigned = true;
1493 }
1494
1495 if (isa<llvm::IntegerType>(Val: DstElementTy))
1496 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1497 if (InputSigned)
1498 return Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1499 return Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1500 }
1501
1502 if (isa<llvm::IntegerType>(Val: DstElementTy)) {
1503 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1504 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1505
1506 // If we can't recognize overflow as undefined behavior, assume that
1507 // overflow saturates. This protects against normal optimizations if we are
1508 // compiling with non-standard FP semantics.
1509 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1510 llvm::Intrinsic::ID IID =
1511 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1512 return Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID, Tys: {DstTy, SrcTy}), Args: Src);
1513 }
1514
1515 if (IsSigned)
1516 return Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1517 return Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1518 }
1519
1520 if (DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy()) {
1521 Value *FloatVal = Builder.CreateFPExt(V: Src, DestTy: Builder.getFloatTy(), Name: "fpext");
1522 return Builder.CreateFPTrunc(V: FloatVal, DestTy: DstTy, Name: "fptrunc");
1523 }
1524 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1525 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1526 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1527}
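
// A sketch of the non-strict float-to-int path above (hypothetical types,
// names illustrative): with -fno-strict-float-cast-overflow, a 'float' to
// 'unsigned' conversion lowers to the saturating intrinsic
//   %conv = call i32 @llvm.fptoui.sat.i32.f32(float %src)
// instead of a plain 'fptoui', so out-of-range inputs clamp rather than
// producing poison.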
1528
1529/// Emit a conversion from the specified type to the specified destination type,
1530/// both of which are LLVM scalar types.
1531Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1532 QualType DstType,
1533 SourceLocation Loc,
1534 ScalarConversionOpts Opts) {
1535 // All conversions involving fixed point types should be handled by the
1536 // EmitFixedPoint family functions. This is done to prevent bloating up this
1537 // function more, and although fixed point numbers are represented by
1538 // integers, we do not want to follow any logic that assumes they should be
1539 // treated as integers.
1540 // TODO(leonardchan): When necessary, add another if statement checking for
1541 // conversions to fixed point types from other types.
1542 if (SrcType->isFixedPointType()) {
1543 if (DstType->isBooleanType())
1544 // It is important that we check this before checking if the dest type is
1545 // an integer because booleans are technically integer types.
1546 // We do not need to check the padding bit on unsigned types if unsigned
1547 // padding is enabled because overflow into this bit is undefined
1548 // behavior.
1549 return Builder.CreateIsNotNull(Arg: Src, Name: "tobool");
1550 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1551 DstType->isRealFloatingType())
1552 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1553
1554 llvm_unreachable(
1555 "Unhandled scalar conversion from a fixed point type to another type.");
1556 } else if (DstType->isFixedPointType()) {
1557 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1558 // This also includes converting booleans and enums to fixed point types.
1559 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1560
1561 llvm_unreachable(
1562 "Unhandled scalar conversion to a fixed point type from another type.");
1563 }
1564
1565 QualType NoncanonicalSrcType = SrcType;
1566 QualType NoncanonicalDstType = DstType;
1567
1568 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1569 DstType = CGF.getContext().getCanonicalType(T: DstType);
1570 if (SrcType == DstType) return Src;
1571
1572 if (DstType->isVoidType()) return nullptr;
1573
1574 llvm::Value *OrigSrc = Src;
1575 QualType OrigSrcType = SrcType;
1576 llvm::Type *SrcTy = Src->getType();
1577
1578 // Handle conversions to bool first, they are special: comparisons against 0.
1579 if (DstType->isBooleanType())
1580 return EmitConversionToBool(Src, SrcType);
1581
1582 llvm::Type *DstTy = ConvertType(T: DstType);
1583
1584 // Cast from half through float if half isn't a native type.
1585 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1586 // Cast to FP using the intrinsic if the half type itself isn't supported.
1587 if (DstTy->isFloatingPointTy()) {
1588 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1589 return Builder.CreateCall(
1590 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1591 Src);
1592 } else {
1593 // Cast to other types through float, using either the intrinsic or FPExt,
1594 // depending on whether the half type itself is supported
1595 // (as opposed to operations on half, available with NativeHalfType).
1596 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1597 Src = Builder.CreateCall(
1598 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1599 CGF.CGM.FloatTy),
1600 Src);
1601 } else {
1602 Src = Builder.CreateFPExt(V: Src, DestTy: CGF.CGM.FloatTy, Name: "conv");
1603 }
1604 SrcType = CGF.getContext().FloatTy;
1605 SrcTy = CGF.FloatTy;
1606 }
1607 }
1608
1609 // Ignore conversions like int -> uint.
1610 if (SrcTy == DstTy) {
1611 if (Opts.EmitImplicitIntegerSignChangeChecks)
1612 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Src,
1613 DstType: NoncanonicalDstType, Loc);
1614
1615 return Src;
1616 }
1617
1618 // Handle pointer conversions next: pointers can only be converted to/from
1619 // other pointers and integers. Check for pointer types in terms of LLVM, as
1620 // some native types (like Obj-C id) may map to a pointer type.
1621 if (auto DstPT = dyn_cast<llvm::PointerType>(Val: DstTy)) {
1622 // The source value may be an integer, or a pointer.
1623 if (isa<llvm::PointerType>(Val: SrcTy))
1624 return Src;
1625
1626 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1627 // First, convert to the correct width so that we control the kind of
1628 // extension.
1629 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1630 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1631 llvm::Value* IntResult =
1632 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
1633 // Then, cast to pointer.
1634 return Builder.CreateIntToPtr(V: IntResult, DestTy: DstTy, Name: "conv");
1635 }
1636
1637 if (isa<llvm::PointerType>(Val: SrcTy)) {
1638 // Must be a ptr to int cast.
1639 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1640 return Builder.CreatePtrToInt(V: Src, DestTy: DstTy, Name: "conv");
1641 }
1642
1643 // A scalar can be splatted to an extended vector of the same element type
1644 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1645 // Sema should add casts to make sure that the source expression's type is
1646 // the same as the vector's element type (sans qualifiers)
1647 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1648 SrcType.getTypePtr() &&
1649 "Splatted expr doesn't match with vector element type?");
1650
1651 // Splat the element across to all elements
1652 unsigned NumElements = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements();
1653 return Builder.CreateVectorSplat(NumElts: NumElements, V: Src, Name: "splat");
1654 }
1655
1656 if (SrcType->isMatrixType() && DstType->isMatrixType())
1657 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1658
1659 if (isa<llvm::VectorType>(Val: SrcTy) || isa<llvm::VectorType>(Val: DstTy)) {
1660 // Allow bitcast from vector to integer/fp of the same size.
1661 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1662 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1663 if (SrcSize == DstSize)
1664 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name: "conv");
1665
1666 // Conversions between vectors of different sizes are not allowed except
1667 // when vectors of half are involved. Operations on storage-only half
1668 // vectors require promoting half vector operands to float vectors and
1669 // truncating the result, which is either an int or float vector, to a
1670 // short or half vector.
1671
1672 // Source and destination are both expected to be vectors.
1673 llvm::Type *SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1674 llvm::Type *DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1675 (void)DstElementTy;
1676
1677 assert(((SrcElementTy->isIntegerTy() &&
1678 DstElementTy->isIntegerTy()) ||
1679 (SrcElementTy->isFloatingPointTy() &&
1680 DstElementTy->isFloatingPointTy())) &&
1681 "unexpected conversion between a floating-point vector and an "
1682 "integer vector");
1683
1684 // Truncate an i32 vector to an i16 vector.
1685 if (SrcElementTy->isIntegerTy())
1686 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: false, Name: "conv");
1687
1688 // Truncate a float vector to a half vector.
1689 if (SrcSize > DstSize)
1690 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1691
1692 // Promote a half vector to a float vector.
1693 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1694 }
1695
1696 // Finally, we have the arithmetic types: real int/float.
1697 Value *Res = nullptr;
1698 llvm::Type *ResTy = DstTy;
1699
1700 // An overflowing conversion has undefined behavior if either the source type
1701 // or the destination type is a floating-point type. However, we consider the
1702 // range of representable values for all floating-point types to be
1703 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1704 // floating-point type.
1705 if (CGF.SanOpts.has(K: SanitizerKind::FloatCastOverflow) &&
1706 OrigSrcType->isFloatingType())
1707 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1708 Loc);
1709
1710 // Cast to half through float if half isn't a native type.
1711 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1712 // Make sure we cast in a single step if from another FP type.
1713 if (SrcTy->isFloatingPointTy()) {
1714 // Use the intrinsic if the half type itself isn't supported
1715 // (as opposed to operations on half, available with NativeHalfType).
1716 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1717 return Builder.CreateCall(
1718 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1719 // If the half type is supported, just use an fptrunc.
1720 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy);
1721 }
1722 DstTy = CGF.FloatTy;
1723 }
1724
1725 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1726
1727 if (DstTy != ResTy) {
1728 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1729 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1730 Res = Builder.CreateCall(
1731 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1732 Res);
1733 } else {
1734 Res = Builder.CreateFPTrunc(V: Res, DestTy: ResTy, Name: "conv");
1735 }
1736 }
1737
1738 if (Opts.EmitImplicitIntegerTruncationChecks)
1739 EmitIntegerTruncationCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1740 DstType: NoncanonicalDstType, Loc);
1741
1742 if (Opts.EmitImplicitIntegerSignChangeChecks)
1743 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1744 DstType: NoncanonicalDstType, Loc);
1745
1746 return Res;
1747}
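
// End-to-end illustration (hypothetical target behavior): converting
// '__fp16' to 'int' without NativeHalfType, on a target where
// useFP16ConversionIntrinsics() is true, goes through float first:
//   %0    = call float @llvm.convert.from.fp16.f32(i16 %h)
//   %conv = fptosi float %0 to i32
// A target with a legal 'half' type instead emits a plain fpext + fptosi.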
1748
1749Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1750 QualType DstTy,
1751 SourceLocation Loc) {
1752 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1753 llvm::Value *Result;
1754 if (SrcTy->isRealFloatingType())
1755 Result = FPBuilder.CreateFloatingToFixed(Src,
1756 DstSema: CGF.getContext().getFixedPointSemantics(Ty: DstTy));
1757 else if (DstTy->isRealFloatingType())
1758 Result = FPBuilder.CreateFixedToFloating(Src,
1759 SrcSema: CGF.getContext().getFixedPointSemantics(Ty: SrcTy),
1760 DstTy: ConvertType(T: DstTy));
1761 else {
1762 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(Ty: SrcTy);
1763 auto DstFPSema = CGF.getContext().getFixedPointSemantics(Ty: DstTy);
1764
1765 if (DstTy->isIntegerType())
1766 Result = FPBuilder.CreateFixedToInteger(Src, SrcSema: SrcFPSema,
1767 DstWidth: DstFPSema.getWidth(),
1768 DstIsSigned: DstFPSema.isSigned());
1769 else if (SrcTy->isIntegerType())
1770 Result = FPBuilder.CreateIntegerToFixed(Src, SrcIsSigned: SrcFPSema.isSigned(),
1771 DstSema: DstFPSema);
1772 else
1773 Result = FPBuilder.CreateFixedToFixed(Src, SrcSema: SrcFPSema, DstSema: DstFPSema);
1774 }
1775 return Result;
1776}
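
// Rough example (assuming a 32-bit signed _Accum with 15 fractional bits;
// the actual semantics come from getFixedPointSemantics): a non-saturating
// 'int' to '_Accum' conversion amounts to scaling by the fractional width,
// roughly
//   %conv = shl i32 %x, 15
// with FixedPointBuilder handling widths, saturation, and signedness.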
1777
1778/// Emit a conversion from the specified complex type to the specified
1779/// destination type, where the destination type is an LLVM scalar type.
1780Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1781 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1782 SourceLocation Loc) {
1783 // Get the source element type.
1784 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1785
1786 // Handle conversions to bool first, they are special: comparisons against 0.
1787 if (DstTy->isBooleanType()) {
1788 // Complex != 0 -> (Real != 0) | (Imag != 0)
1789 Src.first = EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1790 Src.second = EmitScalarConversion(Src: Src.second, SrcType: SrcTy, DstType: DstTy, Loc);
1791 return Builder.CreateOr(LHS: Src.first, RHS: Src.second, Name: "tobool");
1792 }
1793
1794 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1795 // the imaginary part of the complex value is discarded and the value of the
1796 // real part is converted according to the conversion rules for the
1797 // corresponding real type."
1798 return EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1799}
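
// Illustrative IR (hypothetical names): '(bool)z' for '_Complex float z'
// becomes
//   %real.ne = fcmp une float %z.real, 0.000000e+00
//   %imag.ne = fcmp une float %z.imag, 0.000000e+00
//   %tobool  = or i1 %real.ne, %imag.ne
// matching the 'Complex != 0 -> (Real != 0) | (Imag != 0)' rule above.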
1800
1801Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1802 return CGF.EmitFromMemory(Value: CGF.CGM.EmitNullConstant(T: Ty), Ty);
1803}
1804
1805/// Emit a sanitization check for the given "binary" operation (which
1806/// might actually be a unary increment which has been lowered to a binary
1807/// operation). The check passes if all values in \p Checks (which are \c i1),
1808/// are \c true.
1809void ScalarExprEmitter::EmitBinOpCheck(
1810 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1811 const BinOpInfo &Info) {
1812 assert(CGF.IsSanitizerScope);
1813 SanitizerHandler Check;
1814 SmallVector<llvm::Constant *, 4> StaticData;
1815 SmallVector<llvm::Value *, 2> DynamicData;
1816
1817 BinaryOperatorKind Opcode = Info.Opcode;
1818 if (BinaryOperator::isCompoundAssignmentOp(Opc: Opcode))
1819 Opcode = BinaryOperator::getOpForCompoundAssignment(Opc: Opcode);
1820
1821 StaticData.push_back(Elt: CGF.EmitCheckSourceLocation(Loc: Info.E->getExprLoc()));
1822 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Info.E);
1823 if (UO && UO->getOpcode() == UO_Minus) {
1824 Check = SanitizerHandler::NegateOverflow;
1825 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: UO->getType()));
1826 DynamicData.push_back(Elt: Info.RHS);
1827 } else {
1828 if (BinaryOperator::isShiftOp(Opc: Opcode)) {
1829 // Shift LHS negative or too large, or RHS out of bounds.
1830 Check = SanitizerHandler::ShiftOutOfBounds;
1831 const BinaryOperator *BO = cast<BinaryOperator>(Val: Info.E);
1832 StaticData.push_back(
1833 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getLHS()->getType()));
1834 StaticData.push_back(
1835 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getRHS()->getType()));
1836 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1837 // Divide or modulo by zero, or signed overflow (e.g. INT_MAX / -1).
1838 Check = SanitizerHandler::DivremOverflow;
1839 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1840 } else {
1841 // Arithmetic overflow (+, -, *).
1842 switch (Opcode) {
1843 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1844 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1845 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1846 default: llvm_unreachable("unexpected opcode for bin op check");
1847 }
1848 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1849 }
1850 DynamicData.push_back(Elt: Info.LHS);
1851 DynamicData.push_back(Elt: Info.RHS);
1852 }
1853
1854 CGF.EmitCheck(Checked: Checks, Check, StaticArgs: StaticData, DynamicArgs: DynamicData);
1855}
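
// For instance (a sketch, with handler details elided): a sanitized signed
// 'int + int' reaches this function with a single check value derived from
// llvm.sadd.with.overflow, and on failure calls __ubsan_handle_add_overflow
// with StaticData = {source location, type descriptor for 'int'} and
// DynamicData = {LHS, RHS}, mirroring the pushes above.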
1856
1857//===----------------------------------------------------------------------===//
1858// Visitor Methods
1859//===----------------------------------------------------------------------===//
1860
1861Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1862 CGF.ErrorUnsupported(E, "scalar expression");
1863 if (E->getType()->isVoidType())
1864 return nullptr;
1865 return llvm::PoisonValue::get(T: CGF.ConvertType(T: E->getType()));
1866}
1867
1868Value *
1869ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1870 ASTContext &Context = CGF.getContext();
1871 unsigned AddrSpace =
1872 Context.getTargetAddressSpace(AS: CGF.CGM.GetGlobalConstantAddressSpace());
1873 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1874 Str: E->ComputeName(Context), Name: "__usn_str", AddressSpace: AddrSpace);
1875
1876 llvm::Type *ExprTy = ConvertType(T: E->getType());
1877 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: GlobalConstStr, DestTy: ExprTy,
1878 Name: "usn_addr_cast");
1879}
1880
1881Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1882 assert(E->getDataElementCount() == 1);
1883 auto It = E->begin();
1884 return Builder.getInt(AI: (*It)->getValue());
1885}
1886
1887Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1888 // Vector Mask Case
1889 if (E->getNumSubExprs() == 2) {
1890 Value *LHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1891 Value *RHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1892 Value *Mask;
1893
1894 auto *LTy = cast<llvm::FixedVectorType>(Val: LHS->getType());
1895 unsigned LHSElts = LTy->getNumElements();
1896
1897 Mask = RHS;
1898
1899 auto *MTy = cast<llvm::FixedVectorType>(Val: Mask->getType());
1900
1901 // Mask off the high bits of each shuffle index.
1902 Value *MaskBits =
1903 llvm::ConstantInt::get(Ty: MTy, V: llvm::NextPowerOf2(A: LHSElts - 1) - 1);
1904 Mask = Builder.CreateAnd(LHS: Mask, RHS: MaskBits, Name: "mask");
1905
1906 // newv = poison
1907 // mask = mask & maskbits
1908 // for each elt
1909 // n = extract mask i
1910 // x = extract val n
1911 // newv = insert newv, x, i
1912 auto *RTy = llvm::FixedVectorType::get(ElementType: LTy->getElementType(),
1913 NumElts: MTy->getNumElements());
1914 Value* NewV = llvm::PoisonValue::get(T: RTy);
1915 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1916 Value *IIndx = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: i);
1917 Value *Indx = Builder.CreateExtractElement(Vec: Mask, Idx: IIndx, Name: "shuf_idx");
1918
1919 Value *VExt = Builder.CreateExtractElement(Vec: LHS, Idx: Indx, Name: "shuf_elt");
1920 NewV = Builder.CreateInsertElement(Vec: NewV, NewElt: VExt, Idx: IIndx, Name: "shuf_ins");
1921 }
1922 return NewV;
1923 }
1924
1925 Value* V1 = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1926 Value* V2 = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1927
1928 SmallVector<int, 32> Indices;
1929 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1930 llvm::APSInt Idx = E->getShuffleMaskIdx(N: i - 2);
1931 // Check for -1 and output it as a poison mask element in the IR.
1932 if (Idx.isSigned() && Idx.isAllOnes())
1933 Indices.push_back(Elt: -1);
1934 else
1935 Indices.push_back(Elt: Idx.getZExtValue());
1936 }
1937
1938 return Builder.CreateShuffleVector(V1, V2, Mask: Indices, Name: "shuffle");
1939}
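
// Usage sketch (hypothetical source): with 4-element vectors 'a' and 'b',
//   __builtin_shufflevector(a, b, 0, 4)
// lowers directly to
//   %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <2 x i32> <i32 0, i32 4>
// whereas the two-operand variable-mask form takes the extract/insert loop
// at the top of this function.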
1940
1941Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1942 QualType SrcType = E->getSrcExpr()->getType(),
1943 DstType = E->getType();
1944
1945 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
1946
1947 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1948 DstType = CGF.getContext().getCanonicalType(T: DstType);
1949 if (SrcType == DstType) return Src;
1950
1951 assert(SrcType->isVectorType() &&
1952 "ConvertVector source type must be a vector");
1953 assert(DstType->isVectorType() &&
1954 "ConvertVector destination type must be a vector");
1955
1956 llvm::Type *SrcTy = Src->getType();
1957 llvm::Type *DstTy = ConvertType(T: DstType);
1958
1959 // Ignore conversions like int -> uint.
1960 if (SrcTy == DstTy)
1961 return Src;
1962
1963 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1964 DstEltType = DstType->castAs<VectorType>()->getElementType();
1965
1966 assert(SrcTy->isVectorTy() &&
1967 "ConvertVector source IR type must be a vector");
1968 assert(DstTy->isVectorTy() &&
1969 "ConvertVector destination IR type must be a vector");
1970
1971 llvm::Type *SrcEltTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType(),
1972 *DstEltTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1973
1974 if (DstEltType->isBooleanType()) {
1975 assert((SrcEltTy->isFloatingPointTy() ||
1976 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1977
1978 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: SrcTy);
1979 if (SrcEltTy->isFloatingPointTy()) {
1980 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
1981 return Builder.CreateFCmpUNE(LHS: Src, RHS: Zero, Name: "tobool");
1982 } else {
1983 return Builder.CreateICmpNE(LHS: Src, RHS: Zero, Name: "tobool");
1984 }
1985 }
1986
1987 // We have the arithmetic types: real int/float.
1988 Value *Res = nullptr;
1989
1990 if (isa<llvm::IntegerType>(Val: SrcEltTy)) {
1991 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1992 if (isa<llvm::IntegerType>(Val: DstEltTy))
1993 Res = Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1994 else {
1995 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
1996 if (InputSigned)
1997 Res = Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1998 else
1999 Res = Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
2000 }
2001 } else if (isa<llvm::IntegerType>(Val: DstEltTy)) {
2002 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2003 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2004 if (DstEltType->isSignedIntegerOrEnumerationType())
2005 Res = Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
2006 else
2007 Res = Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
2008 } else {
2009 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2010 "Unknown real conversion");
2011 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2012 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2013 Res = Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
2014 else
2015 Res = Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
2016 }
2017
2018 return Res;
2019}
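
// Example (hypothetical types): '__builtin_convertvector(f, int4)' where 'f'
// is a 'float4' emits a single vector conversion,
//   %conv = fptosi <4 x float> %f to <4 x i32>
// with the expression's FP options applied via CGFPOptionsRAII.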
2020
2021Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2022 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(ME: E)) {
2023 CGF.EmitIgnoredExpr(E: E->getBase());
2024 return CGF.emitScalarConstant(Constant, E);
2025 } else {
2026 Expr::EvalResult Result;
2027 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
2028 llvm::APSInt Value = Result.Val.getInt();
2029 CGF.EmitIgnoredExpr(E: E->getBase());
2030 return Builder.getInt(AI: Value);
2031 }
2032 }
2033
2034 llvm::Value *Result = EmitLoadOfLValue(E);
2035
2036 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2037 // debug info for the pointer, even if there is no variable associated with
2038 // the pointer's expression.
2039 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2040 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Val: Result)) {
2041 if (llvm::GetElementPtrInst *GEP =
2042 dyn_cast<llvm::GetElementPtrInst>(Val: Load->getPointerOperand())) {
2043 if (llvm::Instruction *Pointer =
2044 dyn_cast<llvm::Instruction>(Val: GEP->getPointerOperand())) {
2045 QualType Ty = E->getBase()->getType();
2046 if (!E->isArrow())
2047 Ty = CGF.getContext().getPointerType(T: Ty);
2048 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Value: Pointer, Ty);
2049 }
2050 }
2051 }
2052 }
2053 return Result;
2054}
2055
2056Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2057 TestAndClearIgnoreResultAssign();
2058
2059 // Emit subscript expressions in rvalue contexts. For most cases, this just
2060 // loads the lvalue formed by the subscript expr. However, we have to be
2061 // careful, because the base of a vector subscript is occasionally an rvalue,
2062 // so we can't get it as an lvalue.
2063 if (!E->getBase()->getType()->isVectorType() &&
2064 !E->getBase()->getType()->isSveVLSBuiltinType())
2065 return EmitLoadOfLValue(E);
2066
2067 // Handle the vector case. The base must be a vector, the index must be an
2068 // integer value.
2069 Value *Base = Visit(E: E->getBase());
2070 Value *Idx = Visit(E: E->getIdx());
2071 QualType IdxTy = E->getIdx()->getType();
2072
2073 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
2074 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2075
2076 return Builder.CreateExtractElement(Vec: Base, Idx, Name: "vecext");
2077}
2078
2079Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2080 TestAndClearIgnoreResultAssign();
2081
2082 // Handle the matrix case. The base must be a matrix, and the row and
2083 // column indices must be integer values.
2084 Value *RowIdx = CGF.EmitMatrixIndexExpr(E: E->getRowIdx());
2085 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E: E->getColumnIdx());
2086
2087 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2088 unsigned NumRows = MatrixTy->getNumRows();
2089 llvm::MatrixBuilder MB(Builder);
2090 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2091 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2092 MB.CreateIndexAssumption(Idx, NumElements: MatrixTy->getNumElementsFlattened());
2093
2094 Value *Matrix = Visit(E: E->getBase());
2095
2096 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2097 return Builder.CreateExtractElement(Vec: Matrix, Idx, Name: "matrixext");
2098}
2099
2100static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2101 unsigned Off) {
2102 int MV = SVI->getMaskValue(Elt: Idx);
2103 if (MV == -1)
2104 return -1;
2105 return Off + MV;
2106}
2107
2108static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2109 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2110 "Index operand too large for shufflevector mask!");
2111 return C->getZExtValue();
2112}
2113
2114Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2115 bool Ignore = TestAndClearIgnoreResultAssign();
2116 (void)Ignore;
2117 assert(Ignore == false && "init list ignored");
2118 unsigned NumInitElements = E->getNumInits();
2119
2120 // HLSL initialization lists in the AST are an expansion which can contain
2121 // side-effecting expressions wrapped in opaque value expressions. To properly
2122 // emit these we need to emit the opaque values before we emit the argument
2123 // expressions themselves. This is a little hacky, but it prevents us from
2124 // needing to do a bigger AST-level change for a language feature that we
2125 // need to deprecate in the near future. See the related HLSL language
2126 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2127 // * 0005-strict-initializer-lists.md
2128 // * 0032-constructors.md
2129 if (CGF.getLangOpts().HLSL)
2130 CGF.CGM.getHLSLRuntime().emitInitListOpaqueValues(CGF, E);
2131
2132 if (E->hadArrayRangeDesignator())
2133 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2134
2135 llvm::VectorType *VType =
2136 dyn_cast<llvm::VectorType>(ConvertType(T: E->getType()));
2137
2138 if (!VType) {
2139 if (NumInitElements == 0) {
2140 // C++11 value-initialization for the scalar.
2141 return EmitNullValue(Ty: E->getType());
2142 }
2143 // We have a scalar in braces. Just use the first element.
2144 return Visit(E: E->getInit(Init: 0));
2145 }
2146
2147 if (isa<llvm::ScalableVectorType>(Val: VType)) {
2148 if (NumInitElements == 0) {
2149 // C++11 value-initialization for the vector.
2150 return EmitNullValue(Ty: E->getType());
2151 }
2152
2153 if (NumInitElements == 1) {
2154 Expr *InitVector = E->getInit(Init: 0);
2155
2156 // Initialize from another scalable vector of the same type.
2157 if (InitVector->getType().getCanonicalType() ==
2158 E->getType().getCanonicalType())
2159 return Visit(E: InitVector);
2160 }
2161
2162 llvm_unreachable("Unexpected initialization of a scalable vector!");
2163 }
2164
2165 unsigned ResElts = cast<llvm::FixedVectorType>(Val: VType)->getNumElements();
2166
2167 // Loop over initializers collecting the Value for each, and remembering
2168 // whether the source was a swizzle (ExtVectorElementExpr). This will allow
2169 // us to fold the shuffle for the swizzle into the shuffle for the vector
2170 // initializer, since LLVM optimizers generally do not want to touch
2171 // shuffles.
2172 unsigned CurIdx = 0;
2173 bool VIsPoisonShuffle = false;
2174 llvm::Value *V = llvm::PoisonValue::get(T: VType);
2175 for (unsigned i = 0; i != NumInitElements; ++i) {
2176 Expr *IE = E->getInit(Init: i);
2177 Value *Init = Visit(E: IE);
2178 SmallVector<int, 16> Args;
2179
2180 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Val: Init->getType());
2181
2182 // Handle scalar elements. If the scalar initializer is actually one
2183 // element of a different vector of the same width, use shuffle instead of
2184 // extract+insert.
2185 if (!VVT) {
2186 if (isa<ExtVectorElementExpr>(Val: IE)) {
2187 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Val: Init);
2188
2189 if (cast<llvm::FixedVectorType>(Val: EI->getVectorOperandType())
2190 ->getNumElements() == ResElts) {
2191 llvm::ConstantInt *C = cast<llvm::ConstantInt>(Val: EI->getIndexOperand());
2192 Value *LHS = nullptr, *RHS = nullptr;
2193 if (CurIdx == 0) {
2194 // insert into poison -> shuffle (src, poison)
2195 // shufflemask must use an i32
2196 Args.push_back(Elt: getAsInt32(C, I32Ty: CGF.Int32Ty));
2197 Args.resize(N: ResElts, NV: -1);
2198
2199 LHS = EI->getVectorOperand();
2200 RHS = V;
2201 VIsPoisonShuffle = true;
2202 } else if (VIsPoisonShuffle) {
2203 // insert into poison shuffle && size match -> shuffle (v, src)
2204 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(Val: V);
2205 for (unsigned j = 0; j != CurIdx; ++j)
2206 Args.push_back(Elt: getMaskElt(SVI: SVV, Idx: j, Off: 0));
2207 Args.push_back(Elt: ResElts + C->getZExtValue());
2208 Args.resize(N: ResElts, NV: -1);
2209
2210 LHS = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2211 RHS = EI->getVectorOperand();
2212 VIsPoisonShuffle = false;
2213 }
2214 if (!Args.empty()) {
2215 V = Builder.CreateShuffleVector(V1: LHS, V2: RHS, Mask: Args);
2216 ++CurIdx;
2217 continue;
2218 }
2219 }
2220 }
2221 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx: Builder.getInt32(C: CurIdx),
2222 Name: "vecinit");
2223 VIsPoisonShuffle = false;
2224 ++CurIdx;
2225 continue;
2226 }
2227
2228 unsigned InitElts = cast<llvm::FixedVectorType>(Val: VVT)->getNumElements();
2229
2230 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2231 // input is the same width as the vector being constructed, generate an
2232 // optimized shuffle of the swizzle input into the result.
2233 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2234 if (isa<ExtVectorElementExpr>(Val: IE)) {
2235 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Val: Init);
2236 Value *SVOp = SVI->getOperand(i_nocapture: 0);
2237 auto *OpTy = cast<llvm::FixedVectorType>(Val: SVOp->getType());
2238
2239 if (OpTy->getNumElements() == ResElts) {
2240 for (unsigned j = 0; j != CurIdx; ++j) {
2241 // If the current vector initializer is a shuffle with poison, merge
2242 // this shuffle directly into it.
2243 if (VIsPoisonShuffle) {
2244 Args.push_back(Elt: getMaskElt(SVI: cast<llvm::ShuffleVectorInst>(Val: V), Idx: j, Off: 0));
2245 } else {
2246 Args.push_back(Elt: j);
2247 }
2248 }
2249 for (unsigned j = 0, je = InitElts; j != je; ++j)
2250 Args.push_back(Elt: getMaskElt(SVI, Idx: j, Off: Offset));
2251 Args.resize(N: ResElts, NV: -1);
2252
2253 if (VIsPoisonShuffle)
2254 V = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2255
2256 Init = SVOp;
2257 }
2258 }
2259
2260 // Extend init to result vector length, and then shuffle its contribution
2261 // to the vector initializer into V.
2262 if (Args.empty()) {
2263 for (unsigned j = 0; j != InitElts; ++j)
2264 Args.push_back(Elt: j);
2265 Args.resize(N: ResElts, NV: -1);
2266 Init = Builder.CreateShuffleVector(V: Init, Mask: Args, Name: "vext");
2267
2268 Args.clear();
2269 for (unsigned j = 0; j != CurIdx; ++j)
2270 Args.push_back(Elt: j);
2271 for (unsigned j = 0; j != InitElts; ++j)
2272 Args.push_back(Elt: j + Offset);
2273 Args.resize(N: ResElts, NV: -1);
2274 }
2275
2276 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2277 // merging subsequent shuffles into this one.
2278 if (CurIdx == 0)
2279 std::swap(a&: V, b&: Init);
2280 V = Builder.CreateShuffleVector(V1: V, V2: Init, Mask: Args, Name: "vecinit");
2281 VIsPoisonShuffle = isa<llvm::PoisonValue>(Val: Init);
2282 CurIdx += InitElts;
2283 }
2284
2285 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2286 // Emit remaining default initializers.
2287 llvm::Type *EltTy = VType->getElementType();
2288
2290 for (/* Do not initialize CurIdx */; CurIdx < ResElts; ++CurIdx) {
2291 Value *Idx = Builder.getInt32(C: CurIdx);
2292 llvm::Value *Init = llvm::Constant::getNullValue(Ty: EltTy);
2293 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx, Name: "vecinit");
2294 }
2295 return V;
2296}
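
// Sketch of the swizzle folding above (hypothetical vectors): initializing
// 'float4 r = {v.xy, a, b};' where 'v' is a float4 starts with one
//   %vecinit = shufflevector <4 x float> %v, <4 x float> poison,
//                            <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
// and then inserts 'a' and 'b', instead of an extract+insert per element.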
2297
2298static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D) {
2299 return !D->isWeak();
2300}
2301
2302static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2303 E = E->IgnoreParens();
2304
2305 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2306 if (UO->getOpcode() == UO_Deref)
2307 return CGF.isPointerKnownNonNull(E: UO->getSubExpr());
2308
2309 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E))
2310 return isDeclRefKnownNonNull(CGF, D: DRE->getDecl());
2311
2312 if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) {
2313 if (isa<FieldDecl>(Val: ME->getMemberDecl()))
2314 return true;
2315 return isDeclRefKnownNonNull(CGF, D: ME->getMemberDecl());
2316 }
2317
2318 // Array subscripts? Anything else?
2319
2320 return false;
2321}
2322
2323bool CodeGenFunction::isPointerKnownNonNull(const Expr *E) {
2324 assert(E->getType()->isSignableType(getContext()));
2325
2326 E = E->IgnoreParens();
2327
2328 if (isa<CXXThisExpr>(Val: E))
2329 return true;
2330
2331 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2332 if (UO->getOpcode() == UO_AddrOf)
2333 return isLValueKnownNonNull(CGF&: *this, E: UO->getSubExpr());
2334
2335 if (const auto *CE = dyn_cast<CastExpr>(Val: E))
2336 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2337 CE->getCastKind() == CK_ArrayToPointerDecay)
2338 return isLValueKnownNonNull(CGF&: *this, E: CE->getSubExpr());
2339
2340 // Maybe honor __nonnull?
2341
2342 return false;
2343}
2344
2345bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2346 const Expr *E = CE->getSubExpr();
2347
2348 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2349 return false;
2350
2351 if (isa<CXXThisExpr>(Val: E->IgnoreParens())) {
2352 // We always assume that 'this' is never null.
2353 return false;
2354 }
2355
2356 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2357 // And that glvalue casts are never null.
2358 if (ICE->isGLValue())
2359 return false;
2360 }
2361
2362 return true;
2363}
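
// For instance (a sketch of the rules above): given 'struct D : B {};' and
// 'D *dp',
//   B *bp = dp;   // prvalue implicit cast: null-checked unless "unchecked"
//   B &br = *dp;  // glvalue implicit cast: never null-checked here
// only the pointer form may get a null branch around the base adjustment.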
2364
2365// RHS is an aggregate type
2366static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address RHSVal,
2367 QualType RHSTy, QualType LHSTy,
2368 SourceLocation Loc) {
2369 SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList;
2370 SmallVector<QualType, 16> SrcTypes; // Flattened type
2371 CGF.FlattenAccessAndType(Addr: RHSVal, AddrTy: RHSTy, AccessList&: LoadGEPList, FlatTypes&: SrcTypes);
2372 // The LHS is either a vector or a builtin type.
2373 // If it's a vector, create a temp alloca to store into and return that.
2374 if (auto *VecTy = LHSTy->getAs<VectorType>()) {
2375 assert(SrcTypes.size() >= VecTy->getNumElements() &&
2376 "Flattened type on RHS must have at least as many elements as vector on LHS.");
2377 llvm::Value *V =
2378 CGF.Builder.CreateLoad(Addr: CGF.CreateIRTemp(T: LHSTy, Name: "flatcast.tmp"));
2379 // write to V.
2380 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2381 llvm::Value *Load = CGF.Builder.CreateLoad(Addr: LoadGEPList[I].first, Name: "load");
2382 llvm::Value *Idx = LoadGEPList[I].second;
2383 Load = Idx ? CGF.Builder.CreateExtractElement(Vec: Load, Idx, Name: "vec.extract")
2384 : Load;
2385 llvm::Value *Cast = CGF.EmitScalarConversion(
2386 Src: Load, SrcTy: SrcTypes[I], DstTy: VecTy->getElementType(), Loc);
2387 V = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx: I);
2388 }
2389 return V;
2390 }
2391 // If it's a builtin type, just do an extract element or load.
2392 assert(LHSTy->isBuiltinType() &&
2393 "Destination type must be a vector or builtin type.");
2394 llvm::Value *Load = CGF.Builder.CreateLoad(Addr: LoadGEPList[0].first, Name: "load");
2395 llvm::Value *Idx = LoadGEPList[0].second;
2396 Load =
2397 Idx ? CGF.Builder.CreateExtractElement(Vec: Load, Idx, Name: "vec.extract") : Load;
2398 return CGF.EmitScalarConversion(Src: Load, SrcTy: SrcTypes[0], DstTy: LHSTy, Loc);
2399}
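
// Sketch (hypothetical HLSL source): 'int4 v = (int4)s;' for a struct 's'
// with four scalar fields flattens 's' into four loads, converts each
// element with EmitScalarConversion, and assembles the result with four
// insertelement instructions into the 'flatcast.tmp' temporary.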
2400
2401// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2402 // have to handle a broader range of conversions than explicit casts, as they
2403// handle things like function to ptr-to-function decay etc.
2404Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2405 Expr *E = CE->getSubExpr();
2406 QualType DestTy = CE->getType();
2407 CastKind Kind = CE->getCastKind();
2408 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2409
2410 // These cases are generally not written to ignore the result of
2411 // evaluating their sub-expressions, so we clear this now.
2412 bool Ignored = TestAndClearIgnoreResultAssign();
2413
2414 // Since almost all cast kinds apply to scalars, this switch doesn't have
2415 // a default case, so the compiler will warn on a missing case. The cases
2416 // are in the same order as in the CastKind enum.
2417 switch (Kind) {
2418 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2419 case CK_BuiltinFnToFnPtr:
2420 llvm_unreachable("builtin functions are handled elsewhere");
2421
2422 case CK_LValueBitCast:
2423 case CK_ObjCObjectLValueCast: {
2424 Address Addr = EmitLValue(E).getAddress();
2425 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2426 LValue LV = CGF.MakeAddrLValue(Addr, T: DestTy);
2427 return EmitLoadOfLValue(LV, CE->getExprLoc());
2428 }
2429
2430 case CK_LValueToRValueBitCast: {
2431 LValue SourceLVal = CGF.EmitLValue(E);
2432 Address Addr =
2433 SourceLVal.getAddress().withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2434 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2435 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2436 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2437 }
2438
2439 case CK_CPointerToObjCPointerCast:
2440 case CK_BlockPointerToObjCPointerCast:
2441 case CK_AnyPointerToBlockPointerCast:
2442 case CK_BitCast: {
2443 Value *Src = Visit(E: const_cast<Expr*>(E));
2444 llvm::Type *SrcTy = Src->getType();
2445 llvm::Type *DstTy = ConvertType(T: DestTy);
2446
2447 // FIXME: this is a gross but seemingly necessary workaround for an issue
2448 // manifesting when a target uses a non-default AS for indirect sret args,
2449 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2450 // on the address of a local struct that gets returned by value yields an
2451 // invalid bitcast from a pointer to the IndirectAS to a pointer to the
2452 // DefaultAS. We can only do this subversive thing because sret args are
2453 // manufactured, and their residing in the IndirectAS is a target-specific
2454 // detail, and doing an AS cast here still retains the semantics the user
2455 // expects. It is desirable to remove this once a better solution is found.
2456 if (auto A = dyn_cast<llvm::Argument>(Val: Src); A && A->hasStructRetAttr())
2457 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2458 CGF, V: Src, SrcAddr: E->getType().getAddressSpace(), DestTy: DstTy);
2459
2460 assert(
2461 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2462 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2463 "Address-space cast must be used to convert address spaces");
2464
2465 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) {
2466 if (auto *PT = DestTy->getAs<PointerType>()) {
2467 CGF.EmitVTablePtrCheckForCast(
2468 T: PT->getPointeeType(),
2469 Derived: Address(Src,
2470 CGF.ConvertTypeForMem(
2471 T: E->getType()->castAs<PointerType>()->getPointeeType()),
2472 CGF.getPointerAlign()),
2473 /*MayBeNull=*/true, TCK: CodeGenFunction::CFITCK_UnrelatedCast,
2474 Loc: CE->getBeginLoc());
2475 }
2476 }
2477
2478 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2479 const QualType SrcType = E->getType();
2480
2481 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2482 // Casting to pointer that could carry dynamic information (provided by
2483 // invariant.group) requires launder.
2484 Src = Builder.CreateLaunderInvariantGroup(Ptr: Src);
2485 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2486 // Casting to pointer that does not carry dynamic information (provided
2487 // by invariant.group) requires stripping it. Note that we don't do it
2488 // if the source could not be dynamic type and destination could be
2489 // dynamic because dynamic information is already laundered. It is
2490 // because launder(strip(src)) == launder(src), so there is no need to
2491 // add extra strip before launder.
2492 Src = Builder.CreateStripInvariantGroup(Ptr: Src);
2493 }
2494 }
2495
2496 // Update heapallocsite metadata when there is an explicit pointer cast.
2497 if (auto *CI = dyn_cast<llvm::CallBase>(Val: Src)) {
2498 if (CI->getMetadata(Kind: "heapallocsite") && isa<ExplicitCastExpr>(Val: CE) &&
2499 !isa<CastExpr>(Val: E)) {
2500 QualType PointeeType = DestTy->getPointeeType();
2501 if (!PointeeType.isNull())
2502 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: PointeeType,
2503 Loc: CE->getExprLoc());
2504 }
2505 }
2506
2507 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2508 // same element type, use the llvm.vector.insert intrinsic to perform the
2509 // bitcast.
2510 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2511 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2512 // If we are casting a fixed i8 vector to a scalable i1 predicate
2513 // vector, use a vector insert and bitcast the result.
2514 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2515 FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2516 ScalableDstTy = llvm::ScalableVectorType::get(
2517 FixedSrcTy->getElementType(),
2518 llvm::divideCeil(
2519 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
2520 }
2521 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2522 llvm::Value *PoisonVec = llvm::PoisonValue::get(T: ScalableDstTy);
2523 llvm::Value *Result = Builder.CreateInsertVector(
2524 ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
2525 ScalableDstTy = cast<llvm::ScalableVectorType>(
2526 llvm::VectorType::getWithSizeAndScalar(SizeTy: ScalableDstTy, EltTy: DstTy));
2527 if (Result->getType() != ScalableDstTy)
2528 Result = Builder.CreateBitCast(V: Result, DestTy: ScalableDstTy);
2529 if (Result->getType() != DstTy)
2530 Result = Builder.CreateExtractVector(DstType: DstTy, SrcVec: Result, Idx: uint64_t(0));
2531 return Result;
2532 }
2533 }
2534 }
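
// e.g. (hypothetical widths): casting a fixed '<4 x i32>' to a scalable
// 'svint32_t' becomes
//   %cast.scalable = call <vscale x 4 x i32>
//       @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison,
//                                         <4 x i32> %src, i64 0)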
2535
2536 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2537 // same element type, use the llvm.vector.extract intrinsic to perform the
2538 // bitcast.
2539 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(Val: SrcTy)) {
2540 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2541 // If we are casting a scalable i1 predicate vector to a fixed i8
2542 // vector, bitcast the source and use a vector extract.
2543 if (ScalableSrcTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2544 FixedDstTy->getElementType()->isIntegerTy(8)) {
2545 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(RHS: 8)) {
2546 ScalableSrcTy = llvm::ScalableVectorType::get(
2547 ElementType: ScalableSrcTy->getElementType(),
2548 MinNumElts: llvm::alignTo<8>(
2549 Value: ScalableSrcTy->getElementCount().getKnownMinValue()));
2550 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: ScalableSrcTy);
2551 Src = Builder.CreateInsertVector(DstType: ScalableSrcTy, SrcVec: ZeroVec, SubVec: Src,
2552 Idx: uint64_t(0));
2553 }
2554
2555 ScalableSrcTy = llvm::ScalableVectorType::get(
2556 FixedDstTy->getElementType(),
2557 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2558 Src = Builder.CreateBitCast(V: Src, DestTy: ScalableSrcTy);
2559 }
2560 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2561 return Builder.CreateExtractVector(DstType: DstTy, SrcVec: Src, Idx: uint64_t(0),
2562 Name: "cast.fixed");
2563 }
2564 }
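
// The reverse direction (again with hypothetical widths) is a single
//   %cast.fixed = call <4 x i32>
//       @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %src, i64 0)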
2565
2566 // Perform VLAT <-> VLST bitcast through memory.
2567 // TODO: since the llvm.vector.{insert,extract} intrinsics
2568 // require the element types of the vectors to be the same, we
2569 // need to keep this around for bitcasts between VLAT <-> VLST where
2570 // the element types of the vectors are not the same, until we figure
2571 // out a better way of doing these casts.
2572 if ((isa<llvm::FixedVectorType>(Val: SrcTy) &&
2573 isa<llvm::ScalableVectorType>(Val: DstTy)) ||
2574 (isa<llvm::ScalableVectorType>(Val: SrcTy) &&
2575 isa<llvm::FixedVectorType>(Val: DstTy))) {
2576 Address Addr = CGF.CreateDefaultAlignTempAlloca(Ty: SrcTy, Name: "saved-value");
2577 LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType());
2578 CGF.EmitStoreOfScalar(value: Src, lvalue: LV);
2579 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2580 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2581 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2582 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2583 }
2584
2585 llvm::Value *Result = Builder.CreateBitCast(V: Src, DestTy: DstTy);
2586 return CGF.authPointerToPointerCast(ResultPtr: Result, SourceType: E->getType(), DestType: DestTy);
2587 }
2588 case CK_AddressSpaceConversion: {
2589 Expr::EvalResult Result;
2590 if (E->EvaluateAsRValue(Result, Ctx: CGF.getContext()) &&
2591 Result.Val.isNullPointer()) {
2592 // If E has side effects, it is emitted even if its final result is a
2593 // null pointer. In that case, a DCE pass should be able to
2594 // eliminate the useless instructions emitted during translating E.
2595 if (Result.HasSideEffects)
2596 Visit(E);
2597 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(
2598 Val: ConvertType(T: DestTy)), QT: DestTy);
2599 }
2600 // Since target may map different address spaces in AST to the same address
2601 // space, an address space conversion may end up as a bitcast.
2602 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2603 CGF, V: Visit(E), SrcAddr: E->getType()->getPointeeType().getAddressSpace(),
2604 DestTy: ConvertType(T: DestTy));
2605 }
2606 case CK_AtomicToNonAtomic:
2607 case CK_NonAtomicToAtomic:
2608 case CK_UserDefinedConversion:
2609 return Visit(E: const_cast<Expr*>(E));
2610
2611 case CK_NoOp: {
2612 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
2613 : Visit(E: const_cast<Expr *>(E));
2614 }
2615
2616 case CK_BaseToDerived: {
2617 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2618 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2619
2620 Address Base = CGF.EmitPointerWithAlignment(Addr: E);
2621 Address Derived =
2622 CGF.GetAddressOfDerivedClass(Value: Base, Derived: DerivedClassDecl,
2623 PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
2624 NullCheckValue: CGF.ShouldNullCheckClassCastValue(CE));
2625
2626 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2627 // performed and the object is not of the derived type.
2628 if (CGF.sanitizePerformTypeCheck())
2629 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2630 Derived, DestTy->getPointeeType());
2631
2632 if (CGF.SanOpts.has(K: SanitizerKind::CFIDerivedCast))
2633 CGF.EmitVTablePtrCheckForCast(T: DestTy->getPointeeType(), Derived,
2634 /*MayBeNull=*/true,
2635 TCK: CodeGenFunction::CFITCK_DerivedCast,
2636 Loc: CE->getBeginLoc());
2637
2638 return CGF.getAsNaturalPointerTo(Addr: Derived, PointeeType: CE->getType()->getPointeeType());
2639 }
2640 case CK_UncheckedDerivedToBase:
2641 case CK_DerivedToBase: {
2642 // The EmitPointerWithAlignment path does this fine; just discard
2643 // the alignment.
2644 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitPointerWithAlignment(CE),
2645 PointeeType: CE->getType()->getPointeeType());
2646 }
2647
2648 case CK_Dynamic: {
2649 Address V = CGF.EmitPointerWithAlignment(Addr: E);
2650 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(Val: CE);
2651 return CGF.EmitDynamicCast(V, DCE);
2652 }
2653
2654 case CK_ArrayToPointerDecay:
2655 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitArrayToPointerDecay(Array: E),
2656 PointeeType: CE->getType()->getPointeeType());
2657 case CK_FunctionToPointerDecay:
2658 return EmitLValue(E).getPointer(CGF);
2659
2660 case CK_NullToPointer:
2661 if (MustVisitNullValue(E))
2662 CGF.EmitIgnoredExpr(E);
2663
2664 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: ConvertType(T: DestTy)),
2665 QT: DestTy);
2666
2667 case CK_NullToMemberPointer: {
2668 if (MustVisitNullValue(E))
2669 CGF.EmitIgnoredExpr(E);
2670
2671 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2672 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2673 }
2674
2675 case CK_ReinterpretMemberPointer:
2676 case CK_BaseToDerivedMemberPointer:
2677 case CK_DerivedToBaseMemberPointer: {
2678 Value *Src = Visit(E);
2679
2680 // Note that the AST doesn't distinguish between checked and
2681 // unchecked member pointer conversions, so we always have to
2682 // implement checked conversions here. This is inefficient when
2683 // actual control flow may be required in order to perform the
2684 // check, which it is for data member pointers (but not member
2685 // function pointers on Itanium and ARM).
2686 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, E: CE, Src);
2687 }
2688
2689 case CK_ARCProduceObject:
2690 return CGF.EmitARCRetainScalarExpr(expr: E);
2691 case CK_ARCConsumeObject:
2692 return CGF.EmitObjCConsumeObject(T: E->getType(), Ptr: Visit(E));
2693 case CK_ARCReclaimReturnedObject:
2694 return CGF.EmitARCReclaimReturnedObject(e: E, /*allowUnsafe*/ allowUnsafeClaim: Ignored);
2695 case CK_ARCExtendBlockObject:
2696 return CGF.EmitARCExtendBlockObject(expr: E);
2697
2698 case CK_CopyAndAutoreleaseBlockObject:
2699 return CGF.EmitBlockCopyAndAutorelease(Block: Visit(E), Ty: E->getType());
2700
2701 case CK_FloatingRealToComplex:
2702 case CK_FloatingComplexCast:
2703 case CK_IntegralRealToComplex:
2704 case CK_IntegralComplexCast:
2705 case CK_IntegralComplexToFloatingComplex:
2706 case CK_FloatingComplexToIntegralComplex:
2707 case CK_ConstructorConversion:
2708 case CK_ToUnion:
2709 case CK_HLSLArrayRValue:
2710 llvm_unreachable("scalar cast to non-scalar value");
2711
2712 case CK_LValueToRValue:
2713 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2714 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2715 return Visit(E: const_cast<Expr*>(E));
2716
2717 case CK_IntegralToPointer: {
2718 Value *Src = Visit(E: const_cast<Expr*>(E));
2719
2720 // First, convert to the correct width so that we control the kind of
2721 // extension.
2722 auto DestLLVMTy = ConvertType(T: DestTy);
2723 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2724 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2725 llvm::Value* IntResult =
2726 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
2727
2728 auto *IntToPtr = Builder.CreateIntToPtr(V: IntResult, DestTy: DestLLVMTy);
2729
2730 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2731      // Going from an integer to a pointer that could be dynamic requires
2732      // reloading the dynamic information from invariant.group.
2733 if (DestTy.mayBeDynamicClass())
2734 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2735 }
2736
2737 IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
2738 return IntToPtr;
2739 }
2740 case CK_PointerToIntegral: {
2741 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2742 auto *PtrExpr = Visit(E);
2743
2744 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2745 const QualType SrcType = E->getType();
2746
2747      // Casting to an integer requires stripping the dynamic information,
2748      // as an integer does not carry it.
2749 if (SrcType.mayBeDynamicClass())
2750 PtrExpr = Builder.CreateStripInvariantGroup(Ptr: PtrExpr);
2751 }
2752
2753 PtrExpr = CGF.authPointerToPointerCast(ResultPtr: PtrExpr, SourceType: E->getType(), DestType: DestTy);
2754 return Builder.CreatePtrToInt(V: PtrExpr, DestTy: ConvertType(T: DestTy));
2755 }
2756 case CK_ToVoid: {
2757 CGF.EmitIgnoredExpr(E);
2758 return nullptr;
2759 }
2760 case CK_MatrixCast: {
2761 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2762 Loc: CE->getExprLoc());
2763 }
2764   // CK_HLSLAggregateSplatCast only handles splatting to vectors from a
2765   // vec1. Casts were inserted in Sema to cast the source expression to a
2766   // scalar and to perform any necessary scalar cast, so this cast can be
2767   // handled by the regular vector splat cast code below.
2768 case CK_HLSLAggregateSplatCast:
2769 case CK_VectorSplat: {
2770 llvm::Type *DstTy = ConvertType(T: DestTy);
2771 Value *Elt = Visit(E: const_cast<Expr *>(E));
2772 // Splat the element across to all elements
2773 llvm::ElementCount NumElements =
2774 cast<llvm::VectorType>(Val: DstTy)->getElementCount();
2775 return Builder.CreateVectorSplat(EC: NumElements, V: Elt, Name: "splat");
2776 }
2777
2778 case CK_FixedPointCast:
2779 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2780 Loc: CE->getExprLoc());
2781
2782 case CK_FixedPointToBoolean:
2783 assert(E->getType()->isFixedPointType() &&
2784 "Expected src type to be fixed point type");
2785 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2786 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2787 Loc: CE->getExprLoc());
2788
2789 case CK_FixedPointToIntegral:
2790 assert(E->getType()->isFixedPointType() &&
2791 "Expected src type to be fixed point type");
2792 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2793 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2794 Loc: CE->getExprLoc());
2795
2796 case CK_IntegralToFixedPoint:
2797 assert(E->getType()->isIntegerType() &&
2798 "Expected src type to be an integer");
2799 assert(DestTy->isFixedPointType() &&
2800 "Expected dest type to be fixed point type");
2801 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2802 Loc: CE->getExprLoc());
2803
2804 case CK_IntegralCast: {
2805 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2806 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2807 return Builder.CreateIntCast(V: Visit(E), DestTy: ConvertType(T: DestTy),
2808 isSigned: SrcElTy->isSignedIntegerOrEnumerationType(),
2809 Name: "conv");
2810 }
2811 ScalarConversionOpts Opts;
2812 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2813 if (!ICE->isPartOfExplicitCast())
2814 Opts = ScalarConversionOpts(CGF.SanOpts);
2815 }
2816 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2817 Loc: CE->getExprLoc(), Opts);
2818 }
2819 case CK_IntegralToFloating: {
2820 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2821 // TODO: Support constrained FP intrinsics.
2822 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2823 if (SrcElTy->isSignedIntegerOrEnumerationType())
2824 return Builder.CreateSIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2825 return Builder.CreateUIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2826 }
2827 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2828 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2829 Loc: CE->getExprLoc());
2830 }
2831 case CK_FloatingToIntegral: {
2832 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2833 // TODO: Support constrained FP intrinsics.
2834 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2835 if (DstElTy->isSignedIntegerOrEnumerationType())
2836 return Builder.CreateFPToSI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2837 return Builder.CreateFPToUI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2838 }
2839 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2840 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2841 Loc: CE->getExprLoc());
2842 }
2843 case CK_FloatingCast: {
2844 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2845 // TODO: Support constrained FP intrinsics.
2846 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2847 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2848 if (DstElTy->castAs<BuiltinType>()->getKind() <
2849 SrcElTy->castAs<BuiltinType>()->getKind())
2850 return Builder.CreateFPTrunc(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2851 return Builder.CreateFPExt(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2852 }
2853 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2854 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2855 Loc: CE->getExprLoc());
2856 }
2857 case CK_FixedPointToFloating:
2858 case CK_FloatingToFixedPoint: {
2859 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2860 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2861 Loc: CE->getExprLoc());
2862 }
2863 case CK_BooleanToSignedIntegral: {
2864 ScalarConversionOpts Opts;
2865 Opts.TreatBooleanAsSigned = true;
2866 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2867 Loc: CE->getExprLoc(), Opts);
2868 }
2869 case CK_IntegralToBoolean:
2870 return EmitIntToBoolConversion(V: Visit(E));
2871 case CK_PointerToBoolean:
2872 return EmitPointerToBoolConversion(V: Visit(E), QT: E->getType());
2873 case CK_FloatingToBoolean: {
2874 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2875 return EmitFloatToBoolConversion(V: Visit(E));
2876 }
2877 case CK_MemberPointerToBoolean: {
2878 llvm::Value *MemPtr = Visit(E);
2879 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2880 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2881 }
2882
2883 case CK_FloatingComplexToReal:
2884 case CK_IntegralComplexToReal:
2885 return CGF.EmitComplexExpr(E, IgnoreReal: false, IgnoreImag: true).first;
2886
2887 case CK_FloatingComplexToBoolean:
2888 case CK_IntegralComplexToBoolean: {
2889 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2890
2891 // TODO: kill this function off, inline appropriate case here
2892 return EmitComplexToScalarConversion(Src: V, SrcTy: E->getType(), DstTy: DestTy,
2893 Loc: CE->getExprLoc());
2894 }
2895
2896 case CK_ZeroToOCLOpaqueType: {
2897     assert((DestTy->isEventT() || DestTy->isQueueT() ||
2898             DestTy->isOCLIntelSubgroupAVCType()) &&
2899            "CK_ZeroToOCLOpaqueType cast on non-OpenCL opaque type");
2900 return llvm::Constant::getNullValue(Ty: ConvertType(T: DestTy));
2901 }
2902
2903 case CK_IntToOCLSampler:
2904 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2905
2906 case CK_HLSLVectorTruncation: {
2907 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
2908 "Destination type must be a vector or builtin type.");
2909 Value *Vec = Visit(E: const_cast<Expr *>(E));
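    // For illustration: truncating a float4 to a float2 yields a
    // shufflevector with mask <0, 1>; truncating all the way to a scalar
    // yields an extractelement of element 0 (the two paths below).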
2910 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2911 SmallVector<int> Mask;
2912 unsigned NumElts = VecTy->getNumElements();
2913 for (unsigned I = 0; I != NumElts; ++I)
2914 Mask.push_back(Elt: I);
2915
2916 return Builder.CreateShuffleVector(V: Vec, Mask, Name: "trunc");
2917 }
2918 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.SizeTy);
2919 return Builder.CreateExtractElement(Vec, Idx: Zero, Name: "cast.vtrunc");
2920 }
2921 case CK_HLSLElementwiseCast: {
2922 RValue RV = CGF.EmitAnyExpr(E);
2923 SourceLocation Loc = CE->getExprLoc();
2924 QualType SrcTy = E->getType();
2925
2926 assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast.");
2927 // RHS is an aggregate
2928 Address SrcVal = RV.getAggregateAddress();
2929 return EmitHLSLElementwiseCast(CGF, RHSVal: SrcVal, RHSTy: SrcTy, LHSTy: DestTy, Loc);
2930 }
2931 } // end of switch
2932
2933 llvm_unreachable("unknown scalar cast");
2934}
2935
2936Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2937 CodeGenFunction::StmtExprEvaluation eval(CGF);
2938 Address RetAlloca = CGF.EmitCompoundStmt(S: *E->getSubStmt(),
2939 GetLast: !E->getType()->isVoidType());
2940 if (!RetAlloca.isValid())
2941 return nullptr;
2942 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2943 E->getExprLoc());
2944}
2945
2946Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2947 CodeGenFunction::RunCleanupsScope Scope(CGF);
2948 Value *V = Visit(E: E->getSubExpr());
2949 // Defend against dominance problems caused by jumps out of expression
2950 // evaluation through the shared cleanup block.
2951 Scope.ForceCleanup(ValuesToReload: {&V});
2952 return V;
2953}
2954
2955//===----------------------------------------------------------------------===//
2956// Unary Operators
2957//===----------------------------------------------------------------------===//
2958
2959static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2960 llvm::Value *InVal, bool IsInc,
2961 FPOptions FPFeatures) {
2962 BinOpInfo BinOp;
2963 BinOp.LHS = InVal;
2964 BinOp.RHS = llvm::ConstantInt::get(Ty: InVal->getType(), V: 1, IsSigned: false);
2965 BinOp.Ty = E->getType();
2966 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2967 BinOp.FPFeatures = FPFeatures;
2968 BinOp.E = E;
2969 return BinOp;
2970}
2971
2972llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2973 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2974 llvm::Value *Amount =
2975 llvm::ConstantInt::get(Ty: InVal->getType(), V: IsInc ? 1 : -1, IsSigned: true);
2976 StringRef Name = IsInc ? "inc" : "dec";
2977 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2978 case LangOptions::SOB_Defined:
2979 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2980 return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
2981 [[fallthrough]];
2982 case LangOptions::SOB_Undefined:
2983 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2984 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2985 [[fallthrough]];
2986 case LangOptions::SOB_Trapping:
2987 BinOpInfo Info = createBinOpInfoFromIncDec(
2988 E, InVal, IsInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
2989 if (!E->canOverflow() || CanElideOverflowCheck(Ctx: CGF.getContext(), Op: Info))
2990 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2991 return EmitOverflowCheckedBinOp(Ops: Info);
2992 }
2993 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2994}
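
// For illustration, `++x` on a plain signed int lowers roughly as follows
// (sanitizers disabled; with them enabled the checked path is taken):
//   -fwrapv  (SOB_Defined):   %inc = add i32 %x, 1
//   default  (SOB_Undefined): %inc = add nsw i32 %x, 1
//   -ftrapv  (SOB_Trapping):  @llvm.sadd.with.overflow.i32 plus a branch
//                             on the overflow bit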
2995
2996/// For the purposes of overflow pattern exclusion, does this match the
2997/// "while(i--)" pattern?
2998static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
2999 bool isPre, ASTContext &Ctx) {
3000 if (isInc || isPre)
3001 return false;
3002
3003 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
3004 if (!Ctx.getLangOpts().isOverflowPatternExcluded(
3005 Kind: LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
3006 return false;
3007
3008   // All parents (usually just one) must be a WhileStmt.
3009 for (const auto &Parent : Ctx.getParentMapContext().getParents(Node: *UO))
3010 if (!Parent.get<WhileStmt>())
3011 return false;
3012
3013 return true;
3014}
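
// For illustration, the exclusion accepts the classic idiom
//   unsigned i = n;
//   while (i--) { ... }   // i wraps to UINT_MAX on the final test
// and suppresses the unsigned-overflow check for that decrement, but only
// when the post-decrement's parent is the controlling WhileStmt itself.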
3015
3016namespace {
3017/// Handles check and update for lastprivate conditional variables.
3018class OMPLastprivateConditionalUpdateRAII {
3019private:
3020 CodeGenFunction &CGF;
3021 const UnaryOperator *E;
3022
3023public:
3024 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3025 const UnaryOperator *E)
3026 : CGF(CGF), E(E) {}
3027 ~OMPLastprivateConditionalUpdateRAII() {
3028 if (CGF.getLangOpts().OpenMP)
3029 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
3030 CGF, LHS: E->getSubExpr());
3031 }
3032};
3033} // namespace
3034
3035llvm::Value *
3036ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3037 bool isInc, bool isPre) {
3038 ApplyAtomGroup Grp(CGF.getDebugInfo());
3039 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3040 QualType type = E->getSubExpr()->getType();
3041 llvm::PHINode *atomicPHI = nullptr;
3042 llvm::Value *value;
3043 llvm::Value *input;
3044 llvm::Value *Previous = nullptr;
3045 QualType SrcType = E->getType();
3046
3047 int amount = (isInc ? 1 : -1);
3048 bool isSubtraction = !isInc;
3049
3050 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3051 type = atomicTy->getValueType();
3052 if (isInc && type->isBooleanType()) {
3053 llvm::Value *True = CGF.EmitToMemory(Value: Builder.getTrue(), Ty: type);
3054 if (isPre) {
3055 Builder.CreateStore(Val: True, Addr: LV.getAddress(), IsVolatile: LV.isVolatileQualified())
3056 ->setAtomic(Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3057 return Builder.getTrue();
3058 }
3059       // For an atomic bool preincrement we just stored true and returned
3060       // it above; for postincrement, do an atomic swap with true instead.
3061 return Builder.CreateAtomicRMW(
3062 Op: llvm::AtomicRMWInst::Xchg, Addr: LV.getAddress(), Val: True,
3063 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3064 }
3065     // Special case for atomic increment/decrement on integers: emit
3066     // atomicrmw instructions. We skip this if overflow checking is
3067     // required, and fall into the slow path with the atomic cmpxchg loop.
3068 if (!type->isBooleanType() && type->isIntegerType() &&
3069 !(type->isUnsignedIntegerType() &&
3070 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3071 CGF.getLangOpts().getSignedOverflowBehavior() !=
3072 LangOptions::SOB_Trapping) {
3073 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3074 llvm::AtomicRMWInst::Sub;
3075 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3076 llvm::Instruction::Sub;
3077 llvm::Value *amt = CGF.EmitToMemory(
3078 Value: llvm::ConstantInt::get(Ty: ConvertType(T: type), V: 1, IsSigned: true), Ty: type);
3079 llvm::Value *old =
3080 Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(), Val: amt,
3081 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3082 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
3083 }
3084 // Special case for atomic increment/decrement on floats.
3085     // Bail out on non-power-of-2-sized floating point types (e.g., x86_fp80).
3086 if (type->isFloatingType()) {
3087 llvm::Type *Ty = ConvertType(T: type);
3088 if (llvm::has_single_bit(Value: Ty->getScalarSizeInBits())) {
3089 llvm::AtomicRMWInst::BinOp aop =
3090 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3091 llvm::Instruction::BinaryOps op =
3092 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3093 llvm::Value *amt = llvm::ConstantFP::get(Ty, V: 1.0);
3094 llvm::AtomicRMWInst *old =
3095 CGF.emitAtomicRMWInst(Op: aop, Addr: LV.getAddress(), Val: amt,
3096 Order: llvm::AtomicOrdering::SequentiallyConsistent);
3097
3098 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
3099 }
3100 }
3101 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
3102 input = value;
3103 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
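    // The loop has roughly this shape (an illustrative sketch):
    //   atomic_op:
    //     %old = phi [ %orig, %entry ], [ %cur, %atomic_op ]
    //     %new = <inc/dec of %old>
    //     %pair = cmpxchg ...            ; emitted after the operation below
    //     br i1 %success, label %atomic_cont, label %atomic_op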
3104 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3105 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3106 value = CGF.EmitToMemory(Value: value, Ty: type);
3107 Builder.CreateBr(Dest: opBB);
3108 Builder.SetInsertPoint(opBB);
3109 atomicPHI = Builder.CreatePHI(Ty: value->getType(), NumReservedValues: 2);
3110 atomicPHI->addIncoming(V: value, BB: startBB);
3111 value = atomicPHI;
3112 } else {
3113 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
3114 input = value;
3115 }
3116
3117 // Special case of integer increment that we have to check first: bool++.
3118 // Due to promotion rules, we get:
3119 // bool++ -> bool = bool + 1
3120 // -> bool = (int)bool + 1
3121 // -> bool = ((int)bool + 1 != 0)
3122 // An interesting aspect of this is that increment is always true.
3123 // Decrement does not have this property.
3124 if (isInc && type->isBooleanType()) {
3125 value = Builder.getTrue();
3126
3127 // Most common case by far: integer increment.
3128 } else if (type->isIntegerType()) {
3129 QualType promotedType;
3130 bool canPerformLossyDemotionCheck = false;
3131
3132 bool excludeOverflowPattern =
3133 matchesPostDecrInWhile(UO: E, isInc, isPre, Ctx&: CGF.getContext());
3134
3135 if (CGF.getContext().isPromotableIntegerType(T: type)) {
3136 promotedType = CGF.getContext().getPromotedIntegerType(PromotableType: type);
3137 assert(promotedType != type && "Shouldn't promote to the same type.");
3138 canPerformLossyDemotionCheck = true;
3139 canPerformLossyDemotionCheck &=
3140 CGF.getContext().getCanonicalType(T: type) !=
3141 CGF.getContext().getCanonicalType(T: promotedType);
3142 canPerformLossyDemotionCheck &=
3143 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
3144 SrcType: type, DstType: promotedType);
3145 assert((!canPerformLossyDemotionCheck ||
3146 type->isSignedIntegerOrEnumerationType() ||
3147 promotedType->isSignedIntegerOrEnumerationType() ||
3148 ConvertType(type)->getScalarSizeInBits() ==
3149 ConvertType(promotedType)->getScalarSizeInBits()) &&
3150 "The following check expects that if we do promotion to different "
3151 "underlying canonical type, at least one of the types (either "
3152 "base or promoted) will be signed, or the bitwidths will match.");
3153 }
3154 if (CGF.SanOpts.hasOneOf(
3155 K: SanitizerKind::ImplicitIntegerArithmeticValueChange |
3156 SanitizerKind::ImplicitBitfieldConversion) &&
3157 canPerformLossyDemotionCheck) {
3158       // While `x += 1` (for `x` with width less than int) is modeled as
3159       // promotion+arithmetic+demotion, and we can catch lossy demotion
3160       // with ease, inc/dec with width less than int can't overflow because
3161       // of promotion rules, so we normally omit the promotion+demotion,
3162       // which means we cannot catch lossy "demotion". Because we still
3163       // want to catch these cases when the sanitizer is enabled, we perform
3164       // the promotion, then the increment/decrement in the wider type, and
3165       // finally the demotion. This will catch lossy demotions.
3166
3167 // We have a special case for bitfields defined using all the bits of the
3168 // type. In this case we need to do the same trick as for the integer
3169 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3170
3171 value = EmitScalarConversion(Src: value, SrcType: type, DstType: promotedType, Loc: E->getExprLoc());
3172 Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
3173 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3174       // Pass non-default ScalarConversionOpts so that the sanitizer check
3175       // is emitted if LV is not a bitfield; otherwise the bitfield
3176       // sanitizer checks will take care of the conversion.
3177 ScalarConversionOpts Opts;
3178 if (!LV.isBitField())
3179 Opts = ScalarConversionOpts(CGF.SanOpts);
3180 else if (CGF.SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) {
3181 Previous = value;
3182 SrcType = promotedType;
3183 }
3184
3185 value = EmitScalarConversion(Src: value, SrcType: promotedType, DstType: type, Loc: E->getExprLoc(),
3186 Opts);
3187
3188 // Note that signed integer inc/dec with width less than int can't
3189 // overflow because of promotion rules; we're just eliding a few steps
3190 // here.
3191 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3192 value = EmitIncDecConsiderOverflowBehavior(E, InVal: value, IsInc: isInc);
3193 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3194 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
3195 !excludeOverflowPattern &&
3196 !CGF.getContext().isTypeIgnoredBySanitizer(
3197 Mask: SanitizerKind::UnsignedIntegerOverflow, Ty: E->getType())) {
3198 value = EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
3199 E, InVal: value, IsInc: isInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
3200 } else {
3201 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
3202 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3203 }
3204
3205 // Next most common: pointer increment.
3206 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3207 QualType type = ptr->getPointeeType();
3208
3209 // VLA types don't have constant size.
3210 if (const VariableArrayType *vla
3211 = CGF.getContext().getAsVariableArrayType(T: type)) {
3212 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3213 if (!isInc) numElts = Builder.CreateNSWNeg(V: numElts, Name: "vla.negsize");
3214 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
3215 if (CGF.getLangOpts().PointerOverflowDefined)
3216 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: numElts, Name: "vla.inc");
3217 else
3218 value = CGF.EmitCheckedInBoundsGEP(
3219 ElemTy: elemTy, Ptr: value, IdxList: numElts, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3220 Loc: E->getExprLoc(), Name: "vla.inc");
3221
3222 // Arithmetic on function pointers (!) is just +-1.
3223 } else if (type->isFunctionType()) {
3224 llvm::Value *amt = Builder.getInt32(C: amount);
3225
3226 if (CGF.getLangOpts().PointerOverflowDefined)
3227 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: amt, Name: "incdec.funcptr");
3228 else
3229 value =
3230 CGF.EmitCheckedInBoundsGEP(ElemTy: CGF.Int8Ty, Ptr: value, IdxList: amt,
3231 /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3232 Loc: E->getExprLoc(), Name: "incdec.funcptr");
3233
3234 // For everything else, we can just do a simple increment.
3235 } else {
3236 llvm::Value *amt = Builder.getInt32(C: amount);
3237 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
3238 if (CGF.getLangOpts().PointerOverflowDefined)
3239 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: amt, Name: "incdec.ptr");
3240 else
3241 value = CGF.EmitCheckedInBoundsGEP(
3242 ElemTy: elemTy, Ptr: value, IdxList: amt, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3243 Loc: E->getExprLoc(), Name: "incdec.ptr");
3244 }
3245
3246 // Vector increment/decrement.
3247 } else if (type->isVectorType()) {
3248 if (type->hasIntegerRepresentation()) {
3249 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount);
3250
3251 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3252 } else {
3253 value = Builder.CreateFAdd(
3254 L: value,
3255 R: llvm::ConstantFP::get(Ty: value->getType(), V: amount),
3256 Name: isInc ? "inc" : "dec");
3257 }
3258
3259 // Floating point.
3260 } else if (type->isRealFloatingType()) {
3261 // Add the inc/dec to the real part.
3262 llvm::Value *amt;
3263 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3264
3265 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3266 // Another special case: half FP increment should be done via float
3267 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3268 value = Builder.CreateCall(
3269 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
3270 CGF.CGM.FloatTy),
3271 input, "incdec.conv");
3272 } else {
3273 value = Builder.CreateFPExt(V: input, DestTy: CGF.CGM.FloatTy, Name: "incdec.conv");
3274 }
3275 }
3276
3277 if (value->getType()->isFloatTy())
3278 amt = llvm::ConstantFP::get(Context&: VMContext,
3279 V: llvm::APFloat(static_cast<float>(amount)));
3280 else if (value->getType()->isDoubleTy())
3281 amt = llvm::ConstantFP::get(Context&: VMContext,
3282 V: llvm::APFloat(static_cast<double>(amount)));
3283 else {
3284 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3285 // Convert from float.
3286 llvm::APFloat F(static_cast<float>(amount));
3287 bool ignored;
3288 const llvm::fltSemantics *FS;
3289 // Don't use getFloatTypeSemantics because Half isn't
3290 // necessarily represented using the "half" LLVM type.
3291 if (value->getType()->isFP128Ty())
3292 FS = &CGF.getTarget().getFloat128Format();
3293 else if (value->getType()->isHalfTy())
3294 FS = &CGF.getTarget().getHalfFormat();
3295 else if (value->getType()->isBFloatTy())
3296 FS = &CGF.getTarget().getBFloat16Format();
3297 else if (value->getType()->isPPC_FP128Ty())
3298 FS = &CGF.getTarget().getIbm128Format();
3299 else
3300 FS = &CGF.getTarget().getLongDoubleFormat();
3301 F.convert(ToSemantics: *FS, RM: llvm::APFloat::rmTowardZero, losesInfo: &ignored);
3302 amt = llvm::ConstantFP::get(Context&: VMContext, V: F);
3303 }
3304 value = Builder.CreateFAdd(L: value, R: amt, Name: isInc ? "inc" : "dec");
3305
3306 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3307 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3308 value = Builder.CreateCall(
3309 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3310 CGF.CGM.FloatTy),
3311 value, "incdec.conv");
3312 } else {
3313 value = Builder.CreateFPTrunc(V: value, DestTy: input->getType(), Name: "incdec.conv");
3314 }
3315 }
3316
3317 // Fixed-point types.
3318 } else if (type->isFixedPointType()) {
3319 // Fixed-point types are tricky. In some cases, it isn't possible to
3320 // represent a 1 or a -1 in the type at all. Piggyback off of
3321 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3322 BinOpInfo Info;
3323 Info.E = E;
3324 Info.Ty = E->getType();
3325 Info.Opcode = isInc ? BO_Add : BO_Sub;
3326 Info.LHS = value;
3327 Info.RHS = llvm::ConstantInt::get(Ty: value->getType(), V: 1, IsSigned: false);
3328 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3329 // since -1 is guaranteed to be representable.
3330 if (type->isSignedFixedPointType()) {
3331 Info.Opcode = isInc ? BO_Sub : BO_Add;
3332 Info.RHS = Builder.CreateNeg(V: Info.RHS);
3333 }
3334 // Now, convert from our invented integer literal to the type of the unary
3335 // op. This will upscale and saturate if necessary. This value can become
3336 // undef in some cases.
3337 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3338 auto DstSema = CGF.getContext().getFixedPointSemantics(Ty: Info.Ty);
3339 Info.RHS = FPBuilder.CreateIntegerToFixed(Src: Info.RHS, SrcIsSigned: true, DstSema: DstSema);
3340 value = EmitFixedPointBinOp(Ops: Info);
3341
3342 // Objective-C pointer types.
3343 } else {
3344 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3345
3346 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3347 if (!isInc) size = -size;
3348 llvm::Value *sizeValue =
3349 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: size.getQuantity());
3350
3351 if (CGF.getLangOpts().PointerOverflowDefined)
3352 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, Name: "incdec.objptr");
3353 else
3354 value = CGF.EmitCheckedInBoundsGEP(
3355 ElemTy: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3356 Loc: E->getExprLoc(), Name: "incdec.objptr");
3357 value = Builder.CreateBitCast(V: value, DestTy: input->getType());
3358 }
3359
3360 if (atomicPHI) {
3361 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3362 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3363 auto Pair = CGF.EmitAtomicCompareExchange(
3364 Obj: LV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: value), Loc: E->getExprLoc());
3365 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: type);
3366 llvm::Value *success = Pair.second;
3367 atomicPHI->addIncoming(V: old, BB: curBlock);
3368 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3369 Builder.SetInsertPoint(contBB);
3370 return isPre ? value : input;
3371 }
3372
3373 // Store the updated result through the lvalue.
3374 if (LV.isBitField()) {
3375 Value *Src = Previous ? Previous : value;
3376 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: value), Dst: LV, Result: &value);
3377 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: value, DstType: E->getType(),
3378 Info: LV.getBitFieldInfo(), Loc: E->getExprLoc());
3379 } else
3380 CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV);
3381
3382 // If this is a postinc, return the value read from memory, otherwise use the
3383 // updated value.
3384 return isPre ? value : input;
3385}
3386
3387
3388Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3389 QualType PromotionType) {
3390 QualType promotionTy = PromotionType.isNull()
3391 ? getPromotionType(Ty: E->getSubExpr()->getType())
3392 : PromotionType;
3393 Value *result = VisitPlus(E, PromotionType: promotionTy);
3394 if (result && !promotionTy.isNull())
3395 result = EmitUnPromotedValue(result, ExprType: E->getType());
3396 return result;
3397}
3398
3399Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3400 QualType PromotionType) {
3401 // This differs from gcc, though, most likely due to a bug in gcc.
3402 TestAndClearIgnoreResultAssign();
3403 if (!PromotionType.isNull())
3404 return CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3405 return Visit(E: E->getSubExpr());
3406}
3407
3408Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3409 QualType PromotionType) {
3410 QualType promotionTy = PromotionType.isNull()
3411 ? getPromotionType(Ty: E->getSubExpr()->getType())
3412 : PromotionType;
3413 Value *result = VisitMinus(E, PromotionType: promotionTy);
3414 if (result && !promotionTy.isNull())
3415 result = EmitUnPromotedValue(result, ExprType: E->getType());
3416 return result;
3417}
3418
3419Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3420 QualType PromotionType) {
3421 TestAndClearIgnoreResultAssign();
3422 Value *Op;
3423 if (!PromotionType.isNull())
3424 Op = CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3425 else
3426 Op = Visit(E: E->getSubExpr());
3427
3428 // Generate a unary FNeg for FP ops.
3429 if (Op->getType()->isFPOrFPVectorTy())
3430 return Builder.CreateFNeg(V: Op, Name: "fneg");
3431
3432 // Emit unary minus with EmitSub so we handle overflow cases etc.
3433 BinOpInfo BinOp;
3434 BinOp.RHS = Op;
3435 BinOp.LHS = llvm::Constant::getNullValue(Ty: BinOp.RHS->getType());
3436 BinOp.Ty = E->getType();
3437 BinOp.Opcode = BO_Sub;
3438 BinOp.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3439 BinOp.E = E;
3440 return EmitSub(Ops: BinOp);
3441}
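
// For illustration, `-i` on a signed int is emitted through EmitSub as
// `0 - i`, so it picks up nsw, -ftrapv, and sanitizer handling for free,
// while `-f` on a float becomes a single `fneg float %f`.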
3442
3443Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3444 TestAndClearIgnoreResultAssign();
3445 Value *Op = Visit(E: E->getSubExpr());
3446 return Builder.CreateNot(V: Op, Name: "not");
3447}
3448
3449Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3450 // Perform vector logical not on comparison with zero vector.
3451 if (E->getType()->isVectorType() &&
3452 E->getType()->castAs<VectorType>()->getVectorKind() ==
3453 VectorKind::Generic) {
3454 Value *Oper = Visit(E: E->getSubExpr());
3455 Value *Zero = llvm::Constant::getNullValue(Ty: Oper->getType());
3456 Value *Result;
3457 if (Oper->getType()->isFPOrFPVectorTy()) {
3458 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3459 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
3460 Result = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_OEQ, LHS: Oper, RHS: Zero, Name: "cmp");
3461 } else
3462 Result = Builder.CreateICmp(P: llvm::CmpInst::ICMP_EQ, LHS: Oper, RHS: Zero, Name: "cmp");
3463 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
3464 }
3465
3466 // Compare operand to zero.
3467 Value *BoolVal = CGF.EvaluateExprAsBool(E: E->getSubExpr());
3468
3469 // Invert value.
3470 // TODO: Could dynamically modify easy computations here. For example, if
3471 // the operand is an icmp ne, turn into icmp eq.
3472 BoolVal = Builder.CreateNot(V: BoolVal, Name: "lnot");
3473
3474 // ZExt result to the expr type.
3475 return Builder.CreateZExt(V: BoolVal, DestTy: ConvertType(T: E->getType()), Name: "lnot.ext");
3476}
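
// For illustration, `!v` on a generic <4 x i32> vector lowers roughly to
//   %cmp  = icmp eq <4 x i32> %v, zeroinitializer
//   %sext = sext <4 x i1> %cmp to <4 x i32>
// while scalar `!x` becomes compare-to-zero, `xor i1 %b, true`, then zext.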
3477
3478Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3479 // Try folding the offsetof to a constant.
3480 Expr::EvalResult EVResult;
3481 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3482 llvm::APSInt Value = EVResult.Val.getInt();
3483 return Builder.getInt(AI: Value);
3484 }
3485
3486 // Loop over the components of the offsetof to compute the value.
3487 unsigned n = E->getNumComponents();
3488 llvm::Type* ResultType = ConvertType(T: E->getType());
3489 llvm::Value* Result = llvm::Constant::getNullValue(Ty: ResultType);
3490 QualType CurrentType = E->getTypeSourceInfo()->getType();
3491 for (unsigned i = 0; i != n; ++i) {
3492 OffsetOfNode ON = E->getComponent(Idx: i);
3493 llvm::Value *Offset = nullptr;
3494 switch (ON.getKind()) {
3495 case OffsetOfNode::Array: {
3496 // Compute the index
3497 Expr *IdxExpr = E->getIndexExpr(Idx: ON.getArrayExprIndex());
3498 llvm::Value* Idx = CGF.EmitScalarExpr(E: IdxExpr);
3499 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3500 Idx = Builder.CreateIntCast(V: Idx, DestTy: ResultType, isSigned: IdxSigned, Name: "conv");
3501
3502 // Save the element type
3503 CurrentType =
3504 CGF.getContext().getAsArrayType(T: CurrentType)->getElementType();
3505
3506 // Compute the element size
3507 llvm::Value* ElemSize = llvm::ConstantInt::get(Ty: ResultType,
3508 V: CGF.getContext().getTypeSizeInChars(T: CurrentType).getQuantity());
3509
3510 // Multiply out to compute the result
3511 Offset = Builder.CreateMul(LHS: Idx, RHS: ElemSize);
3512 break;
3513 }
3514
3515 case OffsetOfNode::Field: {
3516 FieldDecl *MemberDecl = ON.getField();
3517 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3518 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3519
3520 // Compute the index of the field in its parent.
3521 unsigned i = 0;
3522 // FIXME: It would be nice if we didn't have to loop here!
3523 for (RecordDecl::field_iterator Field = RD->field_begin(),
3524 FieldEnd = RD->field_end();
3525 Field != FieldEnd; ++Field, ++i) {
3526 if (*Field == MemberDecl)
3527 break;
3528 }
3529 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3530
3531 // Compute the offset to the field
3532 int64_t OffsetInt = RL.getFieldOffset(FieldNo: i) /
3533 CGF.getContext().getCharWidth();
3534 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt);
3535
3536 // Save the element type.
3537 CurrentType = MemberDecl->getType();
3538 break;
3539 }
3540
3541 case OffsetOfNode::Identifier:
3542 llvm_unreachable("dependent __builtin_offsetof");
3543
3544 case OffsetOfNode::Base: {
3545 if (ON.getBase()->isVirtual()) {
3546 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3547 continue;
3548 }
3549
3550 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3551 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3552
3553 // Save the element type.
3554 CurrentType = ON.getBase()->getType();
3555
3556 // Compute the offset to the base.
3557 auto *BaseRT = CurrentType->castAs<RecordType>();
3558 auto *BaseRD = cast<CXXRecordDecl>(Val: BaseRT->getDecl());
3559 CharUnits OffsetInt = RL.getBaseClassOffset(Base: BaseRD);
3560 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt.getQuantity());
3561 break;
3562 }
3563 }
3564 Result = Builder.CreateAdd(LHS: Result, RHS: Offset);
3565 }
3566 return Result;
3567}
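
// For illustration, given `struct S { int a; int b[4]; };`, the expression
// __builtin_offsetof(struct S, b[i]) folds to a constant when `i` is
// constant; otherwise the loop above emits offsetof(S, b) + i * sizeof(int).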
3568
3569 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
3570 /// of the argument of the sizeof expression as an integer.
3571Value *
3572ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3573 const UnaryExprOrTypeTraitExpr *E) {
3574 QualType TypeToSize = E->getTypeOfArgument();
3575 if (auto Kind = E->getKind();
3576 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3577 if (const VariableArrayType *VAT =
3578 CGF.getContext().getAsVariableArrayType(T: TypeToSize)) {
3579 // For _Countof, we only want to evaluate if the extent is actually
3580 // variable as opposed to a multi-dimensional array whose extent is
3581 // constant but whose element type is variable.
3582 bool EvaluateExtent = true;
3583 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3584 EvaluateExtent =
3585 !VAT->getSizeExpr()->isIntegerConstantExpr(Ctx: CGF.getContext());
3586 }
3587 if (EvaluateExtent) {
3588 if (E->isArgumentType()) {
3589 // sizeof(type) - make sure to emit the VLA size.
3590 CGF.EmitVariablyModifiedType(Ty: TypeToSize);
3591 } else {
3592 // C99 6.5.3.4p2: If the argument is an expression of type
3593 // VLA, it is evaluated.
3594 CGF.EmitIgnoredExpr(E: E->getArgumentExpr());
3595 }
3596
3597 // For _Countof, we just want to return the size of a single dimension.
3598 if (Kind == UETT_CountOf)
3599 return CGF.getVLAElements1D(vla: VAT).NumElts;
3600
3601 // For sizeof and __datasizeof, we need to scale the number of elements
3602 // by the size of the array element type.
3603 auto VlaSize = CGF.getVLASize(vla: VAT);
3604
3605 // Scale the number of non-VLA elements by the non-VLA element size.
3606 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3607 if (!eltSize.isOne())
3608 return CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize),
3609 RHS: VlaSize.NumElts);
3610 return VlaSize.NumElts;
3611 }
3612 }
3613 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3614 auto Alignment =
3615 CGF.getContext()
3616 .toCharUnitsFromBits(BitSize: CGF.getContext().getOpenMPDefaultSimdAlign(
3617 T: E->getTypeOfArgument()->getPointeeType()))
3618 .getQuantity();
3619 return llvm::ConstantInt::get(Ty: CGF.SizeTy, V: Alignment);
3620 } else if (E->getKind() == UETT_VectorElements) {
3621 auto *VecTy = cast<llvm::VectorType>(Val: ConvertType(T: E->getTypeOfArgument()));
3622 return Builder.CreateElementCount(Ty: CGF.SizeTy, EC: VecTy->getElementCount());
3623 }
3624
3625 // If this isn't sizeof(vla), the result must be constant; use the constant
3626 // folding logic so we don't have to duplicate it here.
3627 return Builder.getInt(AI: E->EvaluateKnownConstInt(CGF.getContext()));
3628}
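
// For illustration, `sizeof(a)` for a VLA `int a[n]` emits the element
// count scaled by the element size, roughly `mul nuw i64 4, %n`, whereas
// `sizeof(int[8])` constant-folds to 32 through the final return above.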
3629
3630Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3631 QualType PromotionType) {
3632 QualType promotionTy = PromotionType.isNull()
3633 ? getPromotionType(Ty: E->getSubExpr()->getType())
3634 : PromotionType;
3635 Value *result = VisitReal(E, PromotionType: promotionTy);
3636 if (result && !promotionTy.isNull())
3637 result = EmitUnPromotedValue(result, ExprType: E->getType());
3638 return result;
3639}
3640
3641Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3642 QualType PromotionType) {
3643 Expr *Op = E->getSubExpr();
3644 if (Op->getType()->isAnyComplexType()) {
3645 // If it's an l-value, load through the appropriate subobject l-value.
3646 // Note that we have to ask E because Op might be an l-value that
3647 // this won't work for, e.g. an Obj-C property.
3648 if (E->isGLValue()) {
3649 if (!PromotionType.isNull()) {
3650 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3651 E: Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3652 if (result.first)
3653 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3654 return result.first;
3655 } else {
3656 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3657 .getScalarVal();
3658 }
3659 }
3660 // Otherwise, calculate and project.
3661 return CGF.EmitComplexExpr(E: Op, IgnoreReal: false, IgnoreImag: true).first;
3662 }
3663
3664 if (!PromotionType.isNull())
3665 return CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3666 return Visit(E: Op);
3667}
3668
3669Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3670 QualType PromotionType) {
3671 QualType promotionTy = PromotionType.isNull()
3672 ? getPromotionType(Ty: E->getSubExpr()->getType())
3673 : PromotionType;
3674 Value *result = VisitImag(E, PromotionType: promotionTy);
3675 if (result && !promotionTy.isNull())
3676 result = EmitUnPromotedValue(result, ExprType: E->getType());
3677 return result;
3678}
3679
3680Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3681 QualType PromotionType) {
3682 Expr *Op = E->getSubExpr();
3683 if (Op->getType()->isAnyComplexType()) {
3684 // If it's an l-value, load through the appropriate subobject l-value.
3685 // Note that we have to ask E because Op might be an l-value that
3686 // this won't work for, e.g. an Obj-C property.
3687 if (Op->isGLValue()) {
3688 if (!PromotionType.isNull()) {
3689 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3690 E: Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3691 if (result.second)
3692 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3693 return result.second;
3694 } else {
3695 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3696 .getScalarVal();
3697 }
3698 }
3699 // Otherwise, calculate and project.
3700 return CGF.EmitComplexExpr(E: Op, IgnoreReal: true, IgnoreImag: false).second;
3701 }
3702
3703 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3704 // effects are evaluated, but not the actual value.
3705 if (Op->isGLValue())
3706 CGF.EmitLValue(E: Op);
3707 else if (!PromotionType.isNull())
3708 CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3709 else
3710 CGF.EmitScalarExpr(E: Op, IgnoreResultAssign: true);
3711 if (!PromotionType.isNull())
3712 return llvm::Constant::getNullValue(Ty: ConvertType(T: PromotionType));
3713 return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType()));
3714}
3715
3716//===----------------------------------------------------------------------===//
3717// Binary Operators
3718//===----------------------------------------------------------------------===//
3719
3720Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3721 QualType PromotionType) {
3722 return CGF.Builder.CreateFPExt(V: result, DestTy: ConvertType(T: PromotionType), Name: "ext");
3723}
3724
3725Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3726 QualType ExprType) {
3727 return CGF.Builder.CreateFPTrunc(V: result, DestTy: ConvertType(T: ExprType), Name: "unpromotion");
3728}
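
// Together, EmitPromotedValue and EmitUnPromotedValue implement arithmetic
// in an excess-precision type. For illustration, `_Float16 a, b;` with
// `a + b` promoted to float becomes roughly:
//   %x = fpext half %a to float
//   %y = fpext half %b to float
//   %s = fadd float %x, %y
//   %r = fptrunc float %s to half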
3729
3730Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3731 E = E->IgnoreParens();
3732 if (auto BO = dyn_cast<BinaryOperator>(Val: E)) {
3733 switch (BO->getOpcode()) {
3734#define HANDLE_BINOP(OP) \
3735 case BO_##OP: \
3736 return Emit##OP(EmitBinOps(BO, PromotionType));
3737 HANDLE_BINOP(Add)
3738 HANDLE_BINOP(Sub)
3739 HANDLE_BINOP(Mul)
3740 HANDLE_BINOP(Div)
3741#undef HANDLE_BINOP
3742 default:
3743 break;
3744 }
3745 } else if (auto UO = dyn_cast<UnaryOperator>(Val: E)) {
3746 switch (UO->getOpcode()) {
3747 case UO_Imag:
3748 return VisitImag(E: UO, PromotionType);
3749 case UO_Real:
3750 return VisitReal(E: UO, PromotionType);
3751 case UO_Minus:
3752 return VisitMinus(E: UO, PromotionType);
3753 case UO_Plus:
3754 return VisitPlus(E: UO, PromotionType);
3755 default:
3756 break;
3757 }
3758 }
3759 auto result = Visit(E: const_cast<Expr *>(E));
3760 if (result) {
3761 if (!PromotionType.isNull())
3762 return EmitPromotedValue(result, PromotionType);
3763 else
3764 return EmitUnPromotedValue(result, ExprType: E->getType());
3765 }
3766 return result;
3767}
3768
3769BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3770 QualType PromotionType) {
3771 TestAndClearIgnoreResultAssign();
3772 BinOpInfo Result;
3773 Result.LHS = CGF.EmitPromotedScalarExpr(E: E->getLHS(), PromotionType);
3774 Result.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType);
3775 if (!PromotionType.isNull())
3776 Result.Ty = PromotionType;
3777 else
3778 Result.Ty = E->getType();
3779 Result.Opcode = E->getOpcode();
3780 Result.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3781 Result.E = E;
3782 return Result;
3783}
3784
3785LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3786 const CompoundAssignOperator *E,
3787 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3788 Value *&Result) {
3789 QualType LHSTy = E->getLHS()->getType();
3790 BinOpInfo OpInfo;
3791
3792 if (E->getComputationResultType()->isAnyComplexType())
3793 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3794
3795   // Emit the RHS first. __block variables need to have the RHS evaluated
3796   // first; in addition, this should improve codegen a little.
3797
3798 QualType PromotionTypeCR;
3799 PromotionTypeCR = getPromotionType(Ty: E->getComputationResultType());
3800 if (PromotionTypeCR.isNull())
3801 PromotionTypeCR = E->getComputationResultType();
3802 QualType PromotionTypeLHS = getPromotionType(Ty: E->getComputationLHSType());
3803 QualType PromotionTypeRHS = getPromotionType(Ty: E->getRHS()->getType());
3804 if (!PromotionTypeRHS.isNull())
3805 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType: PromotionTypeRHS);
3806 else
3807 OpInfo.RHS = Visit(E: E->getRHS());
3808 OpInfo.Ty = PromotionTypeCR;
3809 OpInfo.Opcode = E->getOpcode();
3810 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3811 OpInfo.E = E;
3812 // Load/convert the LHS.
3813 LValue LHSLV = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
3814
3815 llvm::PHINode *atomicPHI = nullptr;
3816 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3817 QualType type = atomicTy->getValueType();
3818 if (!type->isBooleanType() && type->isIntegerType() &&
3819 !(type->isUnsignedIntegerType() &&
3820 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3821 CGF.getLangOpts().getSignedOverflowBehavior() !=
3822 LangOptions::SOB_Trapping) {
3823 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3824 llvm::Instruction::BinaryOps Op;
3825 switch (OpInfo.Opcode) {
3826       // We don't have atomicrmw operations for *, %, /, <<, >>.
3827 case BO_MulAssign: case BO_DivAssign:
3828 case BO_RemAssign:
3829 case BO_ShlAssign:
3830 case BO_ShrAssign:
3831 break;
3832 case BO_AddAssign:
3833 AtomicOp = llvm::AtomicRMWInst::Add;
3834 Op = llvm::Instruction::Add;
3835 break;
3836 case BO_SubAssign:
3837 AtomicOp = llvm::AtomicRMWInst::Sub;
3838 Op = llvm::Instruction::Sub;
3839 break;
3840 case BO_AndAssign:
3841 AtomicOp = llvm::AtomicRMWInst::And;
3842 Op = llvm::Instruction::And;
3843 break;
3844 case BO_XorAssign:
3845 AtomicOp = llvm::AtomicRMWInst::Xor;
3846 Op = llvm::Instruction::Xor;
3847 break;
3848 case BO_OrAssign:
3849 AtomicOp = llvm::AtomicRMWInst::Or;
3850 Op = llvm::Instruction::Or;
3851 break;
3852 default:
3853 llvm_unreachable("Invalid compound assignment type");
3854 }
3855 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3856 llvm::Value *Amt = CGF.EmitToMemory(
3857 Value: EmitScalarConversion(Src: OpInfo.RHS, SrcType: E->getRHS()->getType(), DstType: LHSTy,
3858 Loc: E->getExprLoc()),
3859 Ty: LHSTy);
3860
3861 llvm::AtomicRMWInst *OldVal =
3862 CGF.emitAtomicRMWInst(Op: AtomicOp, Addr: LHSLV.getAddress(), Val: Amt);
3863
3864         // Since the operation is atomic, the result type is guaranteed to
3865         // be the same as the input type in LLVM terms.
3866 Result = Builder.CreateBinOp(Opc: Op, LHS: OldVal, RHS: Amt);
3867 return LHSLV;
3868 }
3869 }
3870 // FIXME: For floating point types, we should be saving and restoring the
3871 // floating point environment in the loop.
3872 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3873 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3874 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3875 OpInfo.LHS = CGF.EmitToMemory(Value: OpInfo.LHS, Ty: type);
3876 Builder.CreateBr(Dest: opBB);
3877 Builder.SetInsertPoint(opBB);
3878 atomicPHI = Builder.CreatePHI(Ty: OpInfo.LHS->getType(), NumReservedValues: 2);
3879 atomicPHI->addIncoming(V: OpInfo.LHS, BB: startBB);
3880 OpInfo.LHS = atomicPHI;
3881 }
3882 else
3883 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3884
3885 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3886 SourceLocation Loc = E->getExprLoc();
3887 if (!PromotionTypeLHS.isNull())
3888 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy, DstType: PromotionTypeLHS,
3889 Loc: E->getExprLoc());
3890 else
3891 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy,
3892 DstType: E->getComputationLHSType(), Loc);
3893
3894 // Expand the binary operator.
3895 Result = (this->*Func)(OpInfo);
3896
3897   // Convert the result back to the LHS type,
3898   // potentially with an implicit-conversion sanitizer check.
3899   // If LHSLV is a bitfield, use the default ScalarConversionOpts
3900   // to avoid emitting any implicit integer checks.
3901 Value *Previous = nullptr;
3902 if (LHSLV.isBitField()) {
3903 Previous = Result;
3904 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc);
3905 } else
3906 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc,
3907 Opts: ScalarConversionOpts(CGF.SanOpts));
3908
3909 if (atomicPHI) {
3910 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3911 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3912 auto Pair = CGF.EmitAtomicCompareExchange(
3913 Obj: LHSLV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: Result), Loc: E->getExprLoc());
3914 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: LHSTy);
3915 llvm::Value *success = Pair.second;
3916 atomicPHI->addIncoming(V: old, BB: curBlock);
3917 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3918 Builder.SetInsertPoint(contBB);
3919 return LHSLV;
3920 }
3921
3922 // Store the result value into the LHS lvalue. Bit-fields are handled
3923 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3924 // 'An assignment expression has the value of the left operand after the
3925 // assignment...'.
3926 if (LHSLV.isBitField()) {
3927 Value *Src = Previous ? Previous : Result;
3928 QualType SrcType = E->getRHS()->getType();
3929 QualType DstType = E->getLHS()->getType();
3930 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: Result), Dst: LHSLV, Result: &Result);
3931 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType,
3932 Info: LHSLV.getBitFieldInfo(), Loc: E->getExprLoc());
3933 } else
3934 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Result), Dst: LHSLV);
3935
3936 if (CGF.getLangOpts().OpenMP)
3937 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3938 LHS: E->getLHS());
3939 return LHSLV;
3940}
3941
3942Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3943 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3944 bool Ignore = TestAndClearIgnoreResultAssign();
3945 Value *RHS = nullptr;
3946 LValue LHS = EmitCompoundAssignLValue(E, Func, Result&: RHS);
3947
3948 // If the result is clearly ignored, return now.
3949 if (Ignore)
3950 return nullptr;
3951
3952 // The result of an assignment in C is the assigned r-value.
3953 if (!CGF.getLangOpts().CPlusPlus)
3954 return RHS;
3955
3956 // If the lvalue is non-volatile, return the computed value of the assignment.
3957 if (!LHS.isVolatileQualified())
3958 return RHS;
3959
3960 // Otherwise, reload the value.
3961 return EmitLoadOfLValue(LHS, E->getExprLoc());
3962}
3963
3964void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3965 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3966 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
3967 Checks;
3968
3969 if (CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero)) {
3970 Checks.push_back(Elt: std::make_pair(x: Builder.CreateICmpNE(LHS: Ops.RHS, RHS: Zero),
3971 y: SanitizerKind::SO_IntegerDivideByZero));
3972 }
3973
3974 const auto *BO = cast<BinaryOperator>(Val: Ops.E);
3975 if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow) &&
3976 Ops.Ty->hasSignedIntegerRepresentation() &&
3977 !IsWidenedIntegerOp(Ctx: CGF.getContext(), E: BO->getLHS()) &&
3978 Ops.mayHaveIntegerOverflow()) {
3979 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Val: Zero->getType());
3980
3981 llvm::Value *IntMin =
3982 Builder.getInt(AI: llvm::APInt::getSignedMinValue(numBits: Ty->getBitWidth()));
3983 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3984
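    // Overflow is only possible for the single case INT_MIN / -1 (the one
    // quotient that is not representable), so (LHS != INT_MIN) || (RHS != -1)
    // proves its absence.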
3985 llvm::Value *LHSCmp = Builder.CreateICmpNE(LHS: Ops.LHS, RHS: IntMin);
3986 llvm::Value *RHSCmp = Builder.CreateICmpNE(LHS: Ops.RHS, RHS: NegOne);
3987 llvm::Value *NotOverflow = Builder.CreateOr(LHS: LHSCmp, RHS: RHSCmp, Name: "or");
3988 Checks.push_back(
3989 Elt: std::make_pair(x&: NotOverflow, y: SanitizerKind::SO_SignedIntegerOverflow));
3990 }
3991
3992 if (Checks.size() > 0)
3993 EmitBinOpCheck(Checks, Info: Ops);
3994}
3995
3996Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3997 {
3998 SanitizerDebugLocation SanScope(&CGF,
3999 {SanitizerKind::SO_IntegerDivideByZero,
4000 SanitizerKind::SO_SignedIntegerOverflow,
4001 SanitizerKind::SO_FloatDivideByZero},
4002 SanitizerHandler::DivremOverflow);
4003 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
4004 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
4005 Ops.Ty->isIntegerType() &&
4006 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4007 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
4008 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: true);
4009 } else if (CGF.SanOpts.has(K: SanitizerKind::FloatDivideByZero) &&
4010 Ops.Ty->isRealFloatingType() &&
4011 Ops.mayHaveFloatDivisionByZero()) {
4012 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
4013 llvm::Value *NonZero = Builder.CreateFCmpUNE(LHS: Ops.RHS, RHS: Zero);
4014 EmitBinOpCheck(
4015 Checks: std::make_pair(x&: NonZero, y: SanitizerKind::SO_FloatDivideByZero), Info: Ops);
4016 }
4017 }
4018
4019 if (Ops.Ty->isConstantMatrixType()) {
4020 llvm::MatrixBuilder MB(Builder);
4021 // We need to check the types of the operands of the operator to get the
4022 // correct matrix dimensions.
4023 auto *BO = cast<BinaryOperator>(Val: Ops.E);
4024 (void)BO;
4025 assert(
4026 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
4027 "first operand must be a matrix");
4028 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4029 "second operand must be an arithmetic type");
4030 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4031 return MB.CreateScalarDiv(LHS: Ops.LHS, RHS: Ops.RHS,
4032 IsUnsigned: Ops.Ty->hasUnsignedIntegerRepresentation());
4033 }
4034
4035 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4036 llvm::Value *Val;
4037 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4038 Val = Builder.CreateFDiv(L: Ops.LHS, R: Ops.RHS, Name: "div");
4039 CGF.SetDivFPAccuracy(Val);
4040 return Val;
4041 }
4042 else if (Ops.isFixedPointOp())
4043 return EmitFixedPointBinOp(Ops);
4044 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4045 return Builder.CreateUDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
4046 else
4047 return Builder.CreateSDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
4048}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    SanitizerDebugLocation SanScope(&CGF,
                                    {SanitizerKind::SO_IntegerDivideByZero,
                                     SanitizerKind::SO_SignedIntegerOverflow},
                                    SanitizerHandler::DivremOverflow);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, /*isDiv=*/false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");

  if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
    return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");

  return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  SanitizerDebugLocation SanScope(&CGF,
                                  {SanitizerKind::SO_SignedIntegerOverflow,
                                   SanitizerKind::SO_UnsignedIntegerOverflow},
                                  OverflowKind);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
      &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerKind::SanitizerOrdinal Ordinal =
          isSigned ? SanitizerKind::SO_SignedIntegerOverflow
                   : SanitizerKind::SO_UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, /*isVarArg=*/true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
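
// A sketch of a compatible overflow handler in C, derived from the call built
// above (the name is whatever -ftrapv-handler= specifies; hypothetical here):
//   long long my_overflow_handler(long long lhs, long long rhs,
//                                 char opcode_and_signedness,
//                                 char result_bitwidth, ...);
// The handler's 64-bit return value is truncated back to the operation's type
// and merged in through the phi constructed above.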

/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  // Note that we do not suppress the pointer overflow check in this case.
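  //
  // An illustrative instance of the idiom (a sketch, not verbatim from any
  // libc): round-tripping an integer through a pointer with a null base:
  //
  //   uintptr_t bits = ...;
  //   char *p = (char *)0 + bits;  // formally UB; lowered here as inttoptr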
  if (BinaryOperator::isNullPointerArithmeticExtension(
          CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) {
    Value *Ptr = CGF.Builder.CreateIntToPtr(index, pointer->getType());
    if (CGF.getLangOpts().PointerOverflowDefined ||
        !CGF.SanOpts.has(SanitizerKind::PointerOverflow) ||
        NullPointerIsDefined(CGF.Builder.GetInsertBlock()->getParent(),
                             PtrTy->getPointerAddressSpace()))
      return Ptr;
    // The inbounds GEP of null is valid iff the index is zero.
    auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
    auto CheckHandler = SanitizerHandler::PointerOverflow;
    SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
    Value *IsZeroIndex = CGF.Builder.CreateIsNull(index);
    llvm::Constant *StaticArgs[] = {
        CGF.EmitCheckSourceLocation(op.E->getExprLoc())};
    llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
    Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
    Value *ComputedGEP = CGF.Builder.CreateZExtOrTrunc(index, IntPtrTy);
    Value *DynamicArgs[] = {IntPtr, ComputedGEP};
    CGF.EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
                  DynamicArgs);
    return Ptr;
  }

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the index value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result =
        CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
    if (CGF.getLangOpts().PointerOverflowDefined) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions.
  // The GNU void* casts amount to no-ops since our void* type is i8*, but this
  // is future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = CGF.Int8Ty;
  else
    elemTy = CGF.ConvertTypeForMem(elementType);

  if (CGF.getLangOpts().PointerOverflowDefined)
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}

// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  MulOp->eraseFromParent();

  return FMulAdd;
}
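
// For reference, with -ffp-contract=on the statement `d = a * b + c;` (all
// floats) is emitted through the path above roughly as:
//   %0 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// and `d = c - a * b;` becomes an fmuladd whose first mul operand is negated.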

// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS,
                          false);
    }
  }

  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS,
                          false);
    }
  }

  return nullptr;
}

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}

/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types
    // are zeroed out. They could be overwritten through non-saturating
    // operations that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
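
// A small usage sketch (requires -ffixed-point; `k` is the _Accum suffix):
//   _Accum a = 1.5k, b = 0.25k;
//   _Accum c = a + b;  // CreateAdd on the common semantics of both operands,
//                      // then CreateFixedToFixed back to the result semantics
// Shifts are the exception above: they convert from the LHS semantics rather
// than the common semantics.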

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, /*isSub=*/true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

    // For everything else, we can just compute it, safe in the
    // assumption that Sema won't let anything through that we can't
    // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
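
// For reference, on a typical 64-bit target `int *p, *q; p - q` lowers to:
//   %lhs = ptrtoint ptr %p to i64
//   %rhs = ptrtoint ptr %q to i64
//   %d   = sub i64 %lhs, %rhs
//   %r   = sdiv exact i64 %d, 4   ; divide by sizeof(int)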

Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
                                                bool RHSIsSigned) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  // For a given type of LHS the maximum shift amount is width(LHS)-1, however
  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
  // this in ConstantInt::get, this results in the value getting truncated.
  // Constrain the return value to be max(RHS) in this case.
  llvm::Type *RHSTy = RHS->getType();
  llvm::APInt RHSMax =
      RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
                  : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
  if (RHSMax.ult(Ty->getBitWidth()))
    return llvm::ConstantInt::get(RHSTy, RHSMax);
  return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
}

Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}
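
// For reference, under the OpenCL/HLSL masking above a 32-bit LHS yields
//   %shl.mask = and i32 %rhs, 31
// since the width is a power of two, while a non-power-of-two width (e.g. a
// 24-bit integer type) falls through to `urem ... 24` instead.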

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), /*isSigned=*/false,
                                "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
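
// Illustrative C-level cases for the checks above, assuming 32-bit int:
//   1 << 31   // flagged in C99 mode (a 1 bit shifted into the sign bit);
//             // allowed under the C++ rule applied above, and in C++20 the
//             // signed-base check is disabled entirely
//   x << 40   // exponent >= bit width; flagged when
//             // -fsanitize=shift-exponent is enabled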

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), /*isSigned=*/false,
                                "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
                                    SanitizerHandler::ShiftOutOfBounds);
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *Valid = Builder.CreateICmpULE(
        Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector element
// type.
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}

Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics that compare vectors and give 0 or 1 as a result.
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to the predicate result.
      enum { CR6_EQ = 0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // In several cases the vector argument order will be reversed.
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch (E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, so if ResultTy is not i1 we would crash later; truncate
      // to i1 here to avoid that.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy,
                                  E->getType(), E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the
    // appropriate vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}

llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks, as E refers to a bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  return EmitScalarExpr(E->getRHS());
}
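
// An illustrative trigger for the helper above (a sketch):
//   struct S { int b : 3; } s;
//   int v = ...;
//   s.b = v;  // the RHS's implicit IntegralCast is peeled off so that the
//             // pre-conversion value can feed the bitfield-conversion check
//             // performed in VisitBinAssign below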

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    LValue LV = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    LV.getQuals().removePointerAuth();
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());
    CGF.EmitStoreThroughLValue(RValue::get(RV), LV);

    if (Ignore)
      return nullptr;
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // Artificial location to preserve the scope information.
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
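
// For reference, a non-constant `a && b` produces a CFG along these lines:
//   entry:    br i1 %a.cond, label %land.rhs, label %land.end
//   land.rhs: %b.cond = ... ; br label %land.end
//   land.end: %r = phi i1 [ false, %entry ], [ %b.cond, %land.rhs ]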

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
5386 // block that increments the RHS counter need to track branch condition
5387 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5388 // "FalseBlock" after the increment is done.
5389 if (InstrumentRegions &&
5390 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5391 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5392 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "lor.end");
5393 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5394 Builder.CreateCondBr(Cond: RHSCond, True: FBlock, False: RHSBlockCnt);
5395 CGF.EmitBlock(BB: RHSBlockCnt);
5396 CGF.incrementProfileCounter(E->getRHS());
5397 CGF.EmitBranch(Block: FBlock);
5398 CGF.EmitBlock(BB: FBlock);
5399 } else
5400 CGF.markStmtMaybeUsed(E->getRHS());
5401
5402 CGF.MCDCLogOpStack.pop_back();
5403 // If the top of the logical operator nest, update the MCDC bitmap.
5404 if (CGF.MCDCLogOpStack.empty())
5405 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5406
5407 // ZExt result to int or bool.
5408 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "lor.ext");
5409 }
5410
5411 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5412 if (!CGF.ContainsLabel(E->getRHS())) {
5413 CGF.markStmtMaybeUsed(E->getRHS());
5414 return llvm::ConstantInt::get(Ty: ResTy, V: 1);
5415 }
5416 }
5417
5418 // If the top of the logical operator nest, reset the MCDC temp to 0.
5419 if (CGF.MCDCLogOpStack.empty())
5420 CGF.maybeResetMCDCCondBitmap(E);
5421
5422 CGF.MCDCLogOpStack.push_back(Elt: E);
5423
5424 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "lor.end");
5425 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "lor.rhs");
5426
5427 CodeGenFunction::ConditionalEvaluation eval(CGF);
5428
5429 // Branch on the LHS first. If it is true, go to the success (cont) block.
5430 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: ContBlock, FalseBlock: RHSBlock,
5431 TrueCount: CGF.getCurrentProfileCount() -
5432 CGF.getProfileCount(E->getRHS()));
5433
5434 // Any edges into the ContBlock are now from an (indeterminate number of)
5435 // edges from this first condition. All of these values will be true. Start
5436 // setting up the PHI node in the Cont Block for this.
5437 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
5438 NameStr: "", InsertBefore: ContBlock);
5439 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
5440 PI != PE; ++PI)
5441 PN->addIncoming(V: llvm::ConstantInt::getTrue(Context&: VMContext), BB: *PI);
5442
5443 eval.begin(CGF);
5444
5445 // Emit the RHS condition as a bool value.
5446 CGF.EmitBlock(BB: RHSBlock);
5447 CGF.incrementProfileCounter(E);
5448 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5449
5450 eval.end(CGF);
5451
5452 // Reaquire the RHS block, as there may be subblocks inserted.
5453 RHSBlock = Builder.GetInsertBlock();
5454
5455 // If we're generating for profiling or coverage, generate a branch on the
5456 // RHS to a block that increments the RHS true counter needed to track branch
5457 // condition coverage.
5458 if (InstrumentRegions &&
5459 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5460 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5461 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5462 Builder.CreateCondBr(Cond: RHSCond, True: ContBlock, False: RHSBlockCnt);
5463 CGF.EmitBlock(BB: RHSBlockCnt);
5464 CGF.incrementProfileCounter(E->getRHS());
5465 CGF.EmitBranch(Block: ContBlock);
5466 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
5467 }
5468
5469 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5470 // into the phi node for the edge with the value of RHSCond.
5471 CGF.EmitBlock(BB: ContBlock);
5472 PN->addIncoming(V: RHSCond, BB: RHSBlock);
5473
5474 CGF.MCDCLogOpStack.pop_back();
5475 // If the top of the logical operator nest, update the MCDC bitmap.
5476 if (CGF.MCDCLogOpStack.empty())
5477 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5478
5479 // ZExt result to int.
5480 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "lor.ext");
5481}
5482
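// A sketch of what the vector path above emits for an OpenCL-style
// `int4 r = a || b;` (names illustrative): a non-short-circuiting,
// lane-wise logical or whose result lanes are 0 or -1:
//
//   %cmp  = icmp ne <4 x i32> %a, zeroinitializer
//   %cmp1 = icmp ne <4 x i32> %b, zeroinitializer
//   %or   = or <4 x i1> %cmp, %cmp1
//   %sext = sext <4 x i1> %or to <4 x i32>
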
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
// Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}

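// When both arms of a conditional pass the check above,
// VisitAbstractConditionalOperator emits a select instead of control flow.
// A rough sketch for `x ? 4 : 5` (names illustrative, instrumentation
// omitted):
//
//   %tobool = icmp ne i32 %x, 0
//   %cond   = select i1 %tobool, i32 4, i32 5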
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        if (llvm::EnableSingleByteCoverage) {
          CGF.incrementProfileCounter(lhsExpr);
          CGF.incrementProfileCounter(rhsExpr);
        }
        CGF.incrementProfileCounter(E);
      }
      Value *Result = Visit(live);
      CGF.markStmtMaybeUsed(dead);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    if (llvm::EnableSingleByteCoverage) {
      CGF.incrementProfileCounter(lhsExpr);
      CGF.incrementProfileCounter(rhsExpr);
      CGF.incrementProfileCounter(E);
    } else
      CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // If at the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If at the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(lhsExpr);
  else
    CGF.incrementProfileCounter(E);

  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If at the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(rhsExpr);

  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node to merge the values of the two arms.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to the
  // continuation block.
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);

  return PN;
}
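
// In the general control-flow case above, `c ? a : b` lowers roughly to the
// following shape (sketch only; instrumentation omitted, names illustrative):
//
//            br i1 %tobool, label %cond.true, label %cond.false
//   cond.true:
//            ; evaluate a
//            br label %cond.end
//   cond.false:
//            ; evaluate b
//            br label %cond.end
//   cond.end:
//            %cond = phi i32 [ %a, %cond.true ], [ %b, %cond.false ]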

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  return ArgPtr.getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src,
                                     llvm::ArrayRef(Mask, NumElementsDst));
}

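// For example (a sketch): with NumElementsDst == 4, the full mask
// <0, 1, 2, -1> widens a vec3 to a vec4 whose fourth lane is poison, and
// with NumElementsDst == 3 only the <0, 1, 2> prefix is used, truncating a
// vec4 back to a vec3:
//
//   %vec4 = shufflevector <3 x float> %v, <3 x float> poison,
//                         <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
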
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalars or vectors of different lengths, and either can be a
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer : needs 1 bitcast
// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t : needs 1 ptrtoint
//   b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer : needs 1 inttoptr
//   b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

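// For instance (a sketch, assuming 64-bit pointers), case 3b for
// pointer -> double takes two instructions:
//
//   %i = ptrtoint ptr %p to i64
//   %d = bitcast i64 %i to double
//
// and case 4b (double -> pointer) is the mirror image: a bitcast to i64
// followed by an inttoptr.
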
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

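// As a concrete sketch of the vec3 special case: an OpenCL `as_int4(f3)` on
// a float3 `f3` (legal because a 3-component vector occupies the size of a
// 4-component one) first widens to vec4, then bitcasts:
//
//   %vec4   = shufflevector <3 x float> %f3, <3 x float> poison,
//                           <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
//   %astype = bitcast <4 x float> %vec4 to <4 x i32>
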
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type and return
/// its value. IgnoreResultAssign indicates that the caller does not need the
/// result of an enclosed assignment, only its side effects.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}

llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ApplyAtomGroup Grp(getDebugInfo());
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP
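
  // For illustration, COMPOUND_OP(Add) expands to
  //
  //   case BO_AddAssign:
  //     return Scalar.EmitCompoundAssignLValue(
  //         E, &ScalarExprEmitter::EmitAdd, Result);
  //
  // routing each compound assignment to the corresponding binop emitter.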

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag for whether an overflow
/// happened during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

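// As a worked sketch, assuming a 64-bit target and a hypothetical
// `struct S { int a; double b; };` (so the array stride is 16 bytes and the
// field offset of `b` is 8), the offset of
// `getelementptr inbounds %struct.S, ptr %p, i64 %i, i32 1` accumulates as:
//
//   TotalOffset = smul.with.overflow(16, %i)          ; array-style step
//   TotalOffset = sadd.with.overflow(TotalOffset, 8)  ; struct field step
//
// with OffsetOverflows OR'ed together from each intrinsic's overflow bit.
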
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform a nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
  auto CheckHandler = SanitizerHandler::PointerOverflow;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;

  if (PerformNullCheck) {
    // If the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-null base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, CheckOrdinal);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // The GEP is computed as `unsigned base + signed offset`, therefore:
      // * If the offset was positive, then the computed pointer cannot be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If the offset was negative, then the computed pointer cannot be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // The GEP is computed as `unsigned base + unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // The GEP is computed as `unsigned base - unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] greater than the base
      // pointer, unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, CheckOrdinal);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);

  return GEPVal;
}

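// The net effect, sketched for `p + i` with a signed index on a 64-bit
// target (names illustrative; the sanitizer handler call is elided):
//
//   %base     = ptrtoint ptr %p to i64
//   %computed = add i64 %base, %off      ; %off = total offset in bytes
//   %nonneg   = icmp sge i64 %off, 0
//   %fwdok    = icmp uge i64 %computed, %base
//   %bwdok    = icmp ult i64 %computed, %base
//   %validgep = select i1 %nonneg, i1 %fwdok, i1 %bwdok
//   ; and-ed with the no-overflow flag, then checked, calling
//   ; __ubsan_handle_pointer_overflow on failure
//
// plus the null-consistency check when null is not a defined pointer value
// in the address space.
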
Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
    llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
    if (!SignedIndices && !IsSubtraction)
      NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

    return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
  }

  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}