//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
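/// For example, INT_MAX + 1 evaluated as a signed BO_Add wraps \p Result
/// around to INT_MIN and returns true.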
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;                 // Entire expr, used to diagnose unsupported
                                 // operations. May not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either of
  // the unpromoted types is less than half the size of the promoted type.
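  // For example, with a 32-bit promoted type, multiplying two unsigned 8-bit
  // operands (2 * 8 < 32) yields at most a 16-bit value and cannot overflow,
  // whereas two unsigned 16-bit operands (2 * 16 == 32) can.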
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat them here, unless the
        // alignment-assumption sanitizer is enabled; in that case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression that represents a value l-value,
  /// this method emits the address of the l-value, then loads and returns the
  /// result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between two fixed point types, or between a fixed point type and
  /// an integer type.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero =
        CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
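    // For example, given:
    //   %tobool = icmp ne i32 %x, 0
    //   %conv   = zext i1 %tobool to i32
    // converting %conv back to a boolean can simply reuse %tobool.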
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.ConvertTypeForMem(E->getType()),
            CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
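      // For example, multiplying a 4x3 matrix by a 3x2 matrix yields a 4x2
      // result; the inner dimension (3) cannot be recovered from the result
      // type alone.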
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,
                                                  bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used to constrain shift amounts for OpenCL: mask the shift amount for
  // power-of-two widths, and use URem for non-power-of-two widths.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

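  /// Get the type in which to perform the computation when excess-precision
  /// arithmetic applies (for example, _Float16 arithmetic evaluated in float);
  /// returns a null QualType when no promotion is needed.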
  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements,
                                 VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
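  // For example, HANDLEBINOP(Mul) declares VisitBinMul and VisitBinMulAssign,
  // both of which forward to EmitMul.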
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                       \
  Value *VisitBin##CODE(const BinaryOperator *E) {                             \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,              \
                       llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
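  // For example, for a float -> i32 conversion this yields
  // MinSrc = -2147483904.0 and MaxSrc = 2147483648.0, the nearest
  // representable floats just outside the valid i32 range.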

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);
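  // Note that both comparisons are ordered, so each is false when Src is NaN,
  // which makes the combined check fail for NaN as required.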

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
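  // For example, an i32 -> i8 truncation with a signed destination emits:
  //   %anyext = sext i8 %dst to i32
  //   %truncheck = icmp eq i32 %anyext, %src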
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would always have
  // been dropped by the optimization passes (instcombine) anyway.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  if (!SrcSigned && !DstSigned)
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
  else
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  return std::make_pair(
      Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check =
      Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
                                                  Value *Dst, QualType DstType,
                                                  const CGBitFieldInfo &Info,
                                                  SourceLocation Loc) {

  if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate the src width to avoid emitting code
  // for unnecessary cases.
  unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  CodeGenFunction::SanitizerScope SanScope(this);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases:
  //   1. If Src and Dst have the same signedness and size.
  //   2. If both are unsigned, a sign check is unnecessary.
  //   3. If Dst is signed and bigger than Src, either sign-extension
  //      or zero-extension will make sure the sign remains.
1362 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1363
1364 if (EmitTruncation)
1365 Check =
1366 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1367 else if (EmitSignChange) {
1368 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1369 "either the widths should be different, or the signednesses.");
1370 Check =
1371 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1372 } else
1373 return;
1374
1375 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1376 if (EmitTruncationFromUnsignedToSigned)
1377 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1378
1379 llvm::Constant *StaticArgs[] = {
1380 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(T: SrcType),
1381 EmitCheckTypeDescriptor(T: DstType),
1382 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
1383 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Info.Size)};
1384
1385 EmitCheck(Checked: Check.second, Check: SanitizerHandler::ImplicitConversion, StaticArgs,
1386 DynamicArgs: {Src, Dst});
1387}
1388
1389Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1390 QualType DstType, llvm::Type *SrcTy,
1391 llvm::Type *DstTy,
1392 ScalarConversionOpts Opts) {
1393 // The Element types determine the type of cast to perform.
1394 llvm::Type *SrcElementTy;
1395 llvm::Type *DstElementTy;
1396 QualType SrcElementType;
1397 QualType DstElementType;
1398 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1399 SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1400 DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1401 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1402 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1403 } else {
1404 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1405 "cannot cast between matrix and non-matrix types");
1406 SrcElementTy = SrcTy;
1407 DstElementTy = DstTy;
1408 SrcElementType = SrcType;
1409 DstElementType = DstType;
1410 }
1411
1412 if (isa<llvm::IntegerType>(Val: SrcElementTy)) {
1413 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1414 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1415 InputSigned = true;
1416 }
1417
1418 if (isa<llvm::IntegerType>(Val: DstElementTy))
1419 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1420 if (InputSigned)
1421 return Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1422 return Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1423 }
1424
1425 if (isa<llvm::IntegerType>(Val: DstElementTy)) {
1426 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1427 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1428
1429 // If we can't recognize overflow as undefined behavior, assume that
1430 // overflow saturates. This protects against normal optimizations if we are
1431 // compiling with non-standard FP semantics.
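    // E.g. with -fno-strict-float-cast-overflow, '(int)1e10f' lowers to
    // (illustrative IR):
    //   %conv = call i32 @llvm.fptosi.sat.i32.f32(float %x)
    // which clamps to INT_MAX instead of invoking undefined behavior.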
1432 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1433 llvm::Intrinsic::ID IID =
1434 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1435 return Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID, Tys: {DstTy, SrcTy}), Args: Src);
1436 }
1437
1438 if (IsSigned)
1439 return Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1440 return Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1441 }
1442
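  // Both element types are floating point here. LLVM's TypeID enum orders
  // the FP types roughly from narrowest to widest (half < float < double <
  // ...), so a smaller destination TypeID indicates a narrowing conversion.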
1443 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1444 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1445 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1446}
1447
1448/// Emit a conversion from the specified type to the specified destination type,
1449/// both of which are LLVM scalar types.
1450Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1451 QualType DstType,
1452 SourceLocation Loc,
1453 ScalarConversionOpts Opts) {
1454 // All conversions involving fixed point types should be handled by the
1455 // EmitFixedPoint family functions. This is done to prevent bloating up this
1456 // function more, and although fixed point numbers are represented by
1457 // integers, we do not want to follow any logic that assumes they should be
1458 // treated as integers.
1459 // TODO(leonardchan): When necessary, add another if statement checking for
1460 // conversions to fixed point types from other types.
1461 if (SrcType->isFixedPointType()) {
1462 if (DstType->isBooleanType())
1463 // It is important that we check this before checking if the dest type is
1464 // an integer because booleans are technically integer types.
1465 // We do not need to check the padding bit on unsigned types if unsigned
1466 // padding is enabled because overflow into this bit is undefined
1467 // behavior.
1468 return Builder.CreateIsNotNull(Arg: Src, Name: "tobool");
1469 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1470 DstType->isRealFloatingType())
1471 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1472
1473 llvm_unreachable(
1474 "Unhandled scalar conversion from a fixed point type to another type.");
1475 } else if (DstType->isFixedPointType()) {
1476 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1477 // This also includes converting booleans and enums to fixed point types.
1478 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1479
1480 llvm_unreachable(
1481 "Unhandled scalar conversion to a fixed point type from another type.");
1482 }
1483
1484 QualType NoncanonicalSrcType = SrcType;
1485 QualType NoncanonicalDstType = DstType;
1486
1487 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1488 DstType = CGF.getContext().getCanonicalType(T: DstType);
1489 if (SrcType == DstType) return Src;
1490
1491 if (DstType->isVoidType()) return nullptr;
1492
1493 llvm::Value *OrigSrc = Src;
1494 QualType OrigSrcType = SrcType;
1495 llvm::Type *SrcTy = Src->getType();
1496
1497 // Handle conversions to bool first, they are special: comparisons against 0.
1498 if (DstType->isBooleanType())
1499 return EmitConversionToBool(Src, SrcType);
1500
1501 llvm::Type *DstTy = ConvertType(T: DstType);
1502
1503 // Cast from half through float if half isn't a native type.
1504 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1505 // Cast to FP using the intrinsic if the half type itself isn't supported.
1506 if (DstTy->isFloatingPointTy()) {
1507 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1508 return Builder.CreateCall(
1509 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1510 Src);
1511 } else {
1512 // Cast to other types through float, using either the intrinsic or FPExt,
1513 // depending on whether the half type itself is supported
1514 // (as opposed to operations on half, available with NativeHalfType).
1515 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1516 Src = Builder.CreateCall(
1517 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1518 CGF.CGM.FloatTy),
1519 Src);
1520 } else {
1521 Src = Builder.CreateFPExt(V: Src, DestTy: CGF.CGM.FloatTy, Name: "conv");
1522 }
1523 SrcType = CGF.getContext().FloatTy;
1524 SrcTy = CGF.FloatTy;
1525 }
1526 }
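  // E.g. on a target without native half support, '(int)h' for '__fp16 h'
  // goes through float (illustrative IR):
  //   %f = fpext half %h to float   ; or @llvm.convert.from.fp16
  //   %conv = fptosi float %f to i32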
1527
1528 // Ignore conversions like int -> uint.
1529 if (SrcTy == DstTy) {
1530 if (Opts.EmitImplicitIntegerSignChangeChecks)
1531 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Src,
1532 DstType: NoncanonicalDstType, Loc);
1533
1534 return Src;
1535 }
1536
1537 // Handle pointer conversions next: pointers can only be converted to/from
1538 // other pointers and integers. Check for pointer types in terms of LLVM, as
1539 // some native types (like Obj-C id) may map to a pointer type.
1540 if (auto DstPT = dyn_cast<llvm::PointerType>(Val: DstTy)) {
1541 // The source value may be an integer, or a pointer.
1542 if (isa<llvm::PointerType>(Val: SrcTy))
1543 return Src;
1544
1545 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1546 // First, convert to the correct width so that we control the kind of
1547 // extension.
1548 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1549 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1550 llvm::Value* IntResult =
1551 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
1552 // Then, cast to pointer.
1553 return Builder.CreateIntToPtr(V: IntResult, DestTy: DstTy, Name: "conv");
1554 }
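  // E.g. '(char *)s' for 'short s' on a typical 64-bit target first
  // sign-extends to the pointer width (illustrative IR):
  //   %conv = sext i16 %s to i64
  //   %conv1 = inttoptr i64 %conv to ptr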
1555
1556 if (isa<llvm::PointerType>(Val: SrcTy)) {
    // Must be a ptr-to-int cast.
1558 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1559 return Builder.CreatePtrToInt(V: Src, DestTy: DstTy, Name: "conv");
1560 }
1561
1562 // A scalar can be splatted to an extended vector of the same element type
1563 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1564 // Sema should add casts to make sure that the source expression's type is
1565 // the same as the vector's element type (sans qualifiers)
1566 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1567 SrcType.getTypePtr() &&
1568 "Splatted expr doesn't match with vector element type?");
1569
1570 // Splat the element across to all elements
1571 unsigned NumElements = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements();
1572 return Builder.CreateVectorSplat(NumElts: NumElements, V: Src, Name: "splat");
1573 }
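  // E.g. converting a float to a 'float4' ext_vector_type splats it across
  // all four lanes (an insertelement followed by a zero-mask shufflevector).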
1574
1575 if (SrcType->isMatrixType() && DstType->isMatrixType())
1576 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1577
1578 if (isa<llvm::VectorType>(Val: SrcTy) || isa<llvm::VectorType>(Val: DstTy)) {
1579 // Allow bitcast from vector to integer/fp of the same size.
1580 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1581 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1582 if (SrcSize == DstSize)
1583 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name: "conv");
1584
1585 // Conversions between vectors of different sizes are not allowed except
1586 // when vectors of half are involved. Operations on storage-only half
1587 // vectors require promoting half vector operands to float vectors and
1588 // truncating the result, which is either an int or float vector, to a
1589 // short or half vector.
1590
1591 // Source and destination are both expected to be vectors.
1592 llvm::Type *SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1593 llvm::Type *DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1594 (void)DstElementTy;
1595
1596 assert(((SrcElementTy->isIntegerTy() &&
1597 DstElementTy->isIntegerTy()) ||
1598 (SrcElementTy->isFloatingPointTy() &&
1599 DstElementTy->isFloatingPointTy())) &&
1600 "unexpected conversion between a floating-point vector and an "
1601 "integer vector");
1602
1603 // Truncate an i32 vector to an i16 vector.
1604 if (SrcElementTy->isIntegerTy())
1605 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: false, Name: "conv");
1606
1607 // Truncate a float vector to a half vector.
1608 if (SrcSize > DstSize)
1609 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1610
1611 // Promote a half vector to a float vector.
1612 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1613 }
1614
1615 // Finally, we have the arithmetic types: real int/float.
1616 Value *Res = nullptr;
1617 llvm::Type *ResTy = DstTy;
1618
1619 // An overflowing conversion has undefined behavior if either the source type
1620 // or the destination type is a floating-point type. However, we consider the
1621 // range of representable values for all floating-point types to be
1622 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1623 // floating-point type.
1624 if (CGF.SanOpts.has(K: SanitizerKind::FloatCastOverflow) &&
1625 OrigSrcType->isFloatingType())
1626 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1627 Loc);
1628
1629 // Cast to half through float if half isn't a native type.
1630 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1631 // Make sure we cast in a single step if from another FP type.
1632 if (SrcTy->isFloatingPointTy()) {
1633 // Use the intrinsic if the half type itself isn't supported
1634 // (as opposed to operations on half, available with NativeHalfType).
1635 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1636 return Builder.CreateCall(
1637 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1638 // If the half type is supported, just use an fptrunc.
1639 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy);
1640 }
1641 DstTy = CGF.FloatTy;
1642 }
1643
1644 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1645
1646 if (DstTy != ResTy) {
1647 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1648 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1649 Res = Builder.CreateCall(
1650 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1651 Res);
1652 } else {
1653 Res = Builder.CreateFPTrunc(V: Res, DestTy: ResTy, Name: "conv");
1654 }
1655 }
1656
1657 if (Opts.EmitImplicitIntegerTruncationChecks)
1658 EmitIntegerTruncationCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1659 DstType: NoncanonicalDstType, Loc);
1660
1661 if (Opts.EmitImplicitIntegerSignChangeChecks)
1662 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1663 DstType: NoncanonicalDstType, Loc);
1664
1665 return Res;
1666}
1667
1668Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1669 QualType DstTy,
1670 SourceLocation Loc) {
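  // E.g. a 'short _Fract' to '_Accum' conversion takes the fixed-to-fixed
  // path below, while float <-> fixed and integer <-> fixed conversions each
  // have their own FixedPointBuilder entry point.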
1671 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1672 llvm::Value *Result;
1673 if (SrcTy->isRealFloatingType())
1674 Result = FPBuilder.CreateFloatingToFixed(Src,
1675 DstSema: CGF.getContext().getFixedPointSemantics(Ty: DstTy));
1676 else if (DstTy->isRealFloatingType())
1677 Result = FPBuilder.CreateFixedToFloating(Src,
1678 SrcSema: CGF.getContext().getFixedPointSemantics(Ty: SrcTy),
1679 DstTy: ConvertType(T: DstTy));
1680 else {
1681 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(Ty: SrcTy);
1682 auto DstFPSema = CGF.getContext().getFixedPointSemantics(Ty: DstTy);
1683
1684 if (DstTy->isIntegerType())
1685 Result = FPBuilder.CreateFixedToInteger(Src, SrcSema: SrcFPSema,
1686 DstWidth: DstFPSema.getWidth(),
1687 DstIsSigned: DstFPSema.isSigned());
1688 else if (SrcTy->isIntegerType())
1689 Result = FPBuilder.CreateIntegerToFixed(Src, SrcIsSigned: SrcFPSema.isSigned(),
1690 DstSema: DstFPSema);
1691 else
1692 Result = FPBuilder.CreateFixedToFixed(Src, SrcSema: SrcFPSema, DstSema: DstFPSema);
1693 }
1694 return Result;
1695}
1696
1697/// Emit a conversion from the specified complex type to the specified
1698/// destination type, where the destination type is an LLVM scalar type.
1699Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1700 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1701 SourceLocation Loc) {
1702 // Get the source element type.
1703 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1704
1705 // Handle conversions to bool first, they are special: comparisons against 0.
1706 if (DstTy->isBooleanType()) {
1707 // Complex != 0 -> (Real != 0) | (Imag != 0)
1708 Src.first = EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1709 Src.second = EmitScalarConversion(Src: Src.second, SrcType: SrcTy, DstType: DstTy, Loc);
1710 return Builder.CreateOr(LHS: Src.first, RHS: Src.second, Name: "tobool");
1711 }
1712
  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
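  // E.g. '(double)z' for '_Complex double z' converts just the real part,
  // while '(bool)z' uses the (real != 0) | (imag != 0) form above.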
1717 return EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1718}
1719
1720Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1721 return CGF.EmitFromMemory(Value: CGF.CGM.EmitNullConstant(T: Ty), Ty);
1722}
1723
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1)
/// are \c true.
1728void ScalarExprEmitter::EmitBinOpCheck(
1729 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1730 assert(CGF.IsSanitizerScope);
1731 SanitizerHandler Check;
1732 SmallVector<llvm::Constant *, 4> StaticData;
1733 SmallVector<llvm::Value *, 2> DynamicData;
1734
1735 BinaryOperatorKind Opcode = Info.Opcode;
1736 if (BinaryOperator::isCompoundAssignmentOp(Opc: Opcode))
1737 Opcode = BinaryOperator::getOpForCompoundAssignment(Opc: Opcode);
1738
1739 StaticData.push_back(Elt: CGF.EmitCheckSourceLocation(Loc: Info.E->getExprLoc()));
1740 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Info.E);
1741 if (UO && UO->getOpcode() == UO_Minus) {
1742 Check = SanitizerHandler::NegateOverflow;
1743 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: UO->getType()));
1744 DynamicData.push_back(Elt: Info.RHS);
1745 } else {
1746 if (BinaryOperator::isShiftOp(Opc: Opcode)) {
1747 // Shift LHS negative or too large, or RHS out of bounds.
1748 Check = SanitizerHandler::ShiftOutOfBounds;
1749 const BinaryOperator *BO = cast<BinaryOperator>(Val: Info.E);
1750 StaticData.push_back(
1751 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getLHS()->getType()));
1752 StaticData.push_back(
1753 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getRHS()->getType()));
1754 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1755 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1756 Check = SanitizerHandler::DivremOverflow;
1757 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1758 } else {
1759 // Arithmetic overflow (+, -, *).
1760 switch (Opcode) {
1761 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1762 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1763 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1764 default: llvm_unreachable("unexpected opcode for bin op check");
1765 }
1766 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1767 }
1768 DynamicData.push_back(Elt: Info.LHS);
1769 DynamicData.push_back(Elt: Info.RHS);
1770 }
1771
1772 CGF.EmitCheck(Checked: Checks, Check, StaticArgs: StaticData, DynamicArgs: DynamicData);
1773}
1774
1775//===----------------------------------------------------------------------===//
1776// Visitor Methods
1777//===----------------------------------------------------------------------===//
1778
1779Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1780 CGF.ErrorUnsupported(E, "scalar expression");
1781 if (E->getType()->isVoidType())
1782 return nullptr;
1783 return llvm::UndefValue::get(T: CGF.ConvertType(T: E->getType()));
1784}
1785
1786Value *
1787ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1788 ASTContext &Context = CGF.getContext();
1789 unsigned AddrSpace =
1790 Context.getTargetAddressSpace(AS: CGF.CGM.GetGlobalConstantAddressSpace());
1791 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1792 Str: E->ComputeName(Context), Name: "__usn_str", AddressSpace: AddrSpace);
1793
1794 llvm::Type *ExprTy = ConvertType(T: E->getType());
1795 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: GlobalConstStr, DestTy: ExprTy,
1796 Name: "usn_addr_cast");
1797}
1798
1799Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1800 // Vector Mask Case
1801 if (E->getNumSubExprs() == 2) {
1802 Value *LHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1803 Value *RHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1804 Value *Mask;
1805
1806 auto *LTy = cast<llvm::FixedVectorType>(Val: LHS->getType());
1807 unsigned LHSElts = LTy->getNumElements();
1808
1809 Mask = RHS;
1810
1811 auto *MTy = cast<llvm::FixedVectorType>(Val: Mask->getType());
1812
1813 // Mask off the high bits of each shuffle index.
1814 Value *MaskBits =
1815 llvm::ConstantInt::get(Ty: MTy, V: llvm::NextPowerOf2(A: LHSElts - 1) - 1);
1816 Mask = Builder.CreateAnd(LHS: Mask, RHS: MaskBits, Name: "mask");
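    // E.g. with a 4-element LHS the mask is 3, so an out-of-range shuffle
    // index like 6 wraps around to lane 2.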
1817
    // newv = poison
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
1824 auto *RTy = llvm::FixedVectorType::get(ElementType: LTy->getElementType(),
1825 NumElts: MTy->getNumElements());
1826 Value* NewV = llvm::PoisonValue::get(T: RTy);
1827 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1828 Value *IIndx = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: i);
1829 Value *Indx = Builder.CreateExtractElement(Vec: Mask, Idx: IIndx, Name: "shuf_idx");
1830
1831 Value *VExt = Builder.CreateExtractElement(Vec: LHS, Idx: Indx, Name: "shuf_elt");
1832 NewV = Builder.CreateInsertElement(Vec: NewV, NewElt: VExt, Idx: IIndx, Name: "shuf_ins");
1833 }
1834 return NewV;
1835 }
1836
1837 Value* V1 = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1838 Value* V2 = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1839
1840 SmallVector<int, 32> Indices;
1841 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1842 llvm::APSInt Idx = E->getShuffleMaskIdx(Ctx: CGF.getContext(), N: i-2);
1843 // Check for -1 and output it as undef in the IR.
1844 if (Idx.isSigned() && Idx.isAllOnes())
1845 Indices.push_back(Elt: -1);
1846 else
1847 Indices.push_back(Elt: Idx.getZExtValue());
1848 }
1849
1850 return Builder.CreateShuffleVector(V1, V2, Mask: Indices, Name: "shuffle");
1851}
1852
1853Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1854 QualType SrcType = E->getSrcExpr()->getType(),
1855 DstType = E->getType();
1856
1857 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
1858
1859 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1860 DstType = CGF.getContext().getCanonicalType(T: DstType);
1861 if (SrcType == DstType) return Src;
1862
1863 assert(SrcType->isVectorType() &&
1864 "ConvertVector source type must be a vector");
1865 assert(DstType->isVectorType() &&
1866 "ConvertVector destination type must be a vector");
1867
1868 llvm::Type *SrcTy = Src->getType();
1869 llvm::Type *DstTy = ConvertType(T: DstType);
1870
1871 // Ignore conversions like int -> uint.
1872 if (SrcTy == DstTy)
1873 return Src;
1874
1875 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1876 DstEltType = DstType->castAs<VectorType>()->getElementType();
1877
1878 assert(SrcTy->isVectorTy() &&
1879 "ConvertVector source IR type must be a vector");
1880 assert(DstTy->isVectorTy() &&
1881 "ConvertVector destination IR type must be a vector");
1882
1883 llvm::Type *SrcEltTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType(),
1884 *DstEltTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1885
1886 if (DstEltType->isBooleanType()) {
1887 assert((SrcEltTy->isFloatingPointTy() ||
1888 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1889
1890 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: SrcTy);
1891 if (SrcEltTy->isFloatingPointTy()) {
1892 return Builder.CreateFCmpUNE(LHS: Src, RHS: Zero, Name: "tobool");
1893 } else {
1894 return Builder.CreateICmpNE(LHS: Src, RHS: Zero, Name: "tobool");
1895 }
1896 }
1897
1898 // We have the arithmetic types: real int/float.
1899 Value *Res = nullptr;
1900
1901 if (isa<llvm::IntegerType>(Val: SrcEltTy)) {
1902 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1903 if (isa<llvm::IntegerType>(Val: DstEltTy))
1904 Res = Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1905 else if (InputSigned)
1906 Res = Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1907 else
1908 Res = Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1909 } else if (isa<llvm::IntegerType>(Val: DstEltTy)) {
1910 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1911 if (DstEltType->isSignedIntegerOrEnumerationType())
1912 Res = Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1913 else
1914 Res = Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1915 } else {
1916 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1917 "Unknown real conversion");
1918 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1919 Res = Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1920 else
1921 Res = Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1922 }
1923
1924 return Res;
1925}
1926
1927Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1928 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(ME: E)) {
1929 CGF.EmitIgnoredExpr(E: E->getBase());
1930 return CGF.emitScalarConstant(Constant, E);
1931 } else {
1932 Expr::EvalResult Result;
1933 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1934 llvm::APSInt Value = Result.Val.getInt();
1935 CGF.EmitIgnoredExpr(E: E->getBase());
1936 return Builder.getInt(AI: Value);
1937 }
1938 }
1939
1940 return EmitLoadOfLValue(E);
1941}
1942
1943Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1944 TestAndClearIgnoreResultAssign();
1945
  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
1950 if (!E->getBase()->getType()->isVectorType() &&
1951 !E->getBase()->getType()->isSveVLSBuiltinType())
1952 return EmitLoadOfLValue(E);
1953
1954 // Handle the vector case. The base must be a vector, the index must be an
1955 // integer value.
1956 Value *Base = Visit(E: E->getBase());
1957 Value *Idx = Visit(E: E->getIdx());
1958 QualType IdxTy = E->getIdx()->getType();
1959
1960 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
1961 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1962
1963 return Builder.CreateExtractElement(Vec: Base, Idx, Name: "vecext");
1964}
1965
1966Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1967 TestAndClearIgnoreResultAssign();
1968
  // Handle the matrix case. The base must be a matrix, and the row and
  // column indices must be integer values.
1971 Value *RowIdx = Visit(E: E->getRowIdx());
1972 Value *ColumnIdx = Visit(E: E->getColumnIdx());
1973
1974 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1975 unsigned NumRows = MatrixTy->getNumRows();
1976 llvm::MatrixBuilder MB(Builder);
1977 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
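  // The matrix is laid out column-major, so the flattened index is
  // RowIdx + ColumnIdx * NumRows; e.g. element (1, 2) of a 4x4 matrix
  // lives at offset 1 + 2 * 4 == 9.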
1978 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1979 MB.CreateIndexAssumption(Idx, NumElements: MatrixTy->getNumElementsFlattened());
1980
1981 Value *Matrix = Visit(E: E->getBase());
1982
1983 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1984 return Builder.CreateExtractElement(Vec: Matrix, Idx, Name: "matrixext");
1985}
1986
1987static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1988 unsigned Off) {
1989 int MV = SVI->getMaskValue(Elt: Idx);
1990 if (MV == -1)
1991 return -1;
1992 return Off + MV;
1993}
1994
1995static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1996 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1997 "Index operand too large for shufflevector mask!");
1998 return C->getZExtValue();
1999}
2000
2001Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2002 bool Ignore = TestAndClearIgnoreResultAssign();
2003 (void)Ignore;
  assert(Ignore == false && "init list ignored");
2005 unsigned NumInitElements = E->getNumInits();
2006
2007 if (E->hadArrayRangeDesignator())
2008 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2009
2010 llvm::VectorType *VType =
2011 dyn_cast<llvm::VectorType>(ConvertType(T: E->getType()));
2012
2013 if (!VType) {
2014 if (NumInitElements == 0) {
2015 // C++11 value-initialization for the scalar.
2016 return EmitNullValue(Ty: E->getType());
2017 }
2018 // We have a scalar in braces. Just use the first element.
2019 return Visit(E: E->getInit(Init: 0));
2020 }
2021
2022 if (isa<llvm::ScalableVectorType>(Val: VType)) {
2023 if (NumInitElements == 0) {
2024 // C++11 value-initialization for the vector.
2025 return EmitNullValue(Ty: E->getType());
2026 }
2027
2028 if (NumInitElements == 1) {
2029 Expr *InitVector = E->getInit(Init: 0);
2030
2031 // Initialize from another scalable vector of the same type.
2032 if (InitVector->getType() == E->getType())
2033 return Visit(E: InitVector);
2034 }
2035
2036 llvm_unreachable("Unexpected initialization of a scalable vector!");
2037 }
2038
2039 unsigned ResElts = cast<llvm::FixedVectorType>(Val: VType)->getNumElements();
2040
  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
2046 unsigned CurIdx = 0;
2047 bool VIsPoisonShuffle = false;
2048 llvm::Value *V = llvm::PoisonValue::get(T: VType);
2049 for (unsigned i = 0; i != NumInitElements; ++i) {
2050 Expr *IE = E->getInit(Init: i);
2051 Value *Init = Visit(E: IE);
2052 SmallVector<int, 16> Args;
2053
2054 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Val: Init->getType());
2055
2056 // Handle scalar elements. If the scalar initializer is actually one
2057 // element of a different vector of the same width, use shuffle instead of
2058 // extract+insert.
2059 if (!VVT) {
2060 if (isa<ExtVectorElementExpr>(Val: IE)) {
2061 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Val: Init);
2062
2063 if (cast<llvm::FixedVectorType>(Val: EI->getVectorOperandType())
2064 ->getNumElements() == ResElts) {
2065 llvm::ConstantInt *C = cast<llvm::ConstantInt>(Val: EI->getIndexOperand());
2066 Value *LHS = nullptr, *RHS = nullptr;
2067 if (CurIdx == 0) {
2068 // insert into poison -> shuffle (src, poison)
2069 // shufflemask must use an i32
2070 Args.push_back(Elt: getAsInt32(C, I32Ty: CGF.Int32Ty));
2071 Args.resize(N: ResElts, NV: -1);
2072
2073 LHS = EI->getVectorOperand();
2074 RHS = V;
2075 VIsPoisonShuffle = true;
2076 } else if (VIsPoisonShuffle) {
2077 // insert into poison shuffle && size match -> shuffle (v, src)
2078 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(Val: V);
2079 for (unsigned j = 0; j != CurIdx; ++j)
2080 Args.push_back(Elt: getMaskElt(SVI: SVV, Idx: j, Off: 0));
2081 Args.push_back(Elt: ResElts + C->getZExtValue());
2082 Args.resize(N: ResElts, NV: -1);
2083
2084 LHS = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2085 RHS = EI->getVectorOperand();
2086 VIsPoisonShuffle = false;
2087 }
2088 if (!Args.empty()) {
2089 V = Builder.CreateShuffleVector(V1: LHS, V2: RHS, Mask: Args);
2090 ++CurIdx;
2091 continue;
2092 }
2093 }
2094 }
2095 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx: Builder.getInt32(C: CurIdx),
2096 Name: "vecinit");
2097 VIsPoisonShuffle = false;
2098 ++CurIdx;
2099 continue;
2100 }
2101
2102 unsigned InitElts = cast<llvm::FixedVectorType>(Val: VVT)->getNumElements();
2103
2104 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2105 // input is the same width as the vector being constructed, generate an
2106 // optimized shuffle of the swizzle input into the result.
2107 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2108 if (isa<ExtVectorElementExpr>(Val: IE)) {
2109 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Val: Init);
2110 Value *SVOp = SVI->getOperand(i_nocapture: 0);
2111 auto *OpTy = cast<llvm::FixedVectorType>(Val: SVOp->getType());
2112
2113 if (OpTy->getNumElements() == ResElts) {
2114 for (unsigned j = 0; j != CurIdx; ++j) {
2115 // If the current vector initializer is a shuffle with poison, merge
2116 // this shuffle directly into it.
2117 if (VIsPoisonShuffle) {
2118 Args.push_back(Elt: getMaskElt(SVI: cast<llvm::ShuffleVectorInst>(Val: V), Idx: j, Off: 0));
2119 } else {
2120 Args.push_back(Elt: j);
2121 }
2122 }
2123 for (unsigned j = 0, je = InitElts; j != je; ++j)
2124 Args.push_back(Elt: getMaskElt(SVI, Idx: j, Off: Offset));
2125 Args.resize(N: ResElts, NV: -1);
2126
2127 if (VIsPoisonShuffle)
2128 V = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2129
2130 Init = SVOp;
2131 }
2132 }
2133
2134 // Extend init to result vector length, and then shuffle its contribution
2135 // to the vector initializer into V.
2136 if (Args.empty()) {
2137 for (unsigned j = 0; j != InitElts; ++j)
2138 Args.push_back(Elt: j);
2139 Args.resize(N: ResElts, NV: -1);
2140 Init = Builder.CreateShuffleVector(V: Init, Mask: Args, Name: "vext");
2141
2142 Args.clear();
2143 for (unsigned j = 0; j != CurIdx; ++j)
2144 Args.push_back(Elt: j);
2145 for (unsigned j = 0; j != InitElts; ++j)
2146 Args.push_back(Elt: j + Offset);
2147 Args.resize(N: ResElts, NV: -1);
2148 }
2149
2150 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2151 // merging subsequent shuffles into this one.
2152 if (CurIdx == 0)
2153 std::swap(a&: V, b&: Init);
2154 V = Builder.CreateShuffleVector(V1: V, V2: Init, Mask: Args, Name: "vecinit");
2155 VIsPoisonShuffle = isa<llvm::PoisonValue>(Val: Init);
2156 CurIdx += InitElts;
2157 }
2158
  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers.
2164 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2165 Value *Idx = Builder.getInt32(C: CurIdx);
2166 llvm::Value *Init = llvm::Constant::getNullValue(Ty: EltTy);
2167 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx, Name: "vecinit");
2168 }
2169 return V;
2170}
2171
2172bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2173 const Expr *E = CE->getSubExpr();
2174
2175 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2176 return false;
2177
2178 if (isa<CXXThisExpr>(Val: E->IgnoreParens())) {
2179 // We always assume that 'this' is never null.
2180 return false;
2181 }
2182
2183 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2184 // And that glvalue casts are never null.
2185 if (ICE->isGLValue())
2186 return false;
2187 }
2188
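  // E.g. 'static_cast<Derived *>(basePtr)' needs the check: applying a
  // nonzero base-to-derived offset to a null pointer would otherwise
  // produce a non-null but invalid pointer.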
2189 return true;
2190}
2191
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
2195Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2196 Expr *E = CE->getSubExpr();
2197 QualType DestTy = CE->getType();
2198 CastKind Kind = CE->getCastKind();
2199 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2200
2201 // These cases are generally not written to ignore the result of
2202 // evaluating their sub-expressions, so we clear this now.
2203 bool Ignored = TestAndClearIgnoreResultAssign();
2204
2205 // Since almost all cast kinds apply to scalars, this switch doesn't have
2206 // a default case, so the compiler will warn on a missing case. The cases
2207 // are in the same order as in the CastKind enum.
2208 switch (Kind) {
2209 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2210 case CK_BuiltinFnToFnPtr:
2211 llvm_unreachable("builtin functions are handled elsewhere");
2212
2213 case CK_LValueBitCast:
2214 case CK_ObjCObjectLValueCast: {
2215 Address Addr = EmitLValue(E).getAddress(CGF);
2216 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2217 LValue LV = CGF.MakeAddrLValue(Addr, T: DestTy);
2218 return EmitLoadOfLValue(LV, CE->getExprLoc());
2219 }
2220
2221 case CK_LValueToRValueBitCast: {
2222 LValue SourceLVal = CGF.EmitLValue(E);
2223 Address Addr = SourceLVal.getAddress(CGF).withElementType(
2224 ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2225 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2226 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2227 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2228 }
2229
2230 case CK_CPointerToObjCPointerCast:
2231 case CK_BlockPointerToObjCPointerCast:
2232 case CK_AnyPointerToBlockPointerCast:
2233 case CK_BitCast: {
2234 Value *Src = Visit(E: const_cast<Expr*>(E));
2235 llvm::Type *SrcTy = Src->getType();
2236 llvm::Type *DstTy = ConvertType(T: DestTy);
2237 assert(
2238 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2239 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2240 "Address-space cast must be used to convert address spaces");
2241
2242 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) {
2243 if (auto *PT = DestTy->getAs<PointerType>()) {
2244 CGF.EmitVTablePtrCheckForCast(
2245 T: PT->getPointeeType(),
2246 Derived: Address(Src,
2247 CGF.ConvertTypeForMem(
2248 T: E->getType()->castAs<PointerType>()->getPointeeType()),
2249 CGF.getPointerAlign()),
2250 /*MayBeNull=*/true, TCK: CodeGenFunction::CFITCK_UnrelatedCast,
2251 Loc: CE->getBeginLoc());
2252 }
2253 }
2254
2255 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2256 const QualType SrcType = E->getType();
2257
      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to a pointer that could carry dynamic information
        // (provided by invariant.group) requires a launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to a pointer that does not carry dynamic information
        // (provided by invariant.group) requires stripping it. Note that we
        // don't do this if the source could not be a dynamic type and the
        // destination could be, because the dynamic information is already
        // laundered: launder(strip(src)) == launder(src), so there is no
        // need to add an extra strip before the launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
2271 }
2272
2273 // Update heapallocsite metadata when there is an explicit pointer cast.
2274 if (auto *CI = dyn_cast<llvm::CallBase>(Val: Src)) {
2275 if (CI->getMetadata(Kind: "heapallocsite") && isa<ExplicitCastExpr>(Val: CE) &&
2276 !isa<CastExpr>(Val: E)) {
2277 QualType PointeeType = DestTy->getPointeeType();
2278 if (!PointeeType.isNull())
2279 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: PointeeType,
2280 Loc: CE->getExprLoc());
2281 }
2282 }
2283
2284 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2285 // same element type, use the llvm.vector.insert intrinsic to perform the
2286 // bitcast.
2287 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2288 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2289 // If we are casting a fixed i8 vector to a scalable i1 predicate
2290 // vector, use a vector insert and bitcast the result.
2291 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2292 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
2293 FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2294 ScalableDstTy = llvm::ScalableVectorType::get(
2295 FixedSrcTy->getElementType(),
2296 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
2297 }
2298 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2299 llvm::Value *UndefVec = llvm::UndefValue::get(T: ScalableDstTy);
2300 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty);
2301 llvm::Value *Result = Builder.CreateInsertVector(
2302 DstType: ScalableDstTy, SrcVec: UndefVec, SubVec: Src, Idx: Zero, Name: "cast.scalable");
2303 if (Result->getType() != DstTy)
2304 Result = Builder.CreateBitCast(V: Result, DestTy: DstTy);
2305 return Result;
2306 }
2307 }
2308 }
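    // E.g. casting a fixed <16 x i8> vector to an SVE vector (with
    // -msve-vector-bits=128) emits (illustrative IR):
    //   %cast.scalable = call <vscale x 16 x i8>
    //       @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
    //                                         <16 x i8> %v, i64 0)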
2309
2310 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2311 // same element type, use the llvm.vector.extract intrinsic to perform the
2312 // bitcast.
2313 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(Val: SrcTy)) {
2314 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2315 // If we are casting a scalable i1 predicate vector to a fixed i8
2316 // vector, bitcast the source and use a vector extract.
2317 if (ScalableSrcTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2318 ScalableSrcTy->getElementCount().isKnownMultipleOf(RHS: 8) &&
2319 FixedDstTy->getElementType()->isIntegerTy(8)) {
2320 ScalableSrcTy = llvm::ScalableVectorType::get(
2321 FixedDstTy->getElementType(),
2322 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2323 Src = Builder.CreateBitCast(V: Src, DestTy: ScalableSrcTy);
2324 }
2325 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
2326 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty);
2327 return Builder.CreateExtractVector(DstType: DstTy, SrcVec: Src, Idx: Zero, Name: "cast.fixed");
2328 }
2329 }
2330 }
2331
2332 // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.vector.{insert,extract} intrinsics require the
    // element types of the vectors to be the same, we need to keep this
    // around for bitcasts between VLAT <-> VLST where the element types of
    // the vectors are not the same, until we figure out a better way of
    // doing these casts.
2338 if ((isa<llvm::FixedVectorType>(Val: SrcTy) &&
2339 isa<llvm::ScalableVectorType>(Val: DstTy)) ||
2340 (isa<llvm::ScalableVectorType>(Val: SrcTy) &&
2341 isa<llvm::FixedVectorType>(Val: DstTy))) {
2342 Address Addr = CGF.CreateDefaultAlignTempAlloca(Ty: SrcTy, Name: "saved-value");
2343 LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType());
2344 CGF.EmitStoreOfScalar(value: Src, lvalue: LV);
2345 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2346 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2347 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2348 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2349 }
2350 return Builder.CreateBitCast(V: Src, DestTy: DstTy);
2351 }
2352 case CK_AddressSpaceConversion: {
2353 Expr::EvalResult Result;
2354 if (E->EvaluateAsRValue(Result, Ctx: CGF.getContext()) &&
2355 Result.Val.isNullPointer()) {
    // If E has side effects, it is emitted even if its final result is a
    // null pointer. In that case, a DCE pass should be able to eliminate
    // the useless instructions emitted during the translation of E.
2359 if (Result.HasSideEffects)
2360 Visit(E);
2361 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(
2362 Val: ConvertType(T: DestTy)), QT: DestTy);
2363 }
    // Since the target may map different address spaces in the AST to the
    // same address space, an address space conversion may end up as a bitcast.
2366 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2367 CGF, V: Visit(E), SrcAddr: E->getType()->getPointeeType().getAddressSpace(),
2368 DestAddr: DestTy->getPointeeType().getAddressSpace(), DestTy: ConvertType(T: DestTy));
2369 }
2370 case CK_AtomicToNonAtomic:
2371 case CK_NonAtomicToAtomic:
2372 case CK_UserDefinedConversion:
2373 return Visit(E: const_cast<Expr*>(E));
2374
2375 case CK_NoOp: {
2376 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
2377 : Visit(E: const_cast<Expr *>(E));
2378 }
2379
2380 case CK_BaseToDerived: {
2381 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2382 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2383
2384 Address Base = CGF.EmitPointerWithAlignment(Addr: E);
2385 Address Derived =
2386 CGF.GetAddressOfDerivedClass(Value: Base, Derived: DerivedClassDecl,
2387 PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
2388 NullCheckValue: CGF.ShouldNullCheckClassCastValue(CE));
2389
2390 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2391 // performed and the object is not of the derived type.
2392 if (CGF.sanitizePerformTypeCheck())
2393 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2394 Derived, DestTy->getPointeeType());
2395
2396 if (CGF.SanOpts.has(K: SanitizerKind::CFIDerivedCast))
2397 CGF.EmitVTablePtrCheckForCast(T: DestTy->getPointeeType(), Derived,
2398 /*MayBeNull=*/true,
2399 TCK: CodeGenFunction::CFITCK_DerivedCast,
2400 Loc: CE->getBeginLoc());
2401
2402 return CGF.getAsNaturalPointerTo(Addr: Derived, PointeeType: CE->getType()->getPointeeType());
2403 }
2404 case CK_UncheckedDerivedToBase:
2405 case CK_DerivedToBase: {
2406 // The EmitPointerWithAlignment path does this fine; just discard
2407 // the alignment.
2408 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitPointerWithAlignment(CE),
2409 PointeeType: CE->getType()->getPointeeType());
2410 }
2411
2412 case CK_Dynamic: {
2413 Address V = CGF.EmitPointerWithAlignment(Addr: E);
2414 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(Val: CE);
2415 return CGF.EmitDynamicCast(V, DCE);
2416 }
2417
2418 case CK_ArrayToPointerDecay:
2419 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitArrayToPointerDecay(Array: E),
2420 PointeeType: CE->getType()->getPointeeType());
2421 case CK_FunctionToPointerDecay:
2422 return EmitLValue(E).getPointer(CGF);
2423
2424 case CK_NullToPointer:
2425 if (MustVisitNullValue(E))
2426 CGF.EmitIgnoredExpr(E);
2427
2428 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: ConvertType(T: DestTy)),
2429 QT: DestTy);
2430
2431 case CK_NullToMemberPointer: {
2432 if (MustVisitNullValue(E))
2433 CGF.EmitIgnoredExpr(E);
2434
2435 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2436 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2437 }
2438
2439 case CK_ReinterpretMemberPointer:
2440 case CK_BaseToDerivedMemberPointer:
2441 case CK_DerivedToBaseMemberPointer: {
2442 Value *Src = Visit(E);
2443
2444 // Note that the AST doesn't distinguish between checked and
2445 // unchecked member pointer conversions, so we always have to
2446 // implement checked conversions here. This is inefficient when
2447 // actual control flow may be required in order to perform the
2448 // check, which it is for data member pointers (but not member
2449 // function pointers on Itanium and ARM).
2450 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, E: CE, Src);
2451 }
2452
2453 case CK_ARCProduceObject:
2454 return CGF.EmitARCRetainScalarExpr(expr: E);
2455 case CK_ARCConsumeObject:
2456 return CGF.EmitObjCConsumeObject(T: E->getType(), Ptr: Visit(E));
2457 case CK_ARCReclaimReturnedObject:
2458 return CGF.EmitARCReclaimReturnedObject(e: E, /*allowUnsafe*/ allowUnsafeClaim: Ignored);
2459 case CK_ARCExtendBlockObject:
2460 return CGF.EmitARCExtendBlockObject(expr: E);
2461
2462 case CK_CopyAndAutoreleaseBlockObject:
2463 return CGF.EmitBlockCopyAndAutorelease(Block: Visit(E), Ty: E->getType());
2464
2465 case CK_FloatingRealToComplex:
2466 case CK_FloatingComplexCast:
2467 case CK_IntegralRealToComplex:
2468 case CK_IntegralComplexCast:
2469 case CK_IntegralComplexToFloatingComplex:
2470 case CK_FloatingComplexToIntegralComplex:
2471 case CK_ConstructorConversion:
2472 case CK_ToUnion:
2473 case CK_HLSLArrayRValue:
2474 llvm_unreachable("scalar cast to non-scalar value");
2475
2476 case CK_LValueToRValue:
2477 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2478 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2479 return Visit(E: const_cast<Expr*>(E));
2480
2481 case CK_IntegralToPointer: {
2482 Value *Src = Visit(E: const_cast<Expr*>(E));
2483
2484 // First, convert to the correct width so that we control the kind of
2485 // extension.
2486 auto DestLLVMTy = ConvertType(T: DestTy);
2487 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2488 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2489 llvm::Value* IntResult =
2490 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
2491
2492 auto *IntToPtr = Builder.CreateIntToPtr(V: IntResult, DestTy: DestLLVMTy);
2493
2494 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from an integer to a pointer that could be dynamic requires
      // reloading dynamic information from invariant.group.
2497 if (DestTy.mayBeDynamicClass())
2498 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2499 }
2500 return IntToPtr;
2501 }
2502 case CK_PointerToIntegral: {
2503 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2504 auto *PtrExpr = Visit(E);
2505
2506 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2507 const QualType SrcType = E->getType();
2508
      // Casting to an integer requires stripping dynamic information, as
      // integers do not carry it.
2511 if (SrcType.mayBeDynamicClass())
2512 PtrExpr = Builder.CreateStripInvariantGroup(Ptr: PtrExpr);
2513 }
2514
2515 return Builder.CreatePtrToInt(V: PtrExpr, DestTy: ConvertType(T: DestTy));
2516 }
2517 case CK_ToVoid: {
2518 CGF.EmitIgnoredExpr(E);
2519 return nullptr;
2520 }
2521 case CK_MatrixCast: {
2522 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2523 Loc: CE->getExprLoc());
2524 }
2525 case CK_VectorSplat: {
2526 llvm::Type *DstTy = ConvertType(T: DestTy);
2527 Value *Elt = Visit(E: const_cast<Expr *>(E));
2528 // Splat the element across to all elements
2529 llvm::ElementCount NumElements =
2530 cast<llvm::VectorType>(Val: DstTy)->getElementCount();
2531 return Builder.CreateVectorSplat(EC: NumElements, V: Elt, Name: "splat");
2532 }
2533
2534 case CK_FixedPointCast:
2535 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2536 Loc: CE->getExprLoc());
2537
2538 case CK_FixedPointToBoolean:
2539 assert(E->getType()->isFixedPointType() &&
2540 "Expected src type to be fixed point type");
2541 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2542 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2543 Loc: CE->getExprLoc());
2544
2545 case CK_FixedPointToIntegral:
2546 assert(E->getType()->isFixedPointType() &&
2547 "Expected src type to be fixed point type");
2548 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2549 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2550 Loc: CE->getExprLoc());
2551
2552 case CK_IntegralToFixedPoint:
2553 assert(E->getType()->isIntegerType() &&
2554 "Expected src type to be an integer");
2555 assert(DestTy->isFixedPointType() &&
2556 "Expected dest type to be fixed point type");
2557 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2558 Loc: CE->getExprLoc());
2559
2560 case CK_IntegralCast: {
2561 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2562 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2563 return Builder.CreateIntCast(V: Visit(E), DestTy: ConvertType(T: DestTy),
2564 isSigned: SrcElTy->isSignedIntegerOrEnumerationType(),
2565 Name: "conv");
2566 }
2567 ScalarConversionOpts Opts;
2568 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2569 if (!ICE->isPartOfExplicitCast())
2570 Opts = ScalarConversionOpts(CGF.SanOpts);
2571 }
2572 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2573 Loc: CE->getExprLoc(), Opts);
2574 }
2575 case CK_IntegralToFloating: {
2576 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2577 // TODO: Support constrained FP intrinsics.
2578 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2579 if (SrcElTy->isSignedIntegerOrEnumerationType())
2580 return Builder.CreateSIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2581 return Builder.CreateUIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2582 }
2583 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2584 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2585 Loc: CE->getExprLoc());
2586 }
2587 case CK_FloatingToIntegral: {
2588 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2589 // TODO: Support constrained FP intrinsics.
2590 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2591 if (DstElTy->isSignedIntegerOrEnumerationType())
2592 return Builder.CreateFPToSI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2593 return Builder.CreateFPToUI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2594 }
2595 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2596 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2597 Loc: CE->getExprLoc());
2598 }
2599 case CK_FloatingCast: {
2600 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2601 // TODO: Support constrained FP intrinsics.
2602 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2603 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2604 if (DstElTy->castAs<BuiltinType>()->getKind() <
2605 SrcElTy->castAs<BuiltinType>()->getKind())
2606 return Builder.CreateFPTrunc(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2607 return Builder.CreateFPExt(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2608 }
2609 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2610 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2611 Loc: CE->getExprLoc());
2612 }
2613 case CK_FixedPointToFloating:
2614 case CK_FloatingToFixedPoint: {
2615 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2616 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2617 Loc: CE->getExprLoc());
2618 }
2619 case CK_BooleanToSignedIntegral: {
2620 ScalarConversionOpts Opts;
2621 Opts.TreatBooleanAsSigned = true;
2622 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2623 Loc: CE->getExprLoc(), Opts);
2624 }
2625 case CK_IntegralToBoolean:
2626 return EmitIntToBoolConversion(V: Visit(E));
2627 case CK_PointerToBoolean:
2628 return EmitPointerToBoolConversion(V: Visit(E), QT: E->getType());
2629 case CK_FloatingToBoolean: {
2630 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2631 return EmitFloatToBoolConversion(V: Visit(E));
2632 }
2633 case CK_MemberPointerToBoolean: {
2634 llvm::Value *MemPtr = Visit(E);
2635 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2636 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2637 }
2638
2639 case CK_FloatingComplexToReal:
2640 case CK_IntegralComplexToReal:
2641 return CGF.EmitComplexExpr(E, IgnoreReal: false, IgnoreImag: true).first;
2642
2643 case CK_FloatingComplexToBoolean:
2644 case CK_IntegralComplexToBoolean: {
2645 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2646
2647 // TODO: kill this function off, inline appropriate case here
2648 return EmitComplexToScalarConversion(Src: V, SrcTy: E->getType(), DstTy: DestTy,
2649 Loc: CE->getExprLoc());
2650 }
2651
2652 case CK_ZeroToOCLOpaqueType: {
2653 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2654 DestTy->isOCLIntelSubgroupAVCType()) &&
2655 "CK_ZeroToOCLEvent cast on non-event type");
2656 return llvm::Constant::getNullValue(Ty: ConvertType(T: DestTy));
2657 }
2658
2659 case CK_IntToOCLSampler:
2660 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2661
2662 case CK_HLSLVectorTruncation: {
2663 assert(DestTy->isVectorType() && "Expected dest type to be vector type");
2664 Value *Vec = Visit(E: const_cast<Expr *>(E));
2665 SmallVector<int, 16> Mask;
2666 unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();
2667 for (unsigned I = 0; I != NumElts; ++I)
2668 Mask.push_back(Elt: I);
2669
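    // E.g. truncating a 'float4' to 'float2' keeps the leading lanes
    // (illustrative IR):
    //   %trunc = shufflevector <4 x float> %v, <4 x float> poison,
    //                          <2 x i32> <i32 0, i32 1>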
2670 return Builder.CreateShuffleVector(V: Vec, Mask, Name: "trunc");
2671 }
2672
2673 } // end of switch
2674
2675 llvm_unreachable("unknown scalar cast");
2676}
2677
2678Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2679 CodeGenFunction::StmtExprEvaluation eval(CGF);
2680 Address RetAlloca = CGF.EmitCompoundStmt(S: *E->getSubStmt(),
2681 GetLast: !E->getType()->isVoidType());
2682 if (!RetAlloca.isValid())
2683 return nullptr;
2684 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2685 E->getExprLoc());
2686}
2687
2688Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2689 CodeGenFunction::RunCleanupsScope Scope(CGF);
2690 Value *V = Visit(E: E->getSubExpr());
2691 // Defend against dominance problems caused by jumps out of expression
2692 // evaluation through the shared cleanup block.
2693 Scope.ForceCleanup(ValuesToReload: {&V});
2694 return V;
2695}
2696
2697//===----------------------------------------------------------------------===//
2698// Unary Operators
2699//===----------------------------------------------------------------------===//
2700
2701static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2702 llvm::Value *InVal, bool IsInc,
2703 FPOptions FPFeatures) {
2704 BinOpInfo BinOp;
2705 BinOp.LHS = InVal;
2706 BinOp.RHS = llvm::ConstantInt::get(Ty: InVal->getType(), V: 1, IsSigned: false);
2707 BinOp.Ty = E->getType();
2708 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2709 BinOp.FPFeatures = FPFeatures;
2710 BinOp.E = E;
2711 return BinOp;
2712}
2713
2714llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2715 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2716 llvm::Value *Amount =
2717 llvm::ConstantInt::get(Ty: InVal->getType(), V: IsInc ? 1 : -1, IsSigned: true);
2718 StringRef Name = IsInc ? "inc" : "dec";
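  // SOB_Defined corresponds to -fwrapv, SOB_Undefined is the default, and
  // SOB_Trapping corresponds to -ftrapv; the signed-overflow sanitizer can
  // still force the checked path for the first two cases below.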
2719 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2720 case LangOptions::SOB_Defined:
2721 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2722 return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
2723 [[fallthrough]];
2724 case LangOptions::SOB_Undefined:
2725 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2726 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2727 [[fallthrough]];
2728 case LangOptions::SOB_Trapping:
2729 if (!E->canOverflow())
2730 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2731 return EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
2732 E, InVal, IsInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
2733 }
2734 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2735}
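// Illustrative summary (not exhaustive) of what the switch above emits for
// 'int i; ++i;' under the three signed-overflow modes:
//   -fwrapv  (SOB_Defined):   %inc = add i32 %i, 1
//   default  (SOB_Undefined): %inc = add nsw i32 %i, 1
//   -ftrapv  (SOB_Trapping):  @llvm.sadd.with.overflow.i32 plus a branch to
//                             the failure path, via EmitOverflowCheckedBinOp
// With -fsanitize=signed-integer-overflow, the first two modes fall through
// to the checked path as well.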
2736
2737namespace {
2738/// Handles the check and update of OpenMP lastprivate conditional variables.
2739class OMPLastprivateConditionalUpdateRAII {
2740private:
2741 CodeGenFunction &CGF;
2742 const UnaryOperator *E;
2743
2744public:
2745 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2746 const UnaryOperator *E)
2747 : CGF(CGF), E(E) {}
2748 ~OMPLastprivateConditionalUpdateRAII() {
2749 if (CGF.getLangOpts().OpenMP)
2750 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2751 CGF, LHS: E->getSubExpr());
2752 }
2753};
2754} // namespace
2755
2756llvm::Value *
2757ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2758 bool isInc, bool isPre) {
2759 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2760 QualType type = E->getSubExpr()->getType();
2761 llvm::PHINode *atomicPHI = nullptr;
2762 llvm::Value *value;
2763 llvm::Value *input;
2764 llvm::Value *Previous = nullptr;
2765 QualType SrcType = E->getType();
2766
2767 int amount = (isInc ? 1 : -1);
2768 bool isSubtraction = !isInc;
2769
2770 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2771 type = atomicTy->getValueType();
2772 if (isInc && type->isBooleanType()) {
2773 llvm::Value *True = CGF.EmitToMemory(Value: Builder.getTrue(), Ty: type);
2774 if (isPre) {
2775 Builder.CreateStore(Val: True, Addr: LV.getAddress(CGF), IsVolatile: LV.isVolatileQualified())
2776 ->setAtomic(Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2777 return Builder.getTrue();
2778 }
2779      // For postincrement, atomically swap in 'true' and return the old
2780      // value.
2781 return Builder.CreateAtomicRMW(
2782 Op: llvm::AtomicRMWInst::Xchg, Addr: LV.getAddress(CGF), Val: True,
2783 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2784 }
2785    // Special case for atomic increment/decrement on integers: emit an
2786    // atomicrmw instruction. We skip this when overflow checking is
2787    // requested and instead fall into the slow path's atomic cmpxchg loop.
2788 if (!type->isBooleanType() && type->isIntegerType() &&
2789 !(type->isUnsignedIntegerType() &&
2790 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
2791 CGF.getLangOpts().getSignedOverflowBehavior() !=
2792 LangOptions::SOB_Trapping) {
2793 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2794 llvm::AtomicRMWInst::Sub;
2795 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2796 llvm::Instruction::Sub;
2797 llvm::Value *amt = CGF.EmitToMemory(
2798 Value: llvm::ConstantInt::get(Ty: ConvertType(T: type), V: 1, IsSigned: true), Ty: type);
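      // What follows emits, e.g. for '_Atomic int n; n++;' (illustrative):
      //   %old = atomicrmw add ptr %n, i32 1 seq_cst
      // Postincrement yields %old directly; preincrement recomputes
      // %old + 1 as the expression result.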
2799 llvm::Value *old =
2800 Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(CGF), Val: amt,
2801 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2802 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
2803 }
2804 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
2805 input = value;
2806 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2807 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2808 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
2809 value = CGF.EmitToMemory(Value: value, Ty: type);
2810 Builder.CreateBr(Dest: opBB);
2811 Builder.SetInsertPoint(opBB);
2812 atomicPHI = Builder.CreatePHI(Ty: value->getType(), NumReservedValues: 2);
2813 atomicPHI->addIncoming(V: value, BB: startBB);
2814 value = atomicPHI;
2815 } else {
2816 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
2817 input = value;
2818 }
2819
2820  // Special case of integer increment that we have to check first: bool++.
2821  // Due to promotion rules, we get:
2822  //   bool++ -> bool = bool + 1
2823  //          -> bool = (int)bool + 1
2824  //          -> bool = ((int)bool + 1 != 0)
2825  // An interesting aspect of this is that the result of increment is always
2826  // true. Decrement does not have this property.
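  // Concretely (illustrative): after these rules, 'b++' for 'bool b' needs
  // no arithmetic at all. The stored value is the constant 'true', and the
  // expression result is 'true' for preincrement or the old value for
  // postincrement.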
2827 if (isInc && type->isBooleanType()) {
2828 value = Builder.getTrue();
2829
2830 // Most common case by far: integer increment.
2831 } else if (type->isIntegerType()) {
2832 QualType promotedType;
2833 bool canPerformLossyDemotionCheck = false;
2834 if (CGF.getContext().isPromotableIntegerType(T: type)) {
2835 promotedType = CGF.getContext().getPromotedIntegerType(PromotableType: type);
2836 assert(promotedType != type && "Shouldn't promote to the same type.");
2837 canPerformLossyDemotionCheck = true;
2838 canPerformLossyDemotionCheck &=
2839 CGF.getContext().getCanonicalType(T: type) !=
2840 CGF.getContext().getCanonicalType(T: promotedType);
2841 canPerformLossyDemotionCheck &=
2842 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2843 SrcType: type, DstType: promotedType);
2844 assert((!canPerformLossyDemotionCheck ||
2845 type->isSignedIntegerOrEnumerationType() ||
2846 promotedType->isSignedIntegerOrEnumerationType() ||
2847 ConvertType(type)->getScalarSizeInBits() ==
2848 ConvertType(promotedType)->getScalarSizeInBits()) &&
2849 "The following check expects that if we do promotion to different "
2850 "underlying canonical type, at least one of the types (either "
2851 "base or promoted) will be signed, or the bitwidths will match.");
2852 }
2853 if (CGF.SanOpts.hasOneOf(
2854 K: SanitizerKind::ImplicitIntegerArithmeticValueChange |
2855 SanitizerKind::ImplicitBitfieldConversion) &&
2856 canPerformLossyDemotionCheck) {
2857      // `x += 1` (for `x` narrower than int) is modeled as
2858      // promotion + arithmetic + demotion, so lossy demotion is easy to catch
2859      // there. Plain inc/dec on a type narrower than int can't overflow
2860      // because of promotion rules, so we normally elide the promotion and
2861      // demotion, which leaves no demotion to check. Since we still want to
2862      // catch these cases when the sanitizer is enabled, we perform the
2863      // promotion anyway, do the increment/decrement in the wider type, and
2864      // finally perform the demotion; that final demotion catches lossy cases.
2865
2866 // We have a special case for bitfields defined using all the bits of the
2867 // type. In this case we need to do the same trick as for the integer
2868 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
2869
2870 value = EmitScalarConversion(Src: value, SrcType: type, DstType: promotedType, Loc: E->getExprLoc());
2871 Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
2872 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2873 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2874 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
2875 // checks will take care of the conversion.
2876 ScalarConversionOpts Opts;
2877 if (!LV.isBitField())
2878 Opts = ScalarConversionOpts(CGF.SanOpts);
2879 else if (CGF.SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) {
2880 Previous = value;
2881 SrcType = promotedType;
2882 }
2883
2884 value = EmitScalarConversion(Src: value, SrcType: promotedType, DstType: type, Loc: E->getExprLoc(),
2885 Opts);
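      // e.g. (illustrative): with -fsanitize=implicit-conversion,
      // 'short s; s++;' is emitted as
      //   %promoted = sext i16 %s to i32
      //   %inc      = add i32 %promoted, 1
      // followed by a checked truncation of %inc back to i16.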
2886
2887 // Note that signed integer inc/dec with width less than int can't
2888 // overflow because of promotion rules; we're just eliding a few steps
2889 // here.
2890 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2891 value = EmitIncDecConsiderOverflowBehavior(E, InVal: value, IsInc: isInc);
2892 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2893 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) {
2894 value = EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
2895 E, InVal: value, IsInc: isInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
2896 } else {
2897 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
2898 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2899 }
2900
2901 // Next most common: pointer increment.
2902 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2903 QualType type = ptr->getPointeeType();
2904
2905 // VLA types don't have constant size.
2906 if (const VariableArrayType *vla
2907 = CGF.getContext().getAsVariableArrayType(T: type)) {
2908 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2909 if (!isInc) numElts = Builder.CreateNSWNeg(V: numElts, Name: "vla.negsize");
2910 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
2911 if (CGF.getLangOpts().isSignedOverflowDefined())
2912 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: numElts, Name: "vla.inc");
2913 else
2914 value = CGF.EmitCheckedInBoundsGEP(
2915 ElemTy: elemTy, Ptr: value, IdxList: numElts, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2916 Loc: E->getExprLoc(), Name: "vla.inc");
2917
2918    // Arithmetic on function pointers (a GNU extension!) is just +-1 byte.
2919 } else if (type->isFunctionType()) {
2920 llvm::Value *amt = Builder.getInt32(C: amount);
2921
2922 if (CGF.getLangOpts().isSignedOverflowDefined())
2923 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: amt, Name: "incdec.funcptr");
2924 else
2925 value =
2926 CGF.EmitCheckedInBoundsGEP(ElemTy: CGF.Int8Ty, Ptr: value, IdxList: amt,
2927 /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2928 Loc: E->getExprLoc(), Name: "incdec.funcptr");
2929
2930 // For everything else, we can just do a simple increment.
2931 } else {
2932 llvm::Value *amt = Builder.getInt32(C: amount);
2933 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
2934 if (CGF.getLangOpts().isSignedOverflowDefined())
2935 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: amt, Name: "incdec.ptr");
2936 else
2937 value = CGF.EmitCheckedInBoundsGEP(
2938 ElemTy: elemTy, Ptr: value, IdxList: amt, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2939 Loc: E->getExprLoc(), Name: "incdec.ptr");
2940 }
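    // e.g. (illustrative): 'int *p; ++p;' takes this path and emits
    //   %incdec.ptr = getelementptr inbounds i32, ptr %p, i32 1
    // with the checked-GEP variant adding a -fsanitize=pointer-overflow
    // guard when that sanitizer is enabled.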
2941
2942 // Vector increment/decrement.
2943 } else if (type->isVectorType()) {
2944 if (type->hasIntegerRepresentation()) {
2945 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount);
2946
2947 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2948 } else {
2949 value = Builder.CreateFAdd(
2950 L: value,
2951 R: llvm::ConstantFP::get(Ty: value->getType(), V: amount),
2952 Name: isInc ? "inc" : "dec");
2953 }
2954
2955 // Floating point.
2956 } else if (type->isRealFloatingType()) {
2957    // Add the inc/dec amount to the value.
2958 llvm::Value *amt;
2959 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2960
2961 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2962      // Another special case: half FP increment/decrement should be done via
2962      // float.
2963 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2964 value = Builder.CreateCall(
2965 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2966 CGF.CGM.FloatTy),
2967 input, "incdec.conv");
2968 } else {
2969 value = Builder.CreateFPExt(V: input, DestTy: CGF.CGM.FloatTy, Name: "incdec.conv");
2970 }
2971 }
2972
2973 if (value->getType()->isFloatTy())
2974 amt = llvm::ConstantFP::get(Context&: VMContext,
2975 V: llvm::APFloat(static_cast<float>(amount)));
2976 else if (value->getType()->isDoubleTy())
2977 amt = llvm::ConstantFP::get(Context&: VMContext,
2978 V: llvm::APFloat(static_cast<double>(amount)));
2979 else {
2980 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
2981 // Convert from float.
2982 llvm::APFloat F(static_cast<float>(amount));
2983 bool ignored;
2984 const llvm::fltSemantics *FS;
2985 // Don't use getFloatTypeSemantics because Half isn't
2986 // necessarily represented using the "half" LLVM type.
2987 if (value->getType()->isFP128Ty())
2988 FS = &CGF.getTarget().getFloat128Format();
2989 else if (value->getType()->isHalfTy())
2990 FS = &CGF.getTarget().getHalfFormat();
2991 else if (value->getType()->isBFloatTy())
2992 FS = &CGF.getTarget().getBFloat16Format();
2993 else if (value->getType()->isPPC_FP128Ty())
2994 FS = &CGF.getTarget().getIbm128Format();
2995 else
2996 FS = &CGF.getTarget().getLongDoubleFormat();
2997 F.convert(ToSemantics: *FS, RM: llvm::APFloat::rmTowardZero, losesInfo: &ignored);
2998 amt = llvm::ConstantFP::get(Context&: VMContext, V: F);
2999 }
3000 value = Builder.CreateFAdd(L: value, R: amt, Name: isInc ? "inc" : "dec");
3001
3002 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3003 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3004 value = Builder.CreateCall(
3005 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3006 CGF.CGM.FloatTy),
3007 value, "incdec.conv");
3008 } else {
3009 value = Builder.CreateFPTrunc(V: value, DestTy: input->getType(), Name: "incdec.conv");
3010 }
3011 }
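    // e.g. (illustrative): '__fp16 h; h++;' without NativeHalfType becomes
    //   %incdec.conv  = fpext half %h to float
    //   %inc          = fadd float %incdec.conv, 1.000000e+00
    //   %incdec.conv1 = fptrunc float %inc to half
    // or the equivalent through the fp16 conversion intrinsics on targets
    // that require them.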
3012
3013 // Fixed-point types.
3014 } else if (type->isFixedPointType()) {
3015 // Fixed-point types are tricky. In some cases, it isn't possible to
3016 // represent a 1 or a -1 in the type at all. Piggyback off of
3017 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3018 BinOpInfo Info;
3019 Info.E = E;
3020 Info.Ty = E->getType();
3021 Info.Opcode = isInc ? BO_Add : BO_Sub;
3022 Info.LHS = value;
3023 Info.RHS = llvm::ConstantInt::get(Ty: value->getType(), V: 1, IsSigned: false);
3024 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3025 // since -1 is guaranteed to be representable.
3026 if (type->isSignedFixedPointType()) {
3027 Info.Opcode = isInc ? BO_Sub : BO_Add;
3028 Info.RHS = Builder.CreateNeg(V: Info.RHS);
3029 }
3030 // Now, convert from our invented integer literal to the type of the unary
3031 // op. This will upscale and saturate if necessary. This value can become
3032 // undef in some cases.
3033 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3034 auto DstSema = CGF.getContext().getFixedPointSemantics(Ty: Info.Ty);
3035 Info.RHS = FPBuilder.CreateIntegerToFixed(Src: Info.RHS, SrcIsSigned: true, DstSema: DstSema);
3036 value = EmitFixedPointBinOp(Ops: Info);
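    // e.g. (illustrative): for 'signed short _Fract f; f++;' the code above
    // flips the opcode and negates the RHS, so the operation is emitted as
    // 'f - (-1)'; EmitFixedPointBinOp then handles scaling and, for _Sat
    // types, saturation.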
3037
3038 // Objective-C pointer types.
3039 } else {
3040 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3041
3042 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3043 if (!isInc) size = -size;
3044 llvm::Value *sizeValue =
3045 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: size.getQuantity());
3046
3047 if (CGF.getLangOpts().isSignedOverflowDefined())
3048 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, Name: "incdec.objptr");
3049 else
3050 value = CGF.EmitCheckedInBoundsGEP(
3051 ElemTy: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3052 Loc: E->getExprLoc(), Name: "incdec.objptr");
3053 value = Builder.CreateBitCast(V: value, DestTy: input->getType());
3054 }
3055
3056 if (atomicPHI) {
3057 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3058 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3059 auto Pair = CGF.EmitAtomicCompareExchange(
3060 Obj: LV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: value), Loc: E->getExprLoc());
3061 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: type);
3062 llvm::Value *success = Pair.second;
3063 atomicPHI->addIncoming(V: old, BB: curBlock);
3064 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3065 Builder.SetInsertPoint(contBB);
3066 return isPre ? value : input;
3067 }
3068
3069 // Store the updated result through the lvalue.
3070 if (LV.isBitField()) {
3071 Value *Src = Previous ? Previous : value;
3072 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: value), Dst: LV, Result: &value);
3073 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: value, DstType: E->getType(),
3074 Info: LV.getBitFieldInfo(), Loc: E->getExprLoc());
3075 } else
3076 CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV);
3077
3078 // If this is a postinc, return the value read from memory, otherwise use the
3079 // updated value.
3080 return isPre ? value : input;
3081}
3082
3083
3084Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3085 QualType PromotionType) {
3086 QualType promotionTy = PromotionType.isNull()
3087 ? getPromotionType(Ty: E->getSubExpr()->getType())
3088 : PromotionType;
3089 Value *result = VisitPlus(E, PromotionType: promotionTy);
3090 if (result && !promotionTy.isNull())
3091 result = EmitUnPromotedValue(result, ExprType: E->getType());
3092 return result;
3093}
3094
3095Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3096 QualType PromotionType) {
3097 // This differs from gcc, though, most likely due to a bug in gcc.
3098 TestAndClearIgnoreResultAssign();
3099 if (!PromotionType.isNull())
3100 return CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3101 return Visit(E: E->getSubExpr());
3102}
3103
3104Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3105 QualType PromotionType) {
3106 QualType promotionTy = PromotionType.isNull()
3107 ? getPromotionType(Ty: E->getSubExpr()->getType())
3108 : PromotionType;
3109 Value *result = VisitMinus(E, PromotionType: promotionTy);
3110 if (result && !promotionTy.isNull())
3111 result = EmitUnPromotedValue(result, ExprType: E->getType());
3112 return result;
3113}
3114
3115Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3116 QualType PromotionType) {
3117 TestAndClearIgnoreResultAssign();
3118 Value *Op;
3119 if (!PromotionType.isNull())
3120 Op = CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3121 else
3122 Op = Visit(E: E->getSubExpr());
3123
3124 // Generate a unary FNeg for FP ops.
3125 if (Op->getType()->isFPOrFPVectorTy())
3126 return Builder.CreateFNeg(V: Op, Name: "fneg");
3127
3128 // Emit unary minus with EmitSub so we handle overflow cases etc.
3129 BinOpInfo BinOp;
3130 BinOp.RHS = Op;
3131 BinOp.LHS = llvm::Constant::getNullValue(Ty: BinOp.RHS->getType());
3132 BinOp.Ty = E->getType();
3133 BinOp.Opcode = BO_Sub;
3134 BinOp.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3135 BinOp.E = E;
3136 return EmitSub(Ops: BinOp);
3137}
3138
3139Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3140 TestAndClearIgnoreResultAssign();
3141 Value *Op = Visit(E: E->getSubExpr());
3142 return Builder.CreateNot(V: Op, Name: "not");
3143}
3144
3145Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3146 // Perform vector logical not on comparison with zero vector.
3147 if (E->getType()->isVectorType() &&
3148 E->getType()->castAs<VectorType>()->getVectorKind() ==
3149 VectorKind::Generic) {
3150 Value *Oper = Visit(E: E->getSubExpr());
3151 Value *Zero = llvm::Constant::getNullValue(Ty: Oper->getType());
3152 Value *Result;
3153 if (Oper->getType()->isFPOrFPVectorTy()) {
3154 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3155 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
3156 Result = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_OEQ, LHS: Oper, RHS: Zero, Name: "cmp");
3157 } else
3158 Result = Builder.CreateICmp(P: llvm::CmpInst::ICMP_EQ, LHS: Oper, RHS: Zero, Name: "cmp");
3159 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
3160 }
3161
3162 // Compare operand to zero.
3163 Value *BoolVal = CGF.EvaluateExprAsBool(E: E->getSubExpr());
3164
3165 // Invert value.
3166 // TODO: Could dynamically modify easy computations here. For example, if
3167 // the operand is an icmp ne, turn into icmp eq.
3168 BoolVal = Builder.CreateNot(V: BoolVal, Name: "lnot");
3169
3170 // ZExt result to the expr type.
3171 return Builder.CreateZExt(V: BoolVal, DestTy: ConvertType(T: E->getType()), Name: "lnot.ext");
3172}
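// A quick sketch (illustrative) of the scalar path above: '!x' for 'int x'
// is emitted as
//   %tobool   = icmp ne i32 %x, 0
//   %lnot     = xor i1 %tobool, true
//   %lnot.ext = zext i1 %lnot to i32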
3173
3174Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3175 // Try folding the offsetof to a constant.
3176 Expr::EvalResult EVResult;
3177 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3178 llvm::APSInt Value = EVResult.Val.getInt();
3179 return Builder.getInt(AI: Value);
3180 }
3181
3182 // Loop over the components of the offsetof to compute the value.
3183 unsigned n = E->getNumComponents();
3184 llvm::Type* ResultType = ConvertType(T: E->getType());
3185 llvm::Value* Result = llvm::Constant::getNullValue(Ty: ResultType);
3186 QualType CurrentType = E->getTypeSourceInfo()->getType();
3187 for (unsigned i = 0; i != n; ++i) {
3188 OffsetOfNode ON = E->getComponent(Idx: i);
3189 llvm::Value *Offset = nullptr;
3190 switch (ON.getKind()) {
3191 case OffsetOfNode::Array: {
3192 // Compute the index
3193 Expr *IdxExpr = E->getIndexExpr(Idx: ON.getArrayExprIndex());
3194 llvm::Value* Idx = CGF.EmitScalarExpr(E: IdxExpr);
3195 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3196 Idx = Builder.CreateIntCast(V: Idx, DestTy: ResultType, isSigned: IdxSigned, Name: "conv");
3197
3198 // Save the element type
3199 CurrentType =
3200 CGF.getContext().getAsArrayType(T: CurrentType)->getElementType();
3201
3202 // Compute the element size
3203 llvm::Value* ElemSize = llvm::ConstantInt::get(Ty: ResultType,
3204 V: CGF.getContext().getTypeSizeInChars(T: CurrentType).getQuantity());
3205
3206 // Multiply out to compute the result
3207 Offset = Builder.CreateMul(LHS: Idx, RHS: ElemSize);
3208 break;
3209 }
3210
3211 case OffsetOfNode::Field: {
3212 FieldDecl *MemberDecl = ON.getField();
3213 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3214 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3215
3216 // Compute the index of the field in its parent.
3217 unsigned i = 0;
3218 // FIXME: It would be nice if we didn't have to loop here!
3219 for (RecordDecl::field_iterator Field = RD->field_begin(),
3220 FieldEnd = RD->field_end();
3221 Field != FieldEnd; ++Field, ++i) {
3222 if (*Field == MemberDecl)
3223 break;
3224 }
3225 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3226
3227 // Compute the offset to the field
3228 int64_t OffsetInt = RL.getFieldOffset(FieldNo: i) /
3229 CGF.getContext().getCharWidth();
3230 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt);
3231
3232 // Save the element type.
3233 CurrentType = MemberDecl->getType();
3234 break;
3235 }
3236
3237 case OffsetOfNode::Identifier:
3238 llvm_unreachable("dependent __builtin_offsetof");
3239
3240 case OffsetOfNode::Base: {
3241 if (ON.getBase()->isVirtual()) {
3242 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3243 continue;
3244 }
3245
3246 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3247 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3248
3249 // Save the element type.
3250 CurrentType = ON.getBase()->getType();
3251
3252 // Compute the offset to the base.
3253 auto *BaseRT = CurrentType->castAs<RecordType>();
3254 auto *BaseRD = cast<CXXRecordDecl>(Val: BaseRT->getDecl());
3255 CharUnits OffsetInt = RL.getBaseClassOffset(Base: BaseRD);
3256 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt.getQuantity());
3257 break;
3258 }
3259 }
3260 Result = Builder.CreateAdd(LHS: Result, RHS: Offset);
3261 }
3262 return Result;
3263}
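// e.g. (illustrative): when __builtin_offsetof(struct S, a[i].f) is not
// constant-foldable, the loop above accumulates
//   Result = i * sizeof(element of a) + byte-offset-of-'f'-in-that-element
// one OffsetOfNode component at a time.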
3264
3265/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
3266/// of the argument of the sizeof expression as an integer.
3267Value *
3268ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3269 const UnaryExprOrTypeTraitExpr *E) {
3270 QualType TypeToSize = E->getTypeOfArgument();
3271 if (auto Kind = E->getKind();
3272 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
3273 if (const VariableArrayType *VAT =
3274 CGF.getContext().getAsVariableArrayType(T: TypeToSize)) {
3275 if (E->isArgumentType()) {
3276 // sizeof(type) - make sure to emit the VLA size.
3277 CGF.EmitVariablyModifiedType(Ty: TypeToSize);
3278 } else {
3279        // C99 6.5.3.4p2: If the argument is an expression of VLA type,
3280        // it is evaluated.
3281 CGF.EmitIgnoredExpr(E: E->getArgumentExpr());
3282 }
3283
3284 auto VlaSize = CGF.getVLASize(vla: VAT);
3285 llvm::Value *size = VlaSize.NumElts;
3286
3287 // Scale the number of non-VLA elements by the non-VLA element size.
3288 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3289 if (!eltSize.isOne())
3290 size = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: size);
3291
3292 return size;
3293 }
3294 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3295 auto Alignment =
3296 CGF.getContext()
3297 .toCharUnitsFromBits(BitSize: CGF.getContext().getOpenMPDefaultSimdAlign(
3298 T: E->getTypeOfArgument()->getPointeeType()))
3299 .getQuantity();
3300 return llvm::ConstantInt::get(Ty: CGF.SizeTy, V: Alignment);
3301 } else if (E->getKind() == UETT_VectorElements) {
3302 auto *VecTy = cast<llvm::VectorType>(Val: ConvertType(T: E->getTypeOfArgument()));
3303 return Builder.CreateElementCount(DstType: CGF.SizeTy, EC: VecTy->getElementCount());
3304 }
3305
3306 // If this isn't sizeof(vla), the result must be constant; use the constant
3307 // folding logic so we don't have to duplicate it here.
3308 return Builder.getInt(AI: E->EvaluateKnownConstInt(CGF.getContext()));
3309}
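// e.g. (illustrative): given 'int a[n][4];', 'sizeof a' reaches the VLA
// path above with NumElts = n and non-VLA element type int[4], so the
// result is the runtime product n * 16 on a target with 32-bit int.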
3310
3311Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3312 QualType PromotionType) {
3313 QualType promotionTy = PromotionType.isNull()
3314 ? getPromotionType(Ty: E->getSubExpr()->getType())
3315 : PromotionType;
3316 Value *result = VisitReal(E, PromotionType: promotionTy);
3317 if (result && !promotionTy.isNull())
3318 result = EmitUnPromotedValue(result, ExprType: E->getType());
3319 return result;
3320}
3321
3322Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3323 QualType PromotionType) {
3324 Expr *Op = E->getSubExpr();
3325 if (Op->getType()->isAnyComplexType()) {
3326 // If it's an l-value, load through the appropriate subobject l-value.
3327 // Note that we have to ask E because Op might be an l-value that
3328 // this won't work for, e.g. an Obj-C property.
3329 if (E->isGLValue()) {
3330 if (!PromotionType.isNull()) {
3331 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3332 E: Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3333 if (result.first)
3334 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3335 return result.first;
3336 } else {
3337 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3338 .getScalarVal();
3339 }
3340 }
3341 // Otherwise, calculate and project.
3342 return CGF.EmitComplexExpr(E: Op, IgnoreReal: false, IgnoreImag: true).first;
3343 }
3344
3345 if (!PromotionType.isNull())
3346 return CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3347 return Visit(E: Op);
3348}
3349
3350Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3351 QualType PromotionType) {
3352 QualType promotionTy = PromotionType.isNull()
3353 ? getPromotionType(Ty: E->getSubExpr()->getType())
3354 : PromotionType;
3355 Value *result = VisitImag(E, PromotionType: promotionTy);
3356 if (result && !promotionTy.isNull())
3357 result = EmitUnPromotedValue(result, ExprType: E->getType());
3358 return result;
3359}
3360
3361Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3362 QualType PromotionType) {
3363 Expr *Op = E->getSubExpr();
3364 if (Op->getType()->isAnyComplexType()) {
3365 // If it's an l-value, load through the appropriate subobject l-value.
3366 // Note that we have to ask E because Op might be an l-value that
3367 // this won't work for, e.g. an Obj-C property.
3368 if (Op->isGLValue()) {
3369 if (!PromotionType.isNull()) {
3370 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3371 E: Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3372 if (result.second)
3373 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3374 return result.second;
3375 } else {
3376 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3377 .getScalarVal();
3378 }
3379 }
3380 // Otherwise, calculate and project.
3381 return CGF.EmitComplexExpr(E: Op, IgnoreReal: true, IgnoreImag: false).second;
3382 }
3383
3384 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3385 // effects are evaluated, but not the actual value.
3386 if (Op->isGLValue())
3387 CGF.EmitLValue(E: Op);
3388 else if (!PromotionType.isNull())
3389 CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3390 else
3391 CGF.EmitScalarExpr(E: Op, IgnoreResultAssign: true);
3392 if (!PromotionType.isNull())
3393 return llvm::Constant::getNullValue(Ty: ConvertType(T: PromotionType));
3394 return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType()));
3395}
3396
3397//===----------------------------------------------------------------------===//
3398// Binary Operators
3399//===----------------------------------------------------------------------===//
3400
3401Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3402 QualType PromotionType) {
3403 return CGF.Builder.CreateFPExt(V: result, DestTy: ConvertType(T: PromotionType), Name: "ext");
3404}
3405
3406Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3407 QualType ExprType) {
3408 return CGF.Builder.CreateFPTrunc(V: result, DestTy: ConvertType(T: ExprType), Name: "unpromotion");
3409}
3410
3411Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3412 E = E->IgnoreParens();
3413 if (auto BO = dyn_cast<BinaryOperator>(Val: E)) {
3414 switch (BO->getOpcode()) {
3415#define HANDLE_BINOP(OP) \
3416 case BO_##OP: \
3417 return Emit##OP(EmitBinOps(BO, PromotionType));
3418 HANDLE_BINOP(Add)
3419 HANDLE_BINOP(Sub)
3420 HANDLE_BINOP(Mul)
3421 HANDLE_BINOP(Div)
3422#undef HANDLE_BINOP
3423 default:
3424 break;
3425 }
3426 } else if (auto UO = dyn_cast<UnaryOperator>(Val: E)) {
3427 switch (UO->getOpcode()) {
3428 case UO_Imag:
3429 return VisitImag(E: UO, PromotionType);
3430 case UO_Real:
3431 return VisitReal(E: UO, PromotionType);
3432 case UO_Minus:
3433 return VisitMinus(E: UO, PromotionType);
3434 case UO_Plus:
3435 return VisitPlus(E: UO, PromotionType);
3436 default:
3437 break;
3438 }
3439 }
3440 auto result = Visit(E: const_cast<Expr *>(E));
3441 if (result) {
3442 if (!PromotionType.isNull())
3443 return EmitPromotedValue(result, PromotionType);
3444 else
3445 return EmitUnPromotedValue(result, ExprType: E->getType());
3446 }
3447 return result;
3448}
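// e.g. (illustrative): when _Float16 arithmetic is promoted to float,
// '-x' for '_Float16 x' is emitted as
//   %ext         = fpext half %x to float
//   %fneg        = fneg float %ext
//   %unpromotion = fptrunc float %fneg to half
// with the final fptrunc elided when the result feeds another promoted
// operation.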
3449
3450BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3451 QualType PromotionType) {
3452 TestAndClearIgnoreResultAssign();
3453 BinOpInfo Result;
3454 Result.LHS = CGF.EmitPromotedScalarExpr(E: E->getLHS(), PromotionType);
3455 Result.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType);
3456 if (!PromotionType.isNull())
3457 Result.Ty = PromotionType;
3458 else
3459 Result.Ty = E->getType();
3460 Result.Opcode = E->getOpcode();
3461 Result.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3462 Result.E = E;
3463 return Result;
3464}
3465
3466LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3467 const CompoundAssignOperator *E,
3468 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3469 Value *&Result) {
3470 QualType LHSTy = E->getLHS()->getType();
3471 BinOpInfo OpInfo;
3472
3473 if (E->getComputationResultType()->isAnyComplexType())
3474 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3475
3476  // Emit the RHS first. __block variables need to have the RHS evaluated
3477  // first; as a bonus, this should improve codegen a little.
3478
3479  QualType PromotionTypeCR =
3480      getPromotionType(Ty: E->getComputationResultType());
3481 if (PromotionTypeCR.isNull())
3482 PromotionTypeCR = E->getComputationResultType();
3483 QualType PromotionTypeLHS = getPromotionType(Ty: E->getComputationLHSType());
3484 QualType PromotionTypeRHS = getPromotionType(Ty: E->getRHS()->getType());
3485 if (!PromotionTypeRHS.isNull())
3486 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType: PromotionTypeRHS);
3487 else
3488 OpInfo.RHS = Visit(E: E->getRHS());
3489 OpInfo.Ty = PromotionTypeCR;
3490 OpInfo.Opcode = E->getOpcode();
3491 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3492 OpInfo.E = E;
3493 // Load/convert the LHS.
3494 LValue LHSLV = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
3495
3496 llvm::PHINode *atomicPHI = nullptr;
3497 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3498 QualType type = atomicTy->getValueType();
3499 if (!type->isBooleanType() && type->isIntegerType() &&
3500 !(type->isUnsignedIntegerType() &&
3501 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3502 CGF.getLangOpts().getSignedOverflowBehavior() !=
3503 LangOptions::SOB_Trapping) {
3504 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3505 llvm::Instruction::BinaryOps Op;
3506 switch (OpInfo.Opcode) {
3507      // We don't have atomicrmw operations for *, %, /, <<, >>.
3508 case BO_MulAssign: case BO_DivAssign:
3509 case BO_RemAssign:
3510 case BO_ShlAssign:
3511 case BO_ShrAssign:
3512 break;
3513 case BO_AddAssign:
3514 AtomicOp = llvm::AtomicRMWInst::Add;
3515 Op = llvm::Instruction::Add;
3516 break;
3517 case BO_SubAssign:
3518 AtomicOp = llvm::AtomicRMWInst::Sub;
3519 Op = llvm::Instruction::Sub;
3520 break;
3521 case BO_AndAssign:
3522 AtomicOp = llvm::AtomicRMWInst::And;
3523 Op = llvm::Instruction::And;
3524 break;
3525 case BO_XorAssign:
3526 AtomicOp = llvm::AtomicRMWInst::Xor;
3527 Op = llvm::Instruction::Xor;
3528 break;
3529 case BO_OrAssign:
3530 AtomicOp = llvm::AtomicRMWInst::Or;
3531 Op = llvm::Instruction::Or;
3532 break;
3533 default:
3534 llvm_unreachable("Invalid compound assignment type");
3535 }
3536 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3537 llvm::Value *Amt = CGF.EmitToMemory(
3538 Value: EmitScalarConversion(Src: OpInfo.RHS, SrcType: E->getRHS()->getType(), DstType: LHSTy,
3539 Loc: E->getExprLoc()),
3540 Ty: LHSTy);
3541 Value *OldVal = Builder.CreateAtomicRMW(
3542 Op: AtomicOp, Addr: LHSLV.getAddress(CGF), Val: Amt,
3543 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3544
3545        // Since the operation is atomic, the result type is guaranteed to be
3546        // the same as the input in LLVM terms.
3547 Result = Builder.CreateBinOp(Opc: Op, LHS: OldVal, RHS: Amt);
3548 return LHSLV;
3549 }
3550 }
3551 // FIXME: For floating point types, we should be saving and restoring the
3552 // floating point environment in the loop.
3553 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3554 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3555 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3556 OpInfo.LHS = CGF.EmitToMemory(Value: OpInfo.LHS, Ty: type);
3557 Builder.CreateBr(Dest: opBB);
3558 Builder.SetInsertPoint(opBB);
3559 atomicPHI = Builder.CreatePHI(Ty: OpInfo.LHS->getType(), NumReservedValues: 2);
3560 atomicPHI->addIncoming(V: OpInfo.LHS, BB: startBB);
3561 OpInfo.LHS = atomicPHI;
3562 }
3563 else
3564 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3565
3566 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3567 SourceLocation Loc = E->getExprLoc();
3568 if (!PromotionTypeLHS.isNull())
3569 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy, DstType: PromotionTypeLHS,
3570 Loc: E->getExprLoc());
3571 else
3572 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy,
3573 DstType: E->getComputationLHSType(), Loc);
3574
3575 // Expand the binary operator.
3576 Result = (this->*Func)(OpInfo);
3577
3578  // Convert the result back to the LHS type,
3579  // potentially with an implicit-conversion sanitizer check.
3580  // If LHSLV is a bitfield, use the default ScalarConversionOpts
3581  // to avoid emitting any implicit integer checks.
3582 Value *Previous = nullptr;
3583 if (LHSLV.isBitField()) {
3584 Previous = Result;
3585 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc);
3586 } else
3587 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc,
3588 Opts: ScalarConversionOpts(CGF.SanOpts));
3589
3590 if (atomicPHI) {
3591 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3592 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3593 auto Pair = CGF.EmitAtomicCompareExchange(
3594 Obj: LHSLV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: Result), Loc: E->getExprLoc());
3595 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: LHSTy);
3596 llvm::Value *success = Pair.second;
3597 atomicPHI->addIncoming(V: old, BB: curBlock);
3598 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3599 Builder.SetInsertPoint(contBB);
3600 return LHSLV;
3601 }
3602
3603 // Store the result value into the LHS lvalue. Bit-fields are handled
3604 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3605 // 'An assignment expression has the value of the left operand after the
3606 // assignment...'.
3607 if (LHSLV.isBitField()) {
3608 Value *Src = Previous ? Previous : Result;
3609 QualType SrcType = E->getRHS()->getType();
3610 QualType DstType = E->getLHS()->getType();
3611 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: Result), Dst: LHSLV, Result: &Result);
3612 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType,
3613 Info: LHSLV.getBitFieldInfo(), Loc: E->getExprLoc());
3614 } else
3615 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Result), Dst: LHSLV);
3616
3617 if (CGF.getLangOpts().OpenMP)
3618 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3619 LHS: E->getLHS());
3620 return LHSLV;
3621}
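// e.g. (illustrative): '_Atomic int a; a += 5;' maps onto the atomicrmw
// fast path above as
//   %old = atomicrmw add ptr %a, i32 5 seq_cst
//   %res = add i32 %old, 5   ; value of the expression
// whereas 'a *= 5' has no atomicrmw form and uses the cmpxchg loop instead.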
3622
3623Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3624 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3625 bool Ignore = TestAndClearIgnoreResultAssign();
3626 Value *RHS = nullptr;
3627 LValue LHS = EmitCompoundAssignLValue(E, Func, Result&: RHS);
3628
3629 // If the result is clearly ignored, return now.
3630 if (Ignore)
3631 return nullptr;
3632
3633 // The result of an assignment in C is the assigned r-value.
3634 if (!CGF.getLangOpts().CPlusPlus)
3635 return RHS;
3636
3637 // If the lvalue is non-volatile, return the computed value of the assignment.
3638 if (!LHS.isVolatileQualified())
3639 return RHS;
3640
3641 // Otherwise, reload the value.
3642 return EmitLoadOfLValue(LHS, E->getExprLoc());
3643}
3644
3645void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3646 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3647 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3648
3649 if (CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero)) {
3650 Checks.push_back(Elt: std::make_pair(x: Builder.CreateICmpNE(LHS: Ops.RHS, RHS: Zero),
3651 y: SanitizerKind::IntegerDivideByZero));
3652 }
3653
3654 const auto *BO = cast<BinaryOperator>(Val: Ops.E);
3655 if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow) &&
3656 Ops.Ty->hasSignedIntegerRepresentation() &&
3657 !IsWidenedIntegerOp(Ctx: CGF.getContext(), E: BO->getLHS()) &&
3658 Ops.mayHaveIntegerOverflow()) {
3659 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Val: Zero->getType());
3660
3661 llvm::Value *IntMin =
3662 Builder.getInt(AI: llvm::APInt::getSignedMinValue(numBits: Ty->getBitWidth()));
3663 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3664
3665 llvm::Value *LHSCmp = Builder.CreateICmpNE(LHS: Ops.LHS, RHS: IntMin);
3666 llvm::Value *RHSCmp = Builder.CreateICmpNE(LHS: Ops.RHS, RHS: NegOne);
3667 llvm::Value *NotOverflow = Builder.CreateOr(LHS: LHSCmp, RHS: RHSCmp, Name: "or");
3668 Checks.push_back(
3669 Elt: std::make_pair(x&: NotOverflow, y: SanitizerKind::SignedIntegerOverflow));
3670 }
3671
3672  if (!Checks.empty())
3673 EmitBinOpCheck(Checks, Info: Ops);
3674}
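// e.g. (illustrative): for signed 'int a, b; a / b' with both sanitizers
// enabled, the guards emitted above are roughly
//   %nonzero     = icmp ne i32 %b, 0
//   %not.intmin  = icmp ne i32 %a, -2147483648
//   %not.negone  = icmp ne i32 %b, -1
//   %no.overflow = or i1 %not.intmin, %not.negone
// with a branch to the diagnostic handler if either check fails.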
3675
3676Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3677 {
3678 CodeGenFunction::SanitizerScope SanScope(&CGF);
3679 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
3680 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
3681 Ops.Ty->isIntegerType() &&
3682 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3683 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3684 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: true);
3685 } else if (CGF.SanOpts.has(K: SanitizerKind::FloatDivideByZero) &&
3686 Ops.Ty->isRealFloatingType() &&
3687 Ops.mayHaveFloatDivisionByZero()) {
3688 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3689 llvm::Value *NonZero = Builder.CreateFCmpUNE(LHS: Ops.RHS, RHS: Zero);
3690 EmitBinOpCheck(Checks: std::make_pair(x&: NonZero, y: SanitizerKind::FloatDivideByZero),
3691 Info: Ops);
3692 }
3693 }
3694
3695 if (Ops.Ty->isConstantMatrixType()) {
3696 llvm::MatrixBuilder MB(Builder);
3697 // We need to check the types of the operands of the operator to get the
3698 // correct matrix dimensions.
3699 auto *BO = cast<BinaryOperator>(Val: Ops.E);
3700 (void)BO;
3701 assert(
3702 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3703 "first operand must be a matrix");
3704 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3705 "second operand must be an arithmetic type");
3706 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3707 return MB.CreateScalarDiv(LHS: Ops.LHS, RHS: Ops.RHS,
3708 IsUnsigned: Ops.Ty->hasUnsignedIntegerRepresentation());
3709 }
3710
3711 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3712 llvm::Value *Val;
3713 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3714 Val = Builder.CreateFDiv(L: Ops.LHS, R: Ops.RHS, Name: "div");
3715 CGF.SetDivFPAccuracy(Val);
3716 return Val;
3717 }
3718 else if (Ops.isFixedPointOp())
3719 return EmitFixedPointBinOp(Ops);
3720 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3721 return Builder.CreateUDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
3722 else
3723 return Builder.CreateSDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
3724}
3725
3726Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3727 // Rem in C can't be a floating point type: C99 6.5.5p2.
3728 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
3729 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
3730 Ops.Ty->isIntegerType() &&
3731 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3732 CodeGenFunction::SanitizerScope SanScope(&CGF);
3733 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3734 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: false);
3735 }
3736
3737 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3738 return Builder.CreateURem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
3739 else
3740 return Builder.CreateSRem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
3741}
3742
3743Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3744 unsigned IID;
3745 unsigned OpID = 0;
3746 SanitizerHandler OverflowKind;
3747
3748 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3749 switch (Ops.Opcode) {
3750 case BO_Add:
3751 case BO_AddAssign:
3752 OpID = 1;
3753 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3754 llvm::Intrinsic::uadd_with_overflow;
3755 OverflowKind = SanitizerHandler::AddOverflow;
3756 break;
3757 case BO_Sub:
3758 case BO_SubAssign:
3759 OpID = 2;
3760 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3761 llvm::Intrinsic::usub_with_overflow;
3762 OverflowKind = SanitizerHandler::SubOverflow;
3763 break;
3764 case BO_Mul:
3765 case BO_MulAssign:
3766 OpID = 3;
3767 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3768 llvm::Intrinsic::umul_with_overflow;
3769 OverflowKind = SanitizerHandler::MulOverflow;
3770 break;
3771 default:
3772 llvm_unreachable("Unsupported operation for overflow detection");
3773 }
3774 OpID <<= 1;
3775 if (isSigned)
3776 OpID |= 1;
3777
3778 CodeGenFunction::SanitizerScope SanScope(&CGF);
3779 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(T: Ops.Ty);
3780
3781 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, Tys: opTy);
3782
3783 Value *resultAndOverflow = Builder.CreateCall(Callee: intrinsic, Args: {Ops.LHS, Ops.RHS});
3784 Value *result = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 0);
3785 Value *overflow = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 1);
3786
3787 // Handle overflow with llvm.trap if no custom handler has been specified.
3788 const std::string *handlerName =
3789 &CGF.getLangOpts().OverflowHandler;
3790 if (handlerName->empty()) {
3791 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3792 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3793 if (!isSigned || CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) {
3794 llvm::Value *NotOverflow = Builder.CreateNot(V: overflow);
3795 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3796 : SanitizerKind::UnsignedIntegerOverflow;
3797 EmitBinOpCheck(Checks: std::make_pair(x&: NotOverflow, y&: Kind), Info: Ops);
3798 } else
3799 CGF.EmitTrapCheck(Checked: Builder.CreateNot(V: overflow), CheckHandlerID: OverflowKind);
3800 return result;
3801 }
3802
3803 // Branch in case of overflow.
3804 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3805 llvm::BasicBlock *continueBB =
3806 CGF.createBasicBlock(name: "nooverflow", parent: CGF.CurFn, before: initialBB->getNextNode());
3807 llvm::BasicBlock *overflowBB = CGF.createBasicBlock(name: "overflow", parent: CGF.CurFn);
3808
3809 Builder.CreateCondBr(Cond: overflow, True: overflowBB, False: continueBB);
3810
3811 // If an overflow handler is set, then we want to call it and then use its
3812 // result, if it returns.
3813 Builder.SetInsertPoint(overflowBB);
3814
3815 // Get the overflow handler.
3816 llvm::Type *Int8Ty = CGF.Int8Ty;
3817 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3818 llvm::FunctionType *handlerTy =
3819 llvm::FunctionType::get(Result: CGF.Int64Ty, Params: argTypes, isVarArg: true);
3820 llvm::FunctionCallee handler =
3821 CGF.CGM.CreateRuntimeFunction(Ty: handlerTy, Name: *handlerName);
3822
3823 // Sign extend the args to 64-bit, so that we can use the same handler for
3824 // all types of overflow.
3825 llvm::Value *lhs = Builder.CreateSExt(V: Ops.LHS, DestTy: CGF.Int64Ty);
3826 llvm::Value *rhs = Builder.CreateSExt(V: Ops.RHS, DestTy: CGF.Int64Ty);
3827
3828 // Call the handler with the two arguments, the operation, and the size of
3829 // the result.
3830 llvm::Value *handlerArgs[] = {
3831 lhs,
3832 rhs,
3833 Builder.getInt8(C: OpID),
3834 Builder.getInt8(C: cast<llvm::IntegerType>(Val: opTy)->getBitWidth())
3835 };
3836 llvm::Value *handlerResult =
3837 CGF.EmitNounwindRuntimeCall(callee: handler, args: handlerArgs);
3838
3839 // Truncate the result back to the desired size.
3840 handlerResult = Builder.CreateTrunc(V: handlerResult, DestTy: opTy);
3841 Builder.CreateBr(Dest: continueBB);
3842
3843 Builder.SetInsertPoint(continueBB);
3844 llvm::PHINode *phi = Builder.CreatePHI(Ty: opTy, NumReservedValues: 2);
3845 phi->addIncoming(V: result, BB: initialBB);
3846 phi->addIncoming(V: handlerResult, BB: overflowBB);
3847
3848 return phi;
3849}
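// e.g. (illustrative, handler name hypothetical): with
// '-ftrapv -ftrapv-handler=my_handler', signed 'a + b' on i32 becomes
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// and, on overflow,
//   %r64 = call i64 @my_handler(i64 %a.sext, i64 %b.sext, i8 3, i8 32)
// whose result is truncated back to i32 and merged through the PHI above.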
3850
3851/// Emit pointer + index arithmetic.
3852static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3853 const BinOpInfo &op,
3854 bool isSubtraction) {
3855 // Must have binary (not unary) expr here. Unary pointer
3856 // increment/decrement doesn't use this path.
3857 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
3858
3859 Value *pointer = op.LHS;
3860 Expr *pointerOperand = expr->getLHS();
3861 Value *index = op.RHS;
3862 Expr *indexOperand = expr->getRHS();
3863
3864 // In a subtraction, the LHS is always the pointer.
3865 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3866 std::swap(a&: pointer, b&: index);
3867 std::swap(a&: pointerOperand, b&: indexOperand);
3868 }
3869
3870 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3871
3872 unsigned width = cast<llvm::IntegerType>(Val: index->getType())->getBitWidth();
3873 auto &DL = CGF.CGM.getDataLayout();
3874 auto PtrTy = cast<llvm::PointerType>(Val: pointer->getType());
3875
3876 // Some versions of glibc and gcc use idioms (particularly in their malloc
3877 // routines) that add a pointer-sized integer (known to be a pointer value)
3878 // to a null pointer in order to cast the value back to an integer or as
3879 // part of a pointer alignment algorithm. This is undefined behavior, but
3880 // we'd like to be able to compile programs that use it.
3881 //
3882 // Normally, we'd generate a GEP with a null-pointer base here in response
3883 // to that code, but it's also UB to dereference a pointer created that
3884 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3885 // generate a direct cast of the integer value to a pointer.
3886 //
3887  // The idiom (p = nullptr + N) is not recognized if any of the following
3888  // are true:
3889  //   * The operation is subtraction.
3890  //   * The index is not pointer-sized.
3891  //   * The pointer type is not byte-sized.
3892 //
3893 if (BinaryOperator::isNullPointerArithmeticExtension(Ctx&: CGF.getContext(),
3894 Opc: op.Opcode,
3895 LHS: expr->getLHS(),
3896 RHS: expr->getRHS()))
3897 return CGF.Builder.CreateIntToPtr(V: index, DestTy: pointer->getType());
3898
3899 if (width != DL.getIndexTypeSizeInBits(Ty: PtrTy)) {
3900    // Zero-extend or sign-extend the index value to the pointer's index
3901    // width, according to whether the index is signed or not.
3902 index = CGF.Builder.CreateIntCast(V: index, DestTy: DL.getIndexType(PtrTy), isSigned,
3903 Name: "idx.ext");
3904 }
3905
3906 // If this is subtraction, negate the index.
3907 if (isSubtraction)
3908 index = CGF.Builder.CreateNeg(V: index, Name: "idx.neg");
3909
3910 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
3911 CGF.EmitBoundsCheck(E: op.E, Base: pointerOperand, Index: index, IndexType: indexOperand->getType(),
3912 /*Accessed*/ false);
3913
3914 const PointerType *pointerType
3915 = pointerOperand->getType()->getAs<PointerType>();
3916 if (!pointerType) {
3917 QualType objectType = pointerOperand->getType()
3918 ->castAs<ObjCObjectPointerType>()
3919 ->getPointeeType();
3920 llvm::Value *objectSize
3921 = CGF.CGM.getSize(numChars: CGF.getContext().getTypeSizeInChars(T: objectType));
3922
3923 index = CGF.Builder.CreateMul(LHS: index, RHS: objectSize);
3924
3925 Value *result =
3926 CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: pointer, IdxList: index, Name: "add.ptr");
3927 return CGF.Builder.CreateBitCast(V: result, DestTy: pointer->getType());
3928 }
3929
3930 QualType elementType = pointerType->getPointeeType();
3931 if (const VariableArrayType *vla
3932 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
3933 // The element count here is the total number of non-VLA elements.
3934 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3935
3936 // Effectively, the multiply by the VLA size is part of the GEP.
3937 // GEP indexes are signed, and scaling an index isn't permitted to
3938 // signed-overflow, so we use the same semantics for our explicit
3939 // multiply. We suppress this if overflow is not undefined behavior.
3940 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
3941 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3942 index = CGF.Builder.CreateMul(LHS: index, RHS: numElements, Name: "vla.index");
3943 pointer = CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
3944 } else {
3945 index = CGF.Builder.CreateNSWMul(LHS: index, RHS: numElements, Name: "vla.index");
3946 pointer = CGF.EmitCheckedInBoundsGEP(
3947 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
3948 Name: "add.ptr");
3949 }
3950 return pointer;
3951 }
3952
3953  // Explicitly handle GNU void* and function pointer arithmetic extensions.
3954  // The GNU void* casts amount to no-ops since our void* type is i8*, but
3955  // this is future-proof.
3956 llvm::Type *elemTy;
3957 if (elementType->isVoidType() || elementType->isFunctionType())
3958 elemTy = CGF.Int8Ty;
3959 else
3960 elemTy = CGF.ConvertTypeForMem(T: elementType);
3961
3962 if (CGF.getLangOpts().isSignedOverflowDefined())
3963 return CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
3964
3965 return CGF.EmitCheckedInBoundsGEP(
3966 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
3967 Name: "add.ptr");
3968}
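// e.g. (illustrative): 'int *p; p + i' with signed 32-bit 'i' on a 64-bit
// target emits
//   %idx.ext = sext i32 %i to i64
//   %add.ptr = getelementptr inbounds i32, ptr %p, i64 %idx.ext
// with the inbounds marker dropped under -fwrapv.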
3969
3970// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3971// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3972// the add operand respectively. This allows fmuladd to represent a*b-c, or
3973// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3974// efficient operations.
3975static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3976 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3977 bool negMul, bool negAdd) {
3978 Value *MulOp0 = MulOp->getOperand(i: 0);
3979 Value *MulOp1 = MulOp->getOperand(i: 1);
3980 if (negMul)
3981 MulOp0 = Builder.CreateFNeg(V: MulOp0, Name: "neg");
3982 if (negAdd)
3983 Addend = Builder.CreateFNeg(V: Addend, Name: "neg");
3984
3985 Value *FMulAdd = nullptr;
3986 if (Builder.getIsFPConstrained()) {
3987 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3988 "Only constrained operation should be created when Builder is in FP "
3989 "constrained mode");
3990 FMulAdd = Builder.CreateConstrainedFPCall(
3991 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3992 Addend->getType()),
3993 {MulOp0, MulOp1, Addend});
3994 } else {
3995 FMulAdd = Builder.CreateCall(
3996 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3997 {MulOp0, MulOp1, Addend});
3998 }
3999 MulOp->eraseFromParent();
4000
4001 return FMulAdd;
4002}
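// e.g. (illustrative): with -ffp-contract=on, 'float a, b, c; a * b + c'
// becomes
//   %0 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// and 'c - a * b' reaches buildFMulAdd with negMul set, so %a is negated
// first.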
4003
4004// Check whether it would be legal to emit an fmuladd intrinsic call to
4005// represent op and if so, build the fmuladd.
4006//
4007// Checks that (a) the operation is fusable and (b) -ffp-contract=on is in
4008// effect. Does NOT check the operation's type; it's assumed this function is
4009// called from contexts where the type is known to be contractable.
4010static Value* tryEmitFMulAdd(const BinOpInfo &op,
4011 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4012 bool isSub=false) {
4013
4014 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4015 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4016 "Only fadd/fsub can be the root of an fmuladd.");
4017
4018 // Check whether this op is marked as fusable.
4019 if (!op.FPFeatures.allowFPContractWithinStatement())
4020 return nullptr;
4021
4022 Value *LHS = op.LHS;
4023 Value *RHS = op.RHS;
4024
4025 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4026 // it is the only use of its operand.
4027 bool NegLHS = false;
4028 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: LHS)) {
4029 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4030 LHSUnOp->use_empty() && LHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4031 LHS = LHSUnOp->getOperand(i_nocapture: 0);
4032 NegLHS = true;
4033 }
4034 }
4035
4036 bool NegRHS = false;
4037 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: RHS)) {
4038 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4039 RHSUnOp->use_empty() && RHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4040 RHS = RHSUnOp->getOperand(i_nocapture: 0);
4041 NegRHS = true;
4042 }
4043 }
4044
4045 // We have a potentially fusable op. Look for a mul on one of the operands.
4046 // Also, make sure that the mul result isn't used directly. In that case,
4047 // there's no point creating a muladd operation.
4048 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: LHS)) {
4049 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4050 (LHSBinOp->use_empty() || NegLHS)) {
4051 // If we looked through fneg, erase it.
4052 if (NegLHS)
4053 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4054 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4055 }
4056 }
4057 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: RHS)) {
4058 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4059 (RHSBinOp->use_empty() || NegRHS)) {
4060 // If we looked through fneg, erase it.
4061 if (NegRHS)
4062 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4063 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4064 }
4065 }
4066
4067 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(Val: LHS)) {
4068 if (LHSBinOp->getIntrinsicID() ==
4069 llvm::Intrinsic::experimental_constrained_fmul &&
4070 (LHSBinOp->use_empty() || NegLHS)) {
4071 // If we looked through fneg, erase it.
4072 if (NegLHS)
4073 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4074 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4075 }
4076 }
4077 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(Val: RHS)) {
4078 if (RHSBinOp->getIntrinsicID() ==
4079 llvm::Intrinsic::experimental_constrained_fmul &&
4080 (RHSBinOp->use_empty() || NegRHS)) {
4081 // If we looked through fneg, erase it.
4082 if (NegRHS)
4083 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4084 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4085 }
4086 }
4087
4088 return nullptr;
4089}
4090
4091Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4092 if (op.LHS->getType()->isPointerTy() ||
4093 op.RHS->getType()->isPointerTy())
4094 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::NotSubtraction);
4095
4096 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4097 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4098 case LangOptions::SOB_Defined:
4099 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4100 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4101 [[fallthrough]];
4102 case LangOptions::SOB_Undefined:
4103 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4104 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4105 [[fallthrough]];
4106 case LangOptions::SOB_Trapping:
4107 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4108 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4109 return EmitOverflowCheckedBinOp(Ops: op);
4110 }
4111 }
4112
4113 // For vector and matrix adds, try to fold into a fmuladd.
4114 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4115 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4116 // Try to form an fmuladd.
4117 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4118 return FMulAdd;
4119 }
4120
4121 if (op.Ty->isConstantMatrixType()) {
4122 llvm::MatrixBuilder MB(Builder);
4123 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4124 return MB.CreateAdd(LHS: op.LHS, RHS: op.RHS);
4125 }
4126
4127 if (op.Ty->isUnsignedIntegerType() &&
4128 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
4129 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4130 return EmitOverflowCheckedBinOp(Ops: op);
4131
4132 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4133 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4134 return Builder.CreateFAdd(L: op.LHS, R: op.RHS, Name: "add");
4135 }
4136
4137 if (op.isFixedPointOp())
4138 return EmitFixedPointBinOp(Ops: op);
4139
4140 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4141}
4142
4143/// The resulting value must be calculated with exact precision, so the operands
4144/// may not be the same type.
4145Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4146 using llvm::APSInt;
4147 using llvm::ConstantInt;
4148
4149 // This is either a binary operation where at least one of the operands is
4150 // a fixed-point type, or a unary operation where the operand is a fixed-point
4151 // type. The result type of a binary operation is determined by
4152 // Sema::handleFixedPointConversions().
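  // For example (a sketch; exact bit-widths are target-dependent), adding a
  // 'short _Accum' and a '_Fract' converts both operands below to common
  // fixed-point semantics wide enough to represent either exactly, performs
  // the add, and then converts the result to the result type's semantics.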
4153 QualType ResultTy = op.Ty;
4154 QualType LHSTy, RHSTy;
4155 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: op.E)) {
4156 RHSTy = BinOp->getRHS()->getType();
4157 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(Val: BinOp)) {
4158 // For compound assignment, the effective type of the LHS at this point
4159 // is the computation LHS type, not the actual LHS type, and the final
4160 // result type is not the type of the expression but rather the
4161 // computation result type.
4162 LHSTy = CAO->getComputationLHSType();
4163 ResultTy = CAO->getComputationResultType();
4164 } else
4165 LHSTy = BinOp->getLHS()->getType();
4166 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(Val: op.E)) {
4167 LHSTy = UnOp->getSubExpr()->getType();
4168 RHSTy = UnOp->getSubExpr()->getType();
4169 }
4170 ASTContext &Ctx = CGF.getContext();
4171 Value *LHS = op.LHS;
4172 Value *RHS = op.RHS;
4173
4174 auto LHSFixedSema = Ctx.getFixedPointSemantics(Ty: LHSTy);
4175 auto RHSFixedSema = Ctx.getFixedPointSemantics(Ty: RHSTy);
4176 auto ResultFixedSema = Ctx.getFixedPointSemantics(Ty: ResultTy);
4177 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(Other: RHSFixedSema);
4178
4179 // Perform the actual operation.
4180 Value *Result;
4181 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4182 switch (op.Opcode) {
4183 case BO_AddAssign:
4184 case BO_Add:
4185 Result = FPBuilder.CreateAdd(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4186 break;
4187 case BO_SubAssign:
4188 case BO_Sub:
4189 Result = FPBuilder.CreateSub(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4190 break;
4191 case BO_MulAssign:
4192 case BO_Mul:
4193 Result = FPBuilder.CreateMul(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4194 break;
4195 case BO_DivAssign:
4196 case BO_Div:
4197 Result = FPBuilder.CreateDiv(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4198 break;
4199 case BO_ShlAssign:
4200 case BO_Shl:
4201 Result = FPBuilder.CreateShl(LHS, LHSSema: LHSFixedSema, RHS);
4202 break;
4203 case BO_ShrAssign:
4204 case BO_Shr:
4205 Result = FPBuilder.CreateShr(LHS, LHSSema: LHSFixedSema, RHS);
4206 break;
4207 case BO_LT:
4208 return FPBuilder.CreateLT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4209 case BO_GT:
4210 return FPBuilder.CreateGT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4211 case BO_LE:
4212 return FPBuilder.CreateLE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4213 case BO_GE:
4214 return FPBuilder.CreateGE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4215 case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types
    // are zeroed out. They could be overwritten through non-saturating
    // operations that cause overflow, but doing so invokes undefined behavior.
4219 return FPBuilder.CreateEQ(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4220 case BO_NE:
4221 return FPBuilder.CreateNE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4222 case BO_Cmp:
4223 case BO_LAnd:
4224 case BO_LOr:
4225 llvm_unreachable("Found unimplemented fixed point binary operation");
4226 case BO_PtrMemD:
4227 case BO_PtrMemI:
4228 case BO_Rem:
4229 case BO_Xor:
4230 case BO_And:
4231 case BO_Or:
4232 case BO_Assign:
4233 case BO_RemAssign:
4234 case BO_AndAssign:
4235 case BO_XorAssign:
4236 case BO_OrAssign:
4237 case BO_Comma:
4238 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4239 }
4240
4241 bool IsShift = BinaryOperator::isShiftOp(Opc: op.Opcode) ||
4242 BinaryOperator::isShiftAssignOp(Opc: op.Opcode);
4243 // Convert to the result type.
4244 return FPBuilder.CreateFixedToFixed(Src: Result, SrcSema: IsShift ? LHSFixedSema
4245 : CommonFixedSema,
4246 DstSema: ResultFixedSema);
4247}
4248
4249Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4250 // The LHS is always a pointer if either side is.
4251 if (!op.LHS->getType()->isPointerTy()) {
4252 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4253 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4254 case LangOptions::SOB_Defined:
4255 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4256 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4257 [[fallthrough]];
4258 case LangOptions::SOB_Undefined:
4259 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4260 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4261 [[fallthrough]];
4262 case LangOptions::SOB_Trapping:
4263 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4264 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4265 return EmitOverflowCheckedBinOp(Ops: op);
4266 }
4267 }
4268
4269 // For vector and matrix subs, try to fold into a fmuladd.
4270 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4271 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4272 // Try to form an fmuladd.
4273 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, isSub: true))
4274 return FMulAdd;
4275 }
4276
4277 if (op.Ty->isConstantMatrixType()) {
4278 llvm::MatrixBuilder MB(Builder);
4279 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4280 return MB.CreateSub(LHS: op.LHS, RHS: op.RHS);
4281 }
4282
4283 if (op.Ty->isUnsignedIntegerType() &&
4284 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
4285 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4286 return EmitOverflowCheckedBinOp(Ops: op);
4287
4288 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4289 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4290 return Builder.CreateFSub(L: op.LHS, R: op.RHS, Name: "sub");
4291 }
4292
4293 if (op.isFixedPointOp())
4294 return EmitFixedPointBinOp(op);
4295
4296 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4297 }
4298
4299 // If the RHS is not a pointer, then we have normal pointer
4300 // arithmetic.
4301 if (!op.RHS->getType()->isPointerTy())
4302 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::IsSubtraction);
4303
4304 // Otherwise, this is a pointer subtraction.
4305
4306 // Do the raw subtraction part.
4307 llvm::Value *LHS
4308 = Builder.CreatePtrToInt(V: op.LHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.lhs.cast");
4309 llvm::Value *RHS
4310 = Builder.CreatePtrToInt(V: op.RHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.rhs.cast");
4311 Value *diffInChars = Builder.CreateSub(LHS, RHS, Name: "sub.ptr.sub");
4312
4313 // Okay, figure out the element size.
4314 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
4315 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4316
4317 llvm::Value *divisor = nullptr;
4318
4319 // For a variable-length array, this is going to be non-constant.
4320 if (const VariableArrayType *vla
4321 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
4322 auto VlaSize = CGF.getVLASize(vla);
4323 elementType = VlaSize.Type;
4324 divisor = VlaSize.NumElts;
4325
4326 // Scale the number of non-VLA elements by the non-VLA element size.
4327 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4328 if (!eltSize.isOne())
4329 divisor = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: divisor);
4330
    // For everything else, we can just compute it, safe in the
    // assumption that Sema won't let anything through that we can't
    // safely compute the size of.
4334 } else {
4335 CharUnits elementSize;
4336 // Handle GCC extension for pointer arithmetic on void* and
4337 // function pointer types.
4338 if (elementType->isVoidType() || elementType->isFunctionType())
4339 elementSize = CharUnits::One();
4340 else
4341 elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4342
4343 // Don't even emit the divide for element size of 1.
4344 if (elementSize.isOne())
4345 return diffInChars;
4346
4347 divisor = CGF.CGM.getSize(numChars: elementSize);
4348 }
4349
4350 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4351 // pointer difference in C is only defined in the case where both operands
4352 // are pointing to elements of an array.
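  // For example, for 'int *p, *q' on a target with 4-byte int, 'p - q'
  // becomes (illustrative IR):
  //   %d = sub i64 %p.int, %q.int       ; sub.ptr.sub
  //   %r = sdiv exact i64 %d, 4         ; sub.ptr.div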
4353 return Builder.CreateExactSDiv(LHS: diffInChars, RHS: divisor, Name: "sub.ptr.div");
4354}
4355
4356Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4357 bool RHSIsSigned) {
4358 llvm::IntegerType *Ty;
4359 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4360 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4361 else
4362 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4363 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4364 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4365 // this in ConstantInt::get, this results in the value getting truncated.
4366 // Constrain the return value to be max(RHS) in this case.
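  // For example, if LHS is a _BitInt(256) (so width(LHS)-1 == 255) and RHS is
  // a signed i8 (maximum value 127), we return 127 rather than letting 255
  // wrap when truncated to i8.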
4367 llvm::Type *RHSTy = RHS->getType();
4368 llvm::APInt RHSMax =
4369 RHSIsSigned ? llvm::APInt::getSignedMaxValue(numBits: RHSTy->getScalarSizeInBits())
4370 : llvm::APInt::getMaxValue(numBits: RHSTy->getScalarSizeInBits());
4371 if (RHSMax.ult(RHS: Ty->getBitWidth()))
4372 return llvm::ConstantInt::get(Ty: RHSTy, V: RHSMax);
4373 return llvm::ConstantInt::get(Ty: RHSTy, V: Ty->getBitWidth() - 1);
4374}
4375
4376Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4377 const Twine &Name) {
4378 llvm::IntegerType *Ty;
4379 if (auto *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4380 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4381 else
4382 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4383
4384 if (llvm::isPowerOf2_64(Value: Ty->getBitWidth()))
4385 return Builder.CreateAnd(LHS: RHS, RHS: GetMaximumShiftAmount(LHS, RHS, RHSIsSigned: false), Name);
4386
4387 return Builder.CreateURem(
4388 LHS: RHS, RHS: llvm::ConstantInt::get(Ty: RHS->getType(), V: Ty->getBitWidth()), Name);
4389}
4390
4391Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4392 // TODO: This misses out on the sanitizer check below.
4393 if (Ops.isFixedPointOp())
4394 return EmitFixedPointBinOp(op: Ops);
4395
4396 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4397 // RHS to the same size as the LHS.
4398 Value *RHS = Ops.RHS;
4399 if (Ops.LHS->getType() != RHS->getType())
4400 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4401
4402 bool SanitizeSignedBase = CGF.SanOpts.has(K: SanitizerKind::ShiftBase) &&
4403 Ops.Ty->hasSignedIntegerRepresentation() &&
4404 !CGF.getLangOpts().isSignedOverflowDefined() &&
4405 !CGF.getLangOpts().CPlusPlus20;
4406 bool SanitizeUnsignedBase =
4407 CGF.SanOpts.has(K: SanitizerKind::UnsignedShiftBase) &&
4408 Ops.Ty->hasUnsignedIntegerRepresentation();
4409 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4410 bool SanitizeExponent = CGF.SanOpts.has(K: SanitizerKind::ShiftExponent);
4411 // OpenCL 6.3j: shift values are effectively % word size of LHS.
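  // For example, for a 32-bit LHS the shift amount is constrained below with
  // 'and %rhs, 31'; for a non-power-of-two bit-width, ConstrainShiftValue
  // falls back to 'urem %rhs, width'.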
4412 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4413 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shl.mask");
4414 else if ((SanitizeBase || SanitizeExponent) &&
4415 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4416 CodeGenFunction::SanitizerScope SanScope(&CGF);
4417 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
4418 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4419 llvm::Value *WidthMinusOne =
4420 GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned);
4421 llvm::Value *ValidExponent = Builder.CreateICmpULE(LHS: Ops.RHS, RHS: WidthMinusOne);
4422
4423 if (SanitizeExponent) {
4424 Checks.push_back(
4425 Elt: std::make_pair(x&: ValidExponent, y: SanitizerKind::ShiftExponent));
4426 }
4427
4428 if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if the exponent is valid; otherwise
      // the instructions below would themselves have undefined behavior.
4432 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4433 llvm::BasicBlock *Cont = CGF.createBasicBlock(name: "cont");
4434 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock(name: "check");
4435 Builder.CreateCondBr(Cond: ValidExponent, True: CheckShiftBase, False: Cont);
4436 llvm::Value *PromotedWidthMinusOne =
4437 (RHS == Ops.RHS) ? WidthMinusOne
4438 : GetMaximumShiftAmount(LHS: Ops.LHS, RHS, RHSIsSigned);
4439 CGF.EmitBlock(BB: CheckShiftBase);
4440 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4441 LHS: Ops.LHS, RHS: Builder.CreateSub(LHS: PromotedWidthMinusOne, RHS, Name: "shl.zeros",
4442 /*NUW*/ HasNUW: true, /*NSW*/ HasNSW: true),
4443 Name: "shl.check");
4444 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4445 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4446 // Under C++11's rules, shifting a 1 bit into the sign bit is
4447 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4448 // define signed left shifts, so we use the C99 and C++11 rules there).
4449 // Unsigned shifts can always shift into the top bit.
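        // For example, for a 32-bit int, '1 << 31' is undefined in C99 but
        // allowed under C++11's rules, while '2 << 31' is undefined in both.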
4450 llvm::Value *One = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 1);
4451 BitsShiftedOff = Builder.CreateLShr(LHS: BitsShiftedOff, RHS: One);
4452 }
4453 llvm::Value *Zero = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 0);
4454 llvm::Value *ValidBase = Builder.CreateICmpEQ(LHS: BitsShiftedOff, RHS: Zero);
4455 CGF.EmitBlock(BB: Cont);
4456 llvm::PHINode *BaseCheck = Builder.CreatePHI(Ty: ValidBase->getType(), NumReservedValues: 2);
4457 BaseCheck->addIncoming(V: Builder.getTrue(), BB: Orig);
4458 BaseCheck->addIncoming(V: ValidBase, BB: CheckShiftBase);
4459 Checks.push_back(Elt: std::make_pair(
4460 x&: BaseCheck, y: SanitizeSignedBase ? SanitizerKind::ShiftBase
4461 : SanitizerKind::UnsignedShiftBase));
4462 }
4463
4464 assert(!Checks.empty());
4465 EmitBinOpCheck(Checks, Info: Ops);
4466 }
4467
4468 return Builder.CreateShl(LHS: Ops.LHS, RHS, Name: "shl");
4469}
4470
4471Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4472 // TODO: This misses out on the sanitizer check below.
4473 if (Ops.isFixedPointOp())
4474 return EmitFixedPointBinOp(op: Ops);
4475
4476 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4477 // RHS to the same size as the LHS.
4478 Value *RHS = Ops.RHS;
4479 if (Ops.LHS->getType() != RHS->getType())
4480 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4481
4482 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4483 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4484 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shr.mask");
4485 else if (CGF.SanOpts.has(K: SanitizerKind::ShiftExponent) &&
4486 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4487 CodeGenFunction::SanitizerScope SanScope(&CGF);
4488 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4489 llvm::Value *Valid = Builder.CreateICmpULE(
4490 LHS: Ops.RHS, RHS: GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned));
4491 EmitBinOpCheck(Checks: std::make_pair(x&: Valid, y: SanitizerKind::ShiftExponent), Info: Ops);
4492 }
4493
4494 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4495 return Builder.CreateLShr(LHS: Ops.LHS, RHS, Name: "shr");
4496 return Builder.CreateAShr(LHS: Ops.LHS, RHS, Name: "shr");
4497}
4498
4499enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector type.
4501static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4502 BuiltinType::Kind ElemKind) {
4503 switch (ElemKind) {
4504 default: llvm_unreachable("unexpected element type");
4505 case BuiltinType::Char_U:
4506 case BuiltinType::UChar:
4507 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4508 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4509 case BuiltinType::Char_S:
4510 case BuiltinType::SChar:
4511 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4512 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4513 case BuiltinType::UShort:
4514 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4515 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4516 case BuiltinType::Short:
4517 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4518 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4519 case BuiltinType::UInt:
4520 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4521 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4522 case BuiltinType::Int:
4523 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4524 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4525 case BuiltinType::ULong:
4526 case BuiltinType::ULongLong:
4527 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4528 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4529 case BuiltinType::Long:
4530 case BuiltinType::LongLong:
4531 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4532 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4533 case BuiltinType::Float:
4534 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4535 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4536 case BuiltinType::Double:
4537 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4538 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4539 case BuiltinType::UInt128:
4540 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4541 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4542 case BuiltinType::Int128:
4543 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4544 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4545 }
4546}
4547
4548Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4549 llvm::CmpInst::Predicate UICmpOpc,
4550 llvm::CmpInst::Predicate SICmpOpc,
4551 llvm::CmpInst::Predicate FCmpOpc,
4552 bool IsSignaling) {
4553 TestAndClearIgnoreResultAssign();
4554 Value *Result;
4555 QualType LHSTy = E->getLHS()->getType();
4556 QualType RHSTy = E->getRHS()->getType();
4557 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4558 assert(E->getOpcode() == BO_EQ ||
4559 E->getOpcode() == BO_NE);
4560 Value *LHS = CGF.EmitScalarExpr(E: E->getLHS());
4561 Value *RHS = CGF.EmitScalarExpr(E: E->getRHS());
4562 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4563 CGF, L: LHS, R: RHS, MPT, Inequality: E->getOpcode() == BO_NE);
4564 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4565 BinOpInfo BOInfo = EmitBinOps(E);
4566 Value *LHS = BOInfo.LHS;
4567 Value *RHS = BOInfo.RHS;
4568
    // With AltiVec, a vector comparison in a scalar context yields a numeric
    // type, so we use predicate intrinsics that compare whole vectors and
    // produce 0 or 1 as the result.
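    // For example, '==' on two 'vector int' operands lowers (roughly) to a
    // call to llvm.ppc.altivec.vcmpequw.p with a CR6 selector argument, and
    // the intrinsic's i32 result is converted to the expression's type below.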
4571 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to the predicate result.
4573 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4574
4575 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4576
      // In several cases the vector argument order will be reversed.
4578 Value *FirstVecArg = LHS,
4579 *SecondVecArg = RHS;
4580
4581 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4582 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4583
4584 switch(E->getOpcode()) {
4585 default: llvm_unreachable("is not a comparison operation");
4586 case BO_EQ:
4587 CR6 = CR6_LT;
4588 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4589 break;
4590 case BO_NE:
4591 CR6 = CR6_EQ;
4592 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4593 break;
4594 case BO_LT:
4595 CR6 = CR6_LT;
4596 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4597 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4598 break;
4599 case BO_GT:
4600 CR6 = CR6_LT;
4601 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4602 break;
4603 case BO_LE:
4604 if (ElementKind == BuiltinType::Float) {
4605 CR6 = CR6_LT;
4606 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4607 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4608 }
4609 else {
4610 CR6 = CR6_EQ;
4611 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4612 }
4613 break;
4614 case BO_GE:
4615 if (ElementKind == BuiltinType::Float) {
4616 CR6 = CR6_LT;
4617 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4618 }
4619 else {
4620 CR6 = CR6_EQ;
4621 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4622 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4623 }
4624 break;
4625 }
4626
4627 Value *CR6Param = Builder.getInt32(C: CR6);
4628 llvm::Function *F = CGF.CGM.getIntrinsic(IID: ID);
4629 Result = Builder.CreateCall(Callee: F, Args: {CR6Param, FirstVecArg, SecondVecArg});
4630
      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion does
      // nothing, so if ResultTy is not i1 we must truncate the result here to
      // avoid a crash later.
4636 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Val: Result->getType());
4637 if (ResultTy->getBitWidth() > 1 &&
4638 E->getType() == CGF.getContext().BoolTy)
4639 Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt1Ty());
4640 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
4641 Loc: E->getExprLoc());
4642 }
4643
4644 if (BOInfo.isFixedPointOp()) {
4645 Result = EmitFixedPointBinOp(op: BOInfo);
4646 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4647 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4648 if (!IsSignaling)
4649 Result = Builder.CreateFCmp(P: FCmpOpc, LHS, RHS, Name: "cmp");
4650 else
4651 Result = Builder.CreateFCmpS(P: FCmpOpc, LHS, RHS, Name: "cmp");
4652 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4653 Result = Builder.CreateICmp(P: SICmpOpc, LHS, RHS, Name: "cmp");
4654 } else {
4655 // Unsigned integers and pointers.
4656
4657 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4658 !isa<llvm::ConstantPointerNull>(Val: LHS) &&
4659 !isa<llvm::ConstantPointerNull>(Val: RHS)) {
4660
4661 // Dynamic information is required to be stripped for comparisons,
4662 // because it could leak the dynamic information. Based on comparisons
4663 // of pointers to dynamic objects, the optimizer can replace one pointer
4664 // with another, which might be incorrect in presence of invariant
4665 // groups. Comparison with null is safe because null does not carry any
4666 // dynamic information.
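      // For example (a sketch), under -fstrict-vtable-pointers an equality
      // comparison of two 'A *' values, where A may be a dynamic class,
      // emits llvm.strip.invariant.group on each operand before the icmp.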
4667 if (LHSTy.mayBeDynamicClass())
4668 LHS = Builder.CreateStripInvariantGroup(Ptr: LHS);
4669 if (RHSTy.mayBeDynamicClass())
4670 RHS = Builder.CreateStripInvariantGroup(Ptr: RHS);
4671 }
4672
4673 Result = Builder.CreateICmp(P: UICmpOpc, LHS, RHS, Name: "cmp");
4674 }
4675
4676 // If this is a vector comparison, sign extend the result to the appropriate
4677 // vector integer type and return it (don't convert to bool).
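  // For example, an icmp on <4 x i32> operands produces <4 x i1>; the sext
  // widens each lane to 0 or -1 in <4 x i32>, the all-ones "true" convention
  // for vector comparisons.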
4678 if (LHSTy->isVectorType())
4679 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
4680
4681 } else {
4682 // Complex Comparison: can only be an equality comparison.
4683 CodeGenFunction::ComplexPairTy LHS, RHS;
4684 QualType CETy;
4685 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4686 LHS = CGF.EmitComplexExpr(E: E->getLHS());
4687 CETy = CTy->getElementType();
4688 } else {
4689 LHS.first = Visit(E: E->getLHS());
4690 LHS.second = llvm::Constant::getNullValue(Ty: LHS.first->getType());
4691 CETy = LHSTy;
4692 }
4693 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4694 RHS = CGF.EmitComplexExpr(E: E->getRHS());
4695 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4696 CTy->getElementType()) &&
4697 "The element types must always match.");
4698 (void)CTy;
4699 } else {
4700 RHS.first = Visit(E: E->getRHS());
4701 RHS.second = llvm::Constant::getNullValue(Ty: RHS.first->getType());
4702 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4703 "The element types must always match.");
4704 }
4705
4706 Value *ResultR, *ResultI;
4707 if (CETy->isRealFloatingType()) {
4708 // As complex comparisons can only be equality comparisons, they
4709 // are never signaling comparisons.
4710 ResultR = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
4711 ResultI = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
4712 } else {
4713 // Complex comparisons can only be equality comparisons. As such, signed
4714 // and unsigned opcodes are the same.
4715 ResultR = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
4716 ResultI = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
4717 }
4718
4719 if (E->getOpcode() == BO_EQ) {
4720 Result = Builder.CreateAnd(LHS: ResultR, RHS: ResultI, Name: "and.ri");
4721 } else {
4722 assert(E->getOpcode() == BO_NE &&
4723 "Complex comparison other than == or != ?");
4724 Result = Builder.CreateOr(LHS: ResultR, RHS: ResultI, Name: "or.ri");
4725 }
4726 }
4727
4728 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
4729 Loc: E->getExprLoc());
4730}
4731
4732llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
4733 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
4734 // In case we have the integer or bitfield sanitizer checks enabled
4735 // we want to get the expression before scalar conversion.
4736 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: E->getRHS())) {
4737 CastKind Kind = ICE->getCastKind();
4738 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
4739 *SrcType = ICE->getSubExpr()->getType();
4740 *Previous = EmitScalarExpr(E: ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks, as E refers to a bitfield.
4743 return EmitScalarConversion(Src: *Previous, SrcTy: *SrcType, DstTy: ICE->getType(),
4744 Loc: ICE->getExprLoc());
4745 }
4746 }
4747 return EmitScalarExpr(E: E->getRHS());
4748}
4749
4750Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4751 bool Ignore = TestAndClearIgnoreResultAssign();
4752
4753 Value *RHS;
4754 LValue LHS;
4755
4756 switch (E->getLHS()->getType().getObjCLifetime()) {
4757 case Qualifiers::OCL_Strong:
4758 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4759 break;
4760
4761 case Qualifiers::OCL_Autoreleasing:
4762 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreAutoreleasing(e: E);
4763 break;
4764
4765 case Qualifiers::OCL_ExplicitNone:
4766 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreUnsafeUnretained(e: E, ignored: Ignore);
4767 break;
4768
4769 case Qualifiers::OCL_Weak:
4770 RHS = Visit(E: E->getRHS());
4771 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
4772 RHS = CGF.EmitARCStoreWeak(addr: LHS.getAddress(CGF), value: RHS, ignored: Ignore);
4773 break;
4774
4775 case Qualifiers::OCL_None:
4776 // __block variables need to have the rhs evaluated first, plus
4777 // this should improve codegen just a little.
4778 Value *Previous = nullptr;
4779 QualType SrcType = E->getRHS()->getType();
    // If the LHS is a bitfield and the RHS contains an implicit cast
    // expression, we want to extract the pre-cast value and potentially (if
    // the bitfield sanitizer is enabled) use it to check for an implicit
    // conversion.
4783 if (E->getLHS()->refersToBitField())
4784 RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, Previous: &Previous, SrcType: &SrcType);
4785 else
4786 RHS = Visit(E: E->getRHS());
4787
4788 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
4789
4790 // Store the value into the LHS. Bit-fields are handled specially
4791 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4792 // 'An assignment expression has the value of the left operand after
4793 // the assignment...'.
4794 if (LHS.isBitField()) {
4795 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: RHS), Dst: LHS, Result: &RHS);
4796 // If the expression contained an implicit conversion, make sure
4797 // to use the value before the scalar conversion.
4798 Value *Src = Previous ? Previous : RHS;
4799 QualType DstType = E->getLHS()->getType();
4800 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: RHS, DstType,
4801 Info: LHS.getBitFieldInfo(), Loc: E->getExprLoc());
4802 } else {
4803 CGF.EmitNullabilityCheck(LHS, RHS, Loc: E->getExprLoc());
4804 CGF.EmitStoreThroughLValue(Src: RValue::get(V: RHS), Dst: LHS);
4805 }
4806 }
4807
4808 // If the result is clearly ignored, return now.
4809 if (Ignore)
4810 return nullptr;
4811
4812 // The result of an assignment in C is the assigned r-value.
4813 if (!CGF.getLangOpts().CPlusPlus)
4814 return RHS;
4815
4816 // If the lvalue is non-volatile, return the computed value of the assignment.
4817 if (!LHS.isVolatileQualified())
4818 return RHS;
4819
4820 // Otherwise, reload the value.
4821 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
4822}
4823
4824Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4825 // Perform vector logical and on comparisons with zero vectors.
4826 if (E->getType()->isVectorType()) {
4827 CGF.incrementProfileCounter(E);
4828
4829 Value *LHS = Visit(E: E->getLHS());
4830 Value *RHS = Visit(E: E->getRHS());
4831 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
4832 if (LHS->getType()->isFPOrFPVectorTy()) {
4833 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4834 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
4835 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
4836 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
4837 } else {
4838 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
4839 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
4840 }
4841 Value *And = Builder.CreateAnd(LHS, RHS);
4842 return Builder.CreateSExt(V: And, DestTy: ConvertType(T: E->getType()), Name: "sext");
4843 }
4844
4845 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4846 llvm::Type *ResTy = ConvertType(T: E->getType());
4847
4848 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4849 // If we have 1 && X, just emit X without inserting the control flow.
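  // For example, 'if (0 && f())' folds to false without ever calling f()
  // (unless the RHS contains a label we might jump to), and 'if (1 && x)'
  // emits only the evaluation of x as a bool.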
4850 bool LHSCondVal;
4851 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
4852 if (LHSCondVal) { // If we have 1 && X, just emit X.
4853 CGF.incrementProfileCounter(E);
4854
4855 // If the top of the logical operator nest, reset the MCDC temp to 0.
4856 if (CGF.MCDCLogOpStack.empty())
4857 CGF.maybeResetMCDCCondBitmap(E);
4858
4859 CGF.MCDCLogOpStack.push_back(Elt: E);
4860
4861 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4862
4863 // If we're generating for profiling or coverage, generate a branch to a
4864 // block that increments the RHS counter needed to track branch condition
4865 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4866 // "FalseBlock" after the increment is done.
4867 if (InstrumentRegions &&
4868 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4869 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4870 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "land.end");
4871 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
4872 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: FBlock);
4873 CGF.EmitBlock(BB: RHSBlockCnt);
4874 CGF.incrementProfileCounter(E->getRHS());
4875 CGF.EmitBranch(Block: FBlock);
4876 CGF.EmitBlock(BB: FBlock);
4877 }
4878
4879 CGF.MCDCLogOpStack.pop_back();
4880 // If the top of the logical operator nest, update the MCDC bitmap.
4881 if (CGF.MCDCLogOpStack.empty())
4882 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4883
4884 // ZExt result to int or bool.
4885 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "land.ext");
4886 }
4887
4888 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4889 if (!CGF.ContainsLabel(E->getRHS()))
4890 return llvm::Constant::getNullValue(Ty: ResTy);
4891 }
4892
4893 // If the top of the logical operator nest, reset the MCDC temp to 0.
4894 if (CGF.MCDCLogOpStack.empty())
4895 CGF.maybeResetMCDCCondBitmap(E);
4896
4897 CGF.MCDCLogOpStack.push_back(Elt: E);
4898
4899 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "land.end");
4900 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "land.rhs");
4901
4902 CodeGenFunction::ConditionalEvaluation eval(CGF);
4903
4904 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4905 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: RHSBlock, FalseBlock: ContBlock,
4906 TrueCount: CGF.getProfileCount(E->getRHS()));
4907
  // Any edges into the ContBlock are now from an (indeterminate number of)
  // branches on this first condition. All of those paths produce 'false', so
  // start setting up the PHI node in the Cont Block for this.
4911 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
4912 NameStr: "", InsertAtEnd: ContBlock);
4913 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
4914 PI != PE; ++PI)
4915 PN->addIncoming(V: llvm::ConstantInt::getFalse(Context&: VMContext), BB: *PI);
4916
4917 eval.begin(CGF);
4918 CGF.EmitBlock(BB: RHSBlock);
4919 CGF.incrementProfileCounter(E);
4920 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4921 eval.end(CGF);
4922
  // Reacquire the RHS block, as there may be subblocks inserted.
4924 RHSBlock = Builder.GetInsertBlock();
4925
4926 // If we're generating for profiling or coverage, generate a branch on the
4927 // RHS to a block that increments the RHS true counter needed to track branch
4928 // condition coverage.
4929 if (InstrumentRegions &&
4930 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4931 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4932 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
4933 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: ContBlock);
4934 CGF.EmitBlock(BB: RHSBlockCnt);
4935 CGF.incrementProfileCounter(E->getRHS());
4936 CGF.EmitBranch(Block: ContBlock);
4937 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
4938 }
4939
4940 // Emit an unconditional branch from this block to ContBlock.
4941 {
4942 // There is no need to emit line number for unconditional branch.
4943 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4944 CGF.EmitBlock(BB: ContBlock);
4945 }
4946 // Insert an entry into the phi node for the edge with the value of RHSCond.
4947 PN->addIncoming(V: RHSCond, BB: RHSBlock);
4948
4949 CGF.MCDCLogOpStack.pop_back();
4950 // If the top of the logical operator nest, update the MCDC bitmap.
4951 if (CGF.MCDCLogOpStack.empty())
4952 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4953
4954 // Artificial location to preserve the scope information
4955 {
4956 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4957 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4958 }
4959
4960 // ZExt result to int.
4961 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "land.ext");
4962}
4963
4964Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4965 // Perform vector logical or on comparisons with zero vectors.
4966 if (E->getType()->isVectorType()) {
4967 CGF.incrementProfileCounter(E);
4968
4969 Value *LHS = Visit(E: E->getLHS());
4970 Value *RHS = Visit(E: E->getRHS());
4971 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
4972 if (LHS->getType()->isFPOrFPVectorTy()) {
4973 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4974 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
4975 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
4976 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
4977 } else {
4978 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
4979 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
4980 }
4981 Value *Or = Builder.CreateOr(LHS, RHS);
4982 return Builder.CreateSExt(V: Or, DestTy: ConvertType(T: E->getType()), Name: "sext");
4983 }
4984
4985 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4986 llvm::Type *ResTy = ConvertType(T: E->getType());
4987
4988 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4989 // If we have 0 || X, just emit X without inserting the control flow.
4990 bool LHSCondVal;
4991 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
4992 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4993 CGF.incrementProfileCounter(E);
4994
4995 // If the top of the logical operator nest, reset the MCDC temp to 0.
4996 if (CGF.MCDCLogOpStack.empty())
4997 CGF.maybeResetMCDCCondBitmap(E);
4998
4999 CGF.MCDCLogOpStack.push_back(Elt: E);
5000
5001 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5002
5003 // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
5005 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5006 // "FalseBlock" after the increment is done.
5007 if (InstrumentRegions &&
5008 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5009 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5010 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "lor.end");
5011 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5012 Builder.CreateCondBr(Cond: RHSCond, True: FBlock, False: RHSBlockCnt);
5013 CGF.EmitBlock(BB: RHSBlockCnt);
5014 CGF.incrementProfileCounter(E->getRHS());
5015 CGF.EmitBranch(Block: FBlock);
5016 CGF.EmitBlock(BB: FBlock);
5017 }
5018
5019 CGF.MCDCLogOpStack.pop_back();
5020 // If the top of the logical operator nest, update the MCDC bitmap.
5021 if (CGF.MCDCLogOpStack.empty())
5022 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5023
5024 // ZExt result to int or bool.
5025 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "lor.ext");
5026 }
5027
5028 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5029 if (!CGF.ContainsLabel(E->getRHS()))
5030 return llvm::ConstantInt::get(Ty: ResTy, V: 1);
5031 }
5032
5033 // If the top of the logical operator nest, reset the MCDC temp to 0.
5034 if (CGF.MCDCLogOpStack.empty())
5035 CGF.maybeResetMCDCCondBitmap(E);
5036
5037 CGF.MCDCLogOpStack.push_back(Elt: E);
5038
5039 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "lor.end");
5040 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "lor.rhs");
5041
5042 CodeGenFunction::ConditionalEvaluation eval(CGF);
5043
5044 // Branch on the LHS first. If it is true, go to the success (cont) block.
5045 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: ContBlock, FalseBlock: RHSBlock,
5046 TrueCount: CGF.getCurrentProfileCount() -
5047 CGF.getProfileCount(E->getRHS()));
5048
  // Any edges into the ContBlock are now from an (indeterminate number of)
  // branches on this first condition. All of those paths produce 'true', so
  // start setting up the PHI node in the Cont Block for this.
5052 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
5053 NameStr: "", InsertAtEnd: ContBlock);
5054 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
5055 PI != PE; ++PI)
5056 PN->addIncoming(V: llvm::ConstantInt::getTrue(Context&: VMContext), BB: *PI);
5057
5058 eval.begin(CGF);
5059
5060 // Emit the RHS condition as a bool value.
5061 CGF.EmitBlock(BB: RHSBlock);
5062 CGF.incrementProfileCounter(E);
5063 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5064
5065 eval.end(CGF);
5066
  // Reacquire the RHS block, as there may be subblocks inserted.
5068 RHSBlock = Builder.GetInsertBlock();
5069
5070 // If we're generating for profiling or coverage, generate a branch on the
5071 // RHS to a block that increments the RHS true counter needed to track branch
5072 // condition coverage.
5073 if (InstrumentRegions &&
5074 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5075 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5076 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5077 Builder.CreateCondBr(Cond: RHSCond, True: ContBlock, False: RHSBlockCnt);
5078 CGF.EmitBlock(BB: RHSBlockCnt);
5079 CGF.incrementProfileCounter(E->getRHS());
5080 CGF.EmitBranch(Block: ContBlock);
5081 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
5082 }
5083
5084 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5085 // into the phi node for the edge with the value of RHSCond.
5086 CGF.EmitBlock(BB: ContBlock);
5087 PN->addIncoming(V: RHSCond, BB: RHSBlock);
5088
5089 CGF.MCDCLogOpStack.pop_back();
5090 // If the top of the logical operator nest, update the MCDC bitmap.
5091 if (CGF.MCDCLogOpStack.empty())
5092 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5093
5094 // ZExt result to int.
5095 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "lor.ext");
5096}
5097
5098Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5099 CGF.EmitIgnoredExpr(E: E->getLHS());
5100 CGF.EnsureInsertPoint();
5101 return Visit(E: E->getRHS());
5102}
5103
5104//===----------------------------------------------------------------------===//
5105// Other Operators
5106//===----------------------------------------------------------------------===//
5107
5108/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5109/// expression is cheap enough and side-effect-free enough to evaluate
5110/// unconditionally instead of conditionally. This is used to convert control
5111/// flow into selects in some cases.
5112static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
5113 CodeGenFunction &CGF) {
5114 // Anything that is an integer or floating point constant is fine.
5115 return E->IgnoreParens()->isEvaluatable(Ctx: CGF.getContext());
5116
5117 // Even non-volatile automatic variables can't be evaluated unconditionally.
5118 // Referencing a thread_local may cause non-trivial initialization work to
5119 // occur. If we're inside a lambda and one of the variables is from the scope
5120 // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
5123}
5124
5125
5126Value *ScalarExprEmitter::
5127VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5128 TestAndClearIgnoreResultAssign();
5129
5130 // Bind the common expression if necessary.
5131 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5132
5133 Expr *condExpr = E->getCond();
5134 Expr *lhsExpr = E->getTrueExpr();
5135 Expr *rhsExpr = E->getFalseExpr();
5136
5137 // If the condition constant folds and can be elided, try to avoid emitting
5138 // the condition and the dead arm.
5139 bool CondExprBool;
5140 if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) {
5141 Expr *live = lhsExpr, *dead = rhsExpr;
5142 if (!CondExprBool) std::swap(a&: live, b&: dead);
5143
5144 // If the dead side doesn't have labels we need, just emit the Live part.
5145 if (!CGF.ContainsLabel(dead)) {
5146 if (CondExprBool) {
5147 if (llvm::EnableSingleByteCoverage) {
5148 CGF.incrementProfileCounter(lhsExpr);
5149 CGF.incrementProfileCounter(rhsExpr);
5150 }
5151 CGF.incrementProfileCounter(E);
5152 }
5153 Value *Result = Visit(E: live);
5154
5155 // If the live part is a throw expression, it acts like it has a void
5156 // type, so evaluating it returns a null Value*. However, a conditional
5157 // with non-void type must return a non-null Value*.
5158 if (!Result && !E->getType()->isVoidType())
5159 Result = llvm::UndefValue::get(T: CGF.ConvertType(E->getType()));
5160
5161 return Result;
5162 }
5163 }
5164
5165 // OpenCL: If the condition is a vector, we can treat this condition like
5166 // the select function.
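  // For example (a sketch), for 'int4 c, a, b' in OpenCL, 'c ? a : b' takes
  // each component from a where the corresponding component of c has its most
  // significant bit set, and from b otherwise.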
5167 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
5168 condExpr->getType()->isExtVectorType()) {
5169 CGF.incrementProfileCounter(E);
5170
5171 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
5172 llvm::Value *LHS = Visit(E: lhsExpr);
5173 llvm::Value *RHS = Visit(E: rhsExpr);
5174
5175 llvm::Type *condType = ConvertType(T: condExpr->getType());
5176 auto *vecTy = cast<llvm::FixedVectorType>(Val: condType);
5177
5178 unsigned numElem = vecTy->getNumElements();
5179 llvm::Type *elemType = vecTy->getElementType();
5180
5181 llvm::Value *zeroVec = llvm::Constant::getNullValue(Ty: vecTy);
5182 llvm::Value *TestMSB = Builder.CreateICmpSLT(LHS: CondV, RHS: zeroVec);
5183 llvm::Value *tmp = Builder.CreateSExt(
5184 V: TestMSB, DestTy: llvm::FixedVectorType::get(ElementType: elemType, NumElts: numElem), Name: "sext");
5185 llvm::Value *tmp2 = Builder.CreateNot(V: tmp);
5186
5187 // Cast float to int to perform ANDs if necessary.
5188 llvm::Value *RHSTmp = RHS;
5189 llvm::Value *LHSTmp = LHS;
5190 bool wasCast = false;
5191 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(Val: RHS->getType());
5192 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5193 RHSTmp = Builder.CreateBitCast(V: RHS, DestTy: tmp2->getType());
5194 LHSTmp = Builder.CreateBitCast(V: LHS, DestTy: tmp->getType());
5195 wasCast = true;
5196 }
5197
5198 llvm::Value *tmp3 = Builder.CreateAnd(LHS: RHSTmp, RHS: tmp2);
5199 llvm::Value *tmp4 = Builder.CreateAnd(LHS: LHSTmp, RHS: tmp);
5200 llvm::Value *tmp5 = Builder.CreateOr(LHS: tmp3, RHS: tmp4, Name: "cond");
5201 if (wasCast)
5202 tmp5 = Builder.CreateBitCast(V: tmp5, DestTy: RHS->getType());
5203
5204 return tmp5;
5205 }
5206
5207 if (condExpr->getType()->isVectorType() ||
5208 condExpr->getType()->isSveVLSBuiltinType()) {
5209 CGF.incrementProfileCounter(E);
5210
5211 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
5212 llvm::Value *LHS = Visit(E: lhsExpr);
5213 llvm::Value *RHS = Visit(E: rhsExpr);
5214
5215 llvm::Type *CondType = ConvertType(T: condExpr->getType());
5216 auto *VecTy = cast<llvm::VectorType>(Val: CondType);
5217 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: VecTy);
5218
5219 CondV = Builder.CreateICmpNE(LHS: CondV, RHS: ZeroVec, Name: "vector_cond");
5220 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "vector_select");
5221 }
5222
5223 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5224 // select instead of as control flow. We can only do this if it is cheap and
5225 // safe to evaluate the LHS and RHS unconditionally.
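  // For example, 'x ? 4 : 5' can be emitted as (illustrative IR):
  //   %cond = select i1 %tobool, i32 4, i32 5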
5226 if (isCheapEnoughToEvaluateUnconditionally(E: lhsExpr, CGF) &&
5227 isCheapEnoughToEvaluateUnconditionally(E: rhsExpr, CGF)) {
5228 llvm::Value *CondV = CGF.EvaluateExprAsBool(E: condExpr);
5229 llvm::Value *StepV = Builder.CreateZExtOrBitCast(V: CondV, DestTy: CGF.Int64Ty);
5230
5231 if (llvm::EnableSingleByteCoverage) {
5232 CGF.incrementProfileCounter(lhsExpr);
5233 CGF.incrementProfileCounter(rhsExpr);
5234 CGF.incrementProfileCounter(E);
5235 } else
5236 CGF.incrementProfileCounter(E, StepV);
5237
5238 llvm::Value *LHS = Visit(E: lhsExpr);
5239 llvm::Value *RHS = Visit(E: rhsExpr);
5240 if (!LHS) {
5241 // If the conditional has void type, make sure we return a null Value*.
5242 assert(!RHS && "LHS and RHS types must match");
5243 return nullptr;
5244 }
5245 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "cond");
5246 }
5247
5248 // If the top of the logical operator nest, reset the MCDC temp to 0.
5249 if (CGF.MCDCLogOpStack.empty())
5250 CGF.maybeResetMCDCCondBitmap(E: condExpr);
5251
5252 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock(name: "cond.true");
5253 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "cond.false");
5254 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "cond.end");
5255
5256 CodeGenFunction::ConditionalEvaluation eval(CGF);
5257 CGF.EmitBranchOnBoolExpr(Cond: condExpr, TrueBlock: LHSBlock, FalseBlock: RHSBlock,
5258 TrueCount: CGF.getProfileCount(lhsExpr));
5259
5260 CGF.EmitBlock(BB: LHSBlock);
5261
5262 // If the top of the logical operator nest, update the MCDC bitmap for the
5263 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5264 // may also contain a boolean expression.
5265 if (CGF.MCDCLogOpStack.empty())
5266 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
5267
5268 if (llvm::EnableSingleByteCoverage)
5269 CGF.incrementProfileCounter(lhsExpr);
5270 else
5271 CGF.incrementProfileCounter(E);
5272
5273 eval.begin(CGF);
5274 Value *LHS = Visit(E: lhsExpr);
5275 eval.end(CGF);
5276
5277 LHSBlock = Builder.GetInsertBlock();
5278 Builder.CreateBr(Dest: ContBlock);
5279
5280 CGF.EmitBlock(BB: RHSBlock);
5281
5282 // If the top of the logical operator nest, update the MCDC bitmap for the
5283 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5284 // may also contain a boolean expression.
5285 if (CGF.MCDCLogOpStack.empty())
5286 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
5287
5288 if (llvm::EnableSingleByteCoverage)
5289 CGF.incrementProfileCounter(rhsExpr);
5290
5291 eval.begin(CGF);
5292 Value *RHS = Visit(E: rhsExpr);
5293 eval.end(CGF);
5294
5295 RHSBlock = Builder.GetInsertBlock();
5296 CGF.EmitBlock(BB: ContBlock);
5297
5298 // If the LHS or RHS is a throw expression, it will be legitimately null.
5299 if (!LHS)
5300 return RHS;
5301 if (!RHS)
5302 return LHS;
5303
5304 // Create a PHI node for the real part.
5305 llvm::PHINode *PN = Builder.CreatePHI(Ty: LHS->getType(), NumReservedValues: 2, Name: "cond");
5306 PN->addIncoming(V: LHS, BB: LHSBlock);
5307 PN->addIncoming(V: RHS, BB: RHSBlock);
5308
5309 // When single byte coverage mode is enabled, add a counter to continuation
5310 // block.
5311 if (llvm::EnableSingleByteCoverage)
5312 CGF.incrementProfileCounter(E);
5313
5314 return PN;
5315}
5316
5317Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5318 return Visit(E: E->getChosenSubExpr());
5319}
5320
5321Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5322 QualType Ty = VE->getType();
5323
5324 if (Ty->isVariablyModifiedType())
5325 CGF.EmitVariablyModifiedType(Ty);
5326
5327 Address ArgValue = Address::invalid();
5328 Address ArgPtr = CGF.EmitVAArg(VE, VAListAddr&: ArgValue);
5329
5330 llvm::Type *ArgTy = ConvertType(T: VE->getType());
5331
5332 // If EmitVAArg fails, emit an error.
5333 if (!ArgPtr.isValid()) {
5334 CGF.ErrorUnsupported(VE, "va_arg expression");
5335 return llvm::UndefValue::get(T: ArgTy);
5336 }
5337
5338 // FIXME Volatility.
5339 llvm::Value *Val = Builder.CreateLoad(Addr: ArgPtr);
5340
5341 // If EmitVAArg promoted the type, we must truncate it.
5342 if (ArgTy != Val->getType()) {
5343 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
5344 Val = Builder.CreateIntToPtr(V: Val, DestTy: ArgTy);
5345 else
5346 Val = Builder.CreateTrunc(V: Val, DestTy: ArgTy);
5347 }
5348
5349 return Val;
5350}
5351
5352Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5353 return CGF.EmitBlockLiteral(block);
5354}
5355
5356// Convert a vec3 to vec4, or vice versa.
5357static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
5358 Value *Src, unsigned NumElementsDst) {
5359 static constexpr int Mask[] = {0, 1, 2, -1};
5360 return Builder.CreateShuffleVector(V: Src, Mask: llvm::ArrayRef(Mask, NumElementsDst));
5361}
5362
5363// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5364// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5365// but could be scalar or vectors of different lengths, and either can be
5366// pointer.
5367// There are 4 cases:
5368// 1. non-pointer -> non-pointer : needs 1 bitcast
5369// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5370// 3. pointer -> non-pointer
5371// a) pointer -> intptr_t : needs 1 ptrtoint
5372// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5373// 4. non-pointer -> pointer
5374// a) intptr_t -> pointer : needs 1 inttoptr
5375// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5376// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5377// allow casting directly between pointer types and non-integer non-pointer
5378// types.
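// For example (case 3b, a sketch): converting an 8-byte pointer to 'double'
// emits a ptrtoint to i64 followed by a bitcast to double.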
5379static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
5380 const llvm::DataLayout &DL,
5381 Value *Src, llvm::Type *DstTy,
5382 StringRef Name = "") {
5383 auto SrcTy = Src->getType();
5384
5385 // Case 1.
5386 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5387 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name);
5388
5389 // Case 2.
5390 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5391 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: Src, DestTy: DstTy, Name);
5392
5393 // Case 3.
5394 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5395 // Case 3b.
5396 if (!DstTy->isIntegerTy())
5397 Src = Builder.CreatePtrToInt(V: Src, DestTy: DL.getIntPtrType(SrcTy));
5398 // Cases 3a and 3b.
5399 return Builder.CreateBitOrPointerCast(V: Src, DestTy: DstTy, Name);
5400 }
5401
5402 // Case 4b.
5403 if (!SrcTy->isIntegerTy())
5404 Src = Builder.CreateBitCast(V: Src, DestTy: DL.getIntPtrType(DstTy));
5405 // Cases 4a and 4b.
5406 return Builder.CreateIntToPtr(V: Src, DestTy: DstTy, Name);
5407}
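
// Illustrative example (assuming 64-bit pointers): a ptr -> <2 x i32>
// conversion is case 3b and needs two instructions,
//   %i = ptrtoint ptr %src to i64
//   %v = bitcast i64 %i to <2 x i32>
// whereas i64 -> ptr is case 4a and needs only
//   %p = inttoptr i64 %src to ptr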

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                      DstTy, "astype");
}
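
// Illustrative OpenCL example: `as_int4(f3)` with a float3 argument takes the
// vec3 -> non-vec3 path above: the <3 x float> is widened to <4 x float> by a
// shuffle and then bitcast to <4 x i32>, reflecting that 3-component vectors
// occupy the storage of 4-component ones.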

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type. If
/// IgnoreResultAssign is set, the caller will not use the result, which lets
/// assignment emission skip producing a value for the assignment itself.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}

llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress(*this);
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}
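
// For reference (illustrative), COMPOUND_OP(Add) above expands to:
//   case BO_AddAssign:
//     return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
//                                            Result);
// so each `op=` form reuses the emitter for the corresponding binary operator.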

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag indicating whether an
/// overflow happened during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}
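
// Worked example (illustrative, 64-bit target): given
//   struct S { int a; int b; };
//   %gep = getelementptr inbounds %struct.S, ptr %p, i64 %i, i32 1
// the loop computes TotalOffset as smul.with.overflow(%i, 8) for the
// sequential index (8 == sizeof(S)), then sadd.with.overflow of that result
// with 4 (the byte offset of field b), OR-ing each intrinsic's overflow bit
// into OffsetOverflows.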

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();
  Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-zero base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}
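
// Illustrative example (C, -fsanitize=pointer-overflow): for `p + n` with a
// signed index n, this conceptually emits
//   %off   = n * sizeof(*p)                  ; via checked multiplication
//   %end   = ptrtoint(p) + %off              ; wrapping add
//   %valid = select(%off >= 0, %end uge %p, %end ult %p), and no overflow
// and diagnoses through the pointer-overflow runtime handler
// (__ubsan_handle_pointer_overflow) when %valid is false.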

Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);

  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}