//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm::PatternMatch;
// As a default, let's assume that we want to be aggressive,
// and attempt to traverse with no limits in an attempt to sink negation.
static constexpr unsigned NegatorDefaultMaxDepth = ~0U;

// Let's guesstimate that most often we will end up visiting/producing
// a fairly small number of new instructions.
static constexpr unsigned NegatorMaxNodesSSO = 16;

namespace llvm {

class AAResults;
class APInt;
class AssumptionCache;
class BlockFrequencyInfo;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class LoopInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class User;

class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
    : public InstCombiner,
      public InstVisitor<InstCombinerImpl, Instruction *> {
public:
  InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder,
                   bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
                   TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
                   DominatorTree &DT, OptimizationRemarkEmitter &ORE,
                   BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI,
                   ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
      : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT,
                     ORE, BFI, BPI, PSI, DL, LI) {}

  virtual ~InstCombinerImpl() = default;

  /// Perform early cleanup and prepare the InstCombine worklist.
  bool prepareWorklist(Function &F,
                       ReversePostOrderTraversal<BasicBlock *> &RPOT);

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //   Return Value:
  //     null      - No change was made
  //     I         - Change was made, I is still valid, I may be dead though
  //     otherwise - Change was made, replace I with returned instruction
  //
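  // For illustration, a hypothetical visit method honoring this contract
  // (tryFoldFoo and visitFoo are made-up names, not part of this interface):
  //
  //   Instruction *visitFoo(FooInst &I) {
  //     if (Value *V = tryFoldFoo(I))
  //       return replaceInstUsesWith(I, V); // change made; I is valid but dead
  //     return nullptr;                     // no change was made
  //   }
  //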
  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(
      Value *LHS, Value *RHS, Type *Ty, bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *foldPowiReassoc(BinaryOperator &I);
  Instruction *foldFMulReassoc(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  bool sinkNotIntoLogicalOp(Instruction &I);
  bool sinkNotIntoOtherHandOfLogicalOp(Instruction &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  CmpInst *canonicalizeICmpPredicate(CmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &Zext);
  Instruction *visitSExt(SExtInst &Sext);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI, Value *FreedOp);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitUnconditionalBranchInst(BranchInst &BI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *
  foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *simplifyBinOpSplats(ShuffleVectorInst &SVI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAEndInst(VAEndInst &I);
  Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
  bool freezeOtherUses(FreezeInst &FI);
  Instruction *foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN);
  Instruction *visitFreeze(FreezeInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");

  KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF,
                                   FPClassTest Interested = fcAllFlags,
                                   const Instruction *CtxI = nullptr,
                                   unsigned Depth = 0) const {
    return llvm::computeKnownFPClass(
        Val, FMF, Interested, Depth,
        getSimplifyQuery().getWithInstruction(CtxI));
  }

  KnownFPClass computeKnownFPClass(Value *Val,
                                   FPClassTest Interested = fcAllFlags,
                                   const Instruction *CtxI = nullptr,
                                   unsigned Depth = 0) const {
    return llvm::computeKnownFPClass(
        Val, Interested, Depth, getSimplifyQuery().getWithInstruction(CtxI));
  }

  /// Check if fmul \p MulVal, +0.0 will yield +0.0 (or signed zero is
  /// ignorable).
  bool fmulByZeroIsZero(Value *MulVal, FastMathFlags FMF,
                        const Instruction *CtxI) const;

  Constant *getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp) {
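    // Truncate, then re-extend with ExtOp; the truncation is lossless iff the
    // round trip reproduces the original constant exactly.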
    Constant *TruncC = ConstantExpr::getTrunc(C, TruncTy);
    Constant *ExtTruncC =
        ConstantFoldCastOperand(ExtOp, TruncC, C->getType(), DL);
    if (ExtTruncC && ExtTruncC == C)
      return TruncC;
    return nullptr;
  }

  Constant *getLosslessUnsignedTrunc(Constant *C, Type *TruncTy) {
    return getLosslessTrunc(C, TruncTy, Instruction::ZExt);
  }

  Constant *getLosslessSignedTrunc(Constant *C, Type *TruncTy) {
    return getLosslessTrunc(C, TruncTy, Instruction::SExt);
  }
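
  // Illustrative use of the helpers above: for C = i32 255 and TruncTy = i8,
  // getLosslessUnsignedTrunc returns i8 255 (zext'ing it back yields 255
  // again), while getLosslessSignedTrunc returns nullptr (sext of i8 255,
  // i.e. -1, gives i32 -1 != 255).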

  std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
  convertOrOfShiftsToFunnelShift(Instruction &Or);

private:
  bool annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
  bool isDesirableIntType(unsigned BitWidth) const;
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
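  ///
  /// For illustration, the two shapes handled look roughly like:
  /// \code
  ///   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  ///   ; CtxI is the call itself
  ///
  ///   %s = add i32 %a, %b
  ///   %ov = icmp ult i32 %s, %a ; unsigned-add overflow check; CtxI is the add
  /// \endcode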
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS, Instruction &CtxI,
                             Value *&OperationResult,
                             Constant *&OverflowResult);

  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  // Return (a, b) if (LHS, RHS) is known to be (a, b) or (b, a).
  // Otherwise, return std::nullopt.
  // Currently it matches:
  // - LHS = (select c, a, b), RHS = (select c, b, a)
  // - LHS = (phi [a, BB0], [b, BB1]), RHS = (phi [b, BB0], [a, BB1])
  // - LHS = min(a, b), RHS = max(a, b)
  std::optional<std::pair<Value *, Value *>> matchSymmetricPair(Value *LHS,
                                                                Value *RHS);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param Cmp The icmp of the (zext icmp) pair we are interested in.
  /// \param Zext The zext of the (zext icmp) pair we are interested in.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned.
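  ///
  /// For example (one illustrative case, not an exhaustive list):
  /// \code
  ///   %a = and i32 %x, 1
  ///   %c = icmp ne i32 %a, 0
  ///   %z = zext i1 %c to i32
  /// \endcode
  /// can be replaced by %a, eliminating both the icmp and the zext.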
  Instruction *transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext);

  Instruction *transformSExtICmp(ICmpInst *Cmp, SExtInst &Sext);

  bool willNotOverflowSignedAdd(const WithCache<const Value *> &LHS,
                                const WithCache<const Value *> &RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const WithCache<const Value *> &LHS,
                                  const WithCache<const Value *> &RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldBitcastExtElt(ExtractElementInst &ExtElt);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *foldFBinOpOfIntCasts(BinaryOperator &I);
  // Should only be called by `foldFBinOpOfIntCasts`.
  Instruction *foldFBinOpOfIntCastsFromSign(
      BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
      Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown);
  Instruction *foldBinopOfSextBoolToSelect(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowFunnelShift(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(IntrinsicInst &MinMax1);
  Instruction *foldNot(BinaryOperator &I);
  Instruction *foldBinOpOfDisplacedShifts(BinaryOperator &I);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
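  ///
  /// For example (illustrative): a zext i8 -> i16 followed by a
  /// zext i16 -> i32 is replaceable by a single zext i8 -> i32, whereas a
  /// trunc i32 -> i16 followed by a sext i16 -> i32 has no single-cast
  /// equivalent, so 0 is returned for that pair.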
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);
  Value *simplifyIntToPtrRoundTripCast(Value *Val);

  Value *foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &I,
                          bool IsAnd, bool IsLogical = false);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);

  Value *foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd);

  Value *foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1, ICmpInst *ICmp2,
                                     bool IsAnd);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd,
                          bool IsLogicalSelect = false);

  Instruction *foldLogicOfIsFPClass(BinaryOperator &Operator, Value *LHS,
                                    Value *RHS);

  Instruction *
  canonicalizeConditionalNegationViaMathToSelect(BinaryOperator &i);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       Instruction *CxtI, bool IsAnd,
                                       bool IsLogical = false);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D,
                              bool InvertFalseVal = false);
  Value *getSelectCondition(Value *A, Value *B, bool ABIsTheSame);

  Instruction *foldLShrOverflowBit(BinaryOperator &I);
  Instruction *foldExtractOfOverflowIntrinsic(ExtractValueInst &EV);
  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
  Instruction *foldIntrinsicIsFPClass(IntrinsicInst &II);
  Instruction *foldFPSignBitOps(BinaryOperator &I);
  Instruction *foldFDivConstantDivisor(BinaryOperator &I);

  // Optimize one of these forms:
  //   and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
  //   or i1 Op, SI / select i1 Op, i1 true, i1 SI (if IsAnd = false)
  // into a simpler select instruction using isImpliedCondition.
  Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
                                                 bool IsAnd);

  Instruction *hoistFNegAboveFMulFDiv(Value *FNegOp, Instruction &FMFSource);

public:
  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
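    // Storing to a poison pointer is immediate undefined behavior, which later
    // passes understand as "this point is never reached", giving the effect of
    // unreachable without inserting a terminator or rewriting the CFG here.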
    auto &Ctx = InsertAt->getContext();
    auto *SI = new StoreInst(ConstantInt::getTrue(Ctx),
                             PoisonValue::get(PointerType::getUnqual(Ctx)),
                             /*isVolatile*/ false, Align(1));
    InsertNewInstBefore(SI, InsertAt->getIterator());
  }

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) override {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    SmallVector<Value *> Ops(I.operands());
    Worklist.remove(&I);
    DC.removeValue(&I);
    I.eraseFromParent();
    for (Value *Op : Ops)
      Worklist.handleUseCountDecrement(Op);
    MadeIRChange = true;
    return nullptr; // Don't do anything with I.
  }

  OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp,
                                 bool IsSigned, Value *LHS, Value *RHS,
                                 Instruction *CxtI) const;

  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (eg "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *foldUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
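  ///
  /// Worked example (illustrative, for unsigned X): with C0 = 3 and C1 = 5,
  ///   X % 3 + ((X / 3) % 5) * 3
  /// enumerates exactly the values of X % 15, so the whole expression
  /// simplifies to X % 15.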
  Value *SimplifyAddWithRemainder(BinaryOperator &I);

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  // If `I` has operand `(ctpop (not x))`, fold `I` with `(sub nuw nsw
  // BitWidth(x), (ctpop x))`.
  Instruction *tryFoldInstWithCtpopWithNot(Instruction *I);

  // (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
  //   -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
  // (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
  //   -> (BinOp (logic_shift (BinOp X, Y)), Mask)
  Instruction *foldBinOpShiftWithShift(BinaryOperator &I);

  /// Tries to simplify binops of select and cast of the select condition.
  ///
  /// (Binop (cast C), (select C, T, F))
  ///   -> (select C, C0, C1)
  Instruction *foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorizationFolds(BinaryOperator &I);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
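  ///
  /// One shape of the idiom, for illustration:
  /// \code
  ///   %lt = icmp slt i32 %L, %R
  ///   %eq = icmp eq i32 %L, %R
  ///   %sel.eq = select i1 %eq, i32 0, i32 1
  ///   %sel = select i1 %lt, i32 -1, i32 %sel.eq
  /// \endcode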
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                 KnownBits &Known, unsigned Depth,
                                 Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0) override;
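
  // An illustrative demanded-bits simplification: in (and (or X, 255), 0xF00)
  // only bits 8..11 of the 'or' are demanded, and the constant 255 has no
  // bits in that range, so the 'or' may be replaced by X outright.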

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         unsigned Depth, Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify the
  /// demanded bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction
  /// sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);
  bool SimplifyDemandedInstructionBits(Instruction &Inst, KnownBits &Known);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &PoisonElts, unsigned Depth = 0,
                                    bool AllowMultipleUsers = false) override;

  /// Attempts to replace V with a simpler value based on the demanded
  /// floating-point classes.
  Value *SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask,
                                    KnownFPClass &Known, unsigned Depth,
                                    Instruction *CxtI);
  bool SimplifyDemandedFPClass(Instruction *I, unsigned Op,
                               FPClassTest DemandedMask, KnownFPClass &Known,
                               unsigned Depth = 0);

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);
  Instruction *foldVectorSelect(SelectInst &Sel);
  Instruction *foldSelectShuffle(ShuffleVectorInst &Shuf);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// For a binary operator with 2 phi operands, try to hoist the binary
  /// operation before the phi. This can result in fewer instructions in
  /// patterns where at least one set of phi operands simplifies.
  /// Example:
  ///   BB3: binop (phi [X, BB1], [C1, BB2]), (phi [Y, BB1], [C2, BB2])
  /// -->
  ///   BB1: BO = binop X, Y
  ///   BB3: phi [BO, BB1], [(binop C1, C2), BB2]
  Instruction *foldBinopWithPhiOperands(BinaryOperator &BO);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
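  ///
  /// For example (illustrative): add (select C, 1, 2), 8 can become
  /// select C, 9, 10, folding the add into both arms of the select.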
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                bool FoldWithMultiUse = false);

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  Instruction *foldSquareSumInt(BinaryOperator &I);
  Instruction *foldSquareSumFP(BinaryOperator &I);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);
  Instruction *foldPHIArgIntToPtrToPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists. Otherwise
  /// insert a new pointer typed PHI and replace the original one.
  bool foldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldSelectICmp(ICmpInst::Predicate Pred, SelectInst *SI,
                              Value *RHS, const ICmpInst &I);
  bool foldAllocaCmp(AllocaInst *Alloca);
  Instruction *foldCmpLoadFromIndexedGlobal(LoadInst *LI,
                                            GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICmp);
  Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpUsingBoolRange(ICmpInst &I);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
                                                   const APInt &C);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax,
                                  Value *Z, ICmpInst::Predicate Pred);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldMultiplicationOverflowCheck(ICmpInst &Cmp);

  Instruction *foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO,
                                         const APInt &C);
  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
                                           const SimplifyQuery &Q);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *SRem,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor,
                                     const APInt &C);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI,
                                               IntrinsicInst *II,
                                               const APInt &C);
  Instruction *foldICmpBitCast(ICmpInst &Cmp);
  Instruction *foldICmpWithTrunc(ICmpInst &Cmp);
  Instruction *foldICmpCommutative(ICmpInst::Predicate Pred, Value *Op0,
                                   Value *Op1, ICmpInst &CxtI);

  // Helpers of visitSelectInst().
  Instruction *foldSelectOfBools(SelectInst &SI);
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI);
  bool replaceInInstruction(Value *V, Value *Old, Value *New,
                            unsigned Depth = 0);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an initial instruction, check to see if it is the root of a
  /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
  /// intrinsic.
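  ///
  /// A 16-bit example of the idiom, for illustration:
  /// \code
  ///   %hi = shl i16 %x, 8
  ///   %lo = lshr i16 %x, 8
  ///   %r = or i16 %hi, %lo
  /// \endcode
  /// which becomes call i16 @llvm.bswap.i16(i16 %x).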
  Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
                                      bool MatchBitReversals);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock);
  void tryToSinkInstructionDbgValues(
      Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
      BasicBlock *DestBlock, SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers);
  void tryToSinkInstructionDbgVariableRecords(
      Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
      BasicBlock *DestBlock, SmallVectorImpl<DbgVariableRecord *> &DPUsers);

  bool removeInstructionsBeforeUnreachable(Instruction &I);
  void addDeadEdge(BasicBlock *From, BasicBlock *To,
                   SmallVectorImpl<BasicBlock *> &Worklist);
  void handleUnreachableFrom(Instruction *I,
                             SmallVectorImpl<BasicBlock *> &Worklist);
  void handlePotentiallyDeadBlocks(SmallVectorImpl<BasicBlock *> &Worklist);
  void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc);
  void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser = nullptr);
};

class Negator final {
  /// Top-to-bottom, def-to-use negated instruction tree we produced.
  SmallVector<Instruction *, NegatorMaxNodesSSO> NewInstructions;

  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy Builder;

  const bool IsTrulyNegation;

  SmallDenseMap<Value *, Value *> NegationsCache;

  Negator(LLVMContext &C, const DataLayout &DL, bool IsTrulyNegation);

#if LLVM_ENABLE_STATS
  unsigned NumValuesVisitedInThisNegator = 0;
  ~Negator();
#endif

  using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
                           Value * /*NegatedRoot*/>;

  std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);

  [[nodiscard]] Value *visitImpl(Value *V, bool IsNSW, unsigned Depth);

  [[nodiscard]] Value *negate(Value *V, bool IsNSW, unsigned Depth);

  /// Recurse depth-first and attempt to sink the negation.
  /// FIXME: use worklist?
  [[nodiscard]] std::optional<Result> run(Value *Root, bool IsNSW);

  Negator(const Negator &) = delete;
  Negator(Negator &&) = delete;
  Negator &operator=(const Negator &) = delete;
  Negator &operator=(Negator &&) = delete;

public:
  /// Attempt to negate \p Root. Returns nullptr if negation can't be
  /// performed, otherwise returns the negated value.
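  ///
  /// For example (illustrative): negating (sub X, Y) yields (sub Y, X) at no
  /// extra cost, and negating (mul X, C) yields (mul X, -C).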
  [[nodiscard]] static Value *Negate(bool LHSIsZero, bool IsNSW, Value *Root,
                                     InstCombinerImpl &IC);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H