1//===- CorrelatedValuePropagation.cpp - Propagate CFG-derived info --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the Correlated Value Propagation pass.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
14#include "llvm/ADT/DepthFirstIterator.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/DomTreeUpdater.h"
18#include "llvm/Analysis/GlobalsModRef.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/LazyValueInfo.h"
21#include "llvm/Analysis/ValueTracking.h"
22#include "llvm/IR/Attributes.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/CFG.h"
25#include "llvm/IR/Constant.h"
26#include "llvm/IR/ConstantRange.h"
27#include "llvm/IR/Constants.h"
28#include "llvm/IR/DerivedTypes.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/IRBuilder.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Instructions.h"
34#include "llvm/IR/IntrinsicInst.h"
35#include "llvm/IR/Operator.h"
36#include "llvm/IR/PassManager.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
39#include "llvm/Support/Casting.h"
40#include "llvm/Support/CommandLine.h"
41#include "llvm/Transforms/Utils/Local.h"
42#include <cassert>
43#include <optional>
44#include <utility>
45
46using namespace llvm;
47
48#define DEBUG_TYPE "correlated-value-propagation"
49
// Command-line toggle (default on): allow rewriting signed relational icmp
// predicates to their unsigned equivalents when LVI proves the operand
// ranges make the two forms interchangeable.
static cl::opt<bool> CanonicalizeICmpPredicatesToUnsigned(
    "canonicalize-icmp-predicates-to-unsigned", cl::init(Val: true), cl::Hidden,
    cl::desc("Enables canonicalization of signed relational predicates to "
             "unsigned (e.g. sgt => ugt)"));

// Pass statistics, reported with -stats. Each counter is bumped at the point
// the corresponding transform actually fires.
STATISTIC(NumPhis,      "Number of phis propagated");
STATISTIC(NumPhiCommon, "Number of phis deleted via common incoming value");
STATISTIC(NumSelects, "Number of selects propagated");
STATISTIC(NumCmps, "Number of comparisons propagated");
STATISTIC(NumReturns, "Number of return values propagated");
STATISTIC(NumDeadCases, "Number of switch cases removed");
STATISTIC(NumSDivSRemsNarrowed,
          "Number of sdivs/srems whose width was decreased");
STATISTIC(NumSDivs, "Number of sdiv converted to udiv");
STATISTIC(NumUDivURemsNarrowed,
          "Number of udivs/urems whose width was decreased");
STATISTIC(NumAShrsConverted, "Number of ashr converted to lshr");
STATISTIC(NumAShrsRemoved, "Number of ashr removed");
STATISTIC(NumSRems, "Number of srem converted to urem");
STATISTIC(NumSExt, "Number of sext converted to zext");
STATISTIC(NumSICmps, "Number of signed icmp preds simplified to unsigned");
STATISTIC(NumAnd, "Number of ands removed");
// No-wrap deduction counters: NumNW/NumNSW/NumNUW aggregate across opcodes;
// the per-opcode variants below break the same events down by instruction.
STATISTIC(NumNW, "Number of no-wrap deductions");
STATISTIC(NumNSW, "Number of no-signed-wrap deductions");
STATISTIC(NumNUW, "Number of no-unsigned-wrap deductions");
STATISTIC(NumAddNW, "Number of no-wrap deductions for add");
STATISTIC(NumAddNSW, "Number of no-signed-wrap deductions for add");
STATISTIC(NumAddNUW, "Number of no-unsigned-wrap deductions for add");
STATISTIC(NumSubNW, "Number of no-wrap deductions for sub");
STATISTIC(NumSubNSW, "Number of no-signed-wrap deductions for sub");
STATISTIC(NumSubNUW, "Number of no-unsigned-wrap deductions for sub");
STATISTIC(NumMulNW, "Number of no-wrap deductions for mul");
STATISTIC(NumMulNSW, "Number of no-signed-wrap deductions for mul");
STATISTIC(NumMulNUW, "Number of no-unsigned-wrap deductions for mul");
STATISTIC(NumShlNW, "Number of no-wrap deductions for shl");
STATISTIC(NumShlNSW, "Number of no-signed-wrap deductions for shl");
STATISTIC(NumShlNUW, "Number of no-unsigned-wrap deductions for shl");
STATISTIC(NumAbs, "Number of llvm.abs intrinsics removed");
STATISTIC(NumOverflows, "Number of overflow checks removed");
STATISTIC(NumSaturating,
    "Number of saturating arithmetics converted to normal arithmetics");
STATISTIC(NumNonNull, "Number of function pointer arguments marked non-null");
STATISTIC(NumMinMax, "Number of llvm.[us]{min,max} intrinsics removed");
STATISTIC(NumUDivURemsNarrowedExpanded,
          "Number of bound udiv's/urem's expanded");
STATISTIC(NumZExt, "Number of non-negative deductions");
96
/// Try to resolve \p V to a Constant at the program point \p At: first via
/// LVI's direct constant lookup, then — for a comparison against a constant
/// RHS — by asking LVI to evaluate the predicate there. Returns null if
/// nothing can be proven.
static Constant *getConstantAt(Value *V, Instruction *At, LazyValueInfo *LVI) {
  if (Constant *C = LVI->getConstant(V, CxtI: At))
    return C;

  // TODO: The following really should be sunk inside LVI's core algorithm, or
  // at least the outer shims around such.
  auto *C = dyn_cast<CmpInst>(Val: V);
  if (!C)
    return nullptr;

  // Only "cmp <value>, <constant>" forms are handled; getPredicateAt needs a
  // constant right-hand side.
  Value *Op0 = C->getOperand(i_nocapture: 0);
  Constant *Op1 = dyn_cast<Constant>(Val: C->getOperand(i_nocapture: 1));
  if (!Op1)
    return nullptr;

  LazyValueInfo::Tristate Result = LVI->getPredicateAt(
      Pred: C->getPredicate(), V: Op0, C: Op1, CxtI: At, /*UseBlockValue=*/false);
  if (Result == LazyValueInfo::Unknown)
    return nullptr;

  // The comparison folds to a known i1 value at this point.
  return (Result == LazyValueInfo::True)
             ? ConstantInt::getTrue(Context&: C->getContext())
             : ConstantInt::getFalse(Context&: C->getContext());
}
121
/// Fold individual uses of a scalar select whose condition LVI can prove at
/// the use site: each such use is redirected to the corresponding select arm,
/// and the select itself is erased once all uses are gone.
static bool processSelect(SelectInst *S, LazyValueInfo *LVI) {
  // Vector selects and already-constant conditions are out of scope here.
  if (S->getType()->isVectorTy() || isa<Constant>(Val: S->getCondition()))
    return false;

  bool Changed = false;
  // early-inc range: U.set() below rewrites the use list while iterating.
  for (Use &U : make_early_inc_range(Range: S->uses())) {
    auto *I = cast<Instruction>(Val: U.getUser());
    Constant *C;
    if (auto *PN = dyn_cast<PHINode>(Val: I))
      // For a phi use, the condition only needs to be known on the specific
      // incoming edge feeding this use.
      C = LVI->getConstantOnEdge(V: S->getCondition(), FromBB: PN->getIncomingBlock(U),
                                 ToBB: I->getParent(), CxtI: I);
    else
      C = getConstantAt(V: S->getCondition(), At: I, LVI);

    auto *CI = dyn_cast_or_null<ConstantInt>(Val: C);
    if (!CI)
      continue;

    // Condition is known at this use: pick the corresponding arm.
    U.set(CI->isOne() ? S->getTrueValue() : S->getFalseValue());
    Changed = true;
    ++NumSelects;
  }

  if (Changed && S->use_empty())
    S->eraseFromParent();

  return Changed;
}
150
/// Try to simplify a phi with constant incoming values that match the edge
/// values of a non-constant value on all other edges:
/// bb0:
/// %isnull = icmp eq i8* %x, null
/// br i1 %isnull, label %bb2, label %bb1
/// bb1:
/// br label %bb2
/// bb2:
/// %r = phi i8* [ %x, %bb1 ], [ null, %bb0 ]
/// -->
/// %r = %x
static bool simplifyCommonValuePhi(PHINode *P, LazyValueInfo *LVI,
                                   DominatorTree *DT) {
  // Collect incoming constants and initialize possible common value.
  SmallVector<std::pair<Constant *, unsigned>, 4> IncomingConstants;
  Value *CommonValue = nullptr;
  for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = P->getIncomingValue(i);
    if (auto *IncomingConstant = dyn_cast<Constant>(Val: Incoming)) {
      // Remember the constant together with its operand index so we can look
      // up its incoming block later.
      IncomingConstants.push_back(Elt: std::make_pair(x&: IncomingConstant, y&: i));
    } else if (!CommonValue) {
      // The potential common value is initialized to the first non-constant.
      CommonValue = Incoming;
    } else if (Incoming != CommonValue) {
      // There can be only one non-constant common value.
      return false;
    }
  }

  // Need both one variable candidate and at least one constant to fold.
  if (!CommonValue || IncomingConstants.empty())
    return false;

  // The common value must be valid in all incoming blocks.
  BasicBlock *ToBB = P->getParent();
  if (auto *CommonInst = dyn_cast<Instruction>(Val: CommonValue))
    if (!DT->dominates(Def: CommonInst, BB: ToBB))
      return false;

  // We have a phi with exactly 1 variable incoming value and 1 or more constant
  // incoming values. See if all constant incoming values can be mapped back to
  // the same incoming variable value.
  for (auto &IncomingConstant : IncomingConstants) {
    Constant *C = IncomingConstant.first;
    BasicBlock *IncomingBB = P->getIncomingBlock(i: IncomingConstant.second);
    if (C != LVI->getConstantOnEdge(V: CommonValue, FromBB: IncomingBB, ToBB, CxtI: P))
      return false;
  }

  // LVI only guarantees that the value matches a certain constant if the value
  // is not poison. Make sure we don't replace a well-defined value with poison.
  // This is usually satisfied due to a prior branch on the value.
  if (!isGuaranteedNotToBePoison(V: CommonValue, AC: nullptr, CtxI: P, DT))
    return false;

  // All constant incoming values map to the same variable along the incoming
  // edges of the phi. The phi is unnecessary.
  P->replaceAllUsesWith(V: CommonValue);
  P->eraseFromParent();
  ++NumPhiCommon;
  return true;
}
212
/// Compute a simplified replacement for \p Incoming on the CFG edge
/// From -> To: either a constant LVI knows on that edge, or one arm of an
/// incoming select whose outcome LVI can determine there. Returns null when
/// nothing better is known.
static Value *getValueOnEdge(LazyValueInfo *LVI, Value *Incoming,
                             BasicBlock *From, BasicBlock *To,
                             Instruction *CxtI) {
  if (Constant *C = LVI->getConstantOnEdge(V: Incoming, FromBB: From, ToBB: To, CxtI))
    return C;

  // Look if the incoming value is a select with a scalar condition for which
  // LVI can tells us the value. In that case replace the incoming value with
  // the appropriate value of the select. This often allows us to remove the
  // select later.
  auto *SI = dyn_cast<SelectInst>(Val: Incoming);
  if (!SI)
    return nullptr;

  // Once LVI learns to handle vector types, we could also add support
  // for vector type constants that are not all zeroes or all ones.
  Value *Condition = SI->getCondition();
  if (!Condition->getType()->isVectorTy()) {
    if (Constant *C = LVI->getConstantOnEdge(V: Condition, FromBB: From, ToBB: To, CxtI)) {
      if (C->isOneValue())
        return SI->getTrueValue();
      if (C->isZeroValue())
        return SI->getFalseValue();
    }
  }

  // Look if the select has a constant but LVI tells us that the incoming
  // value can never be that constant. In that case replace the incoming
  // value with the other value of the select. This often allows us to
  // remove the select later.

  // The "false" case
  if (auto *C = dyn_cast<Constant>(Val: SI->getFalseValue()))
    if (LVI->getPredicateOnEdge(Pred: ICmpInst::ICMP_EQ, V: SI, C, FromBB: From, ToBB: To, CxtI) ==
            LazyValueInfo::False)
      return SI->getTrueValue();

  // The "true" case,
  // similar to the select "false" case, but try the select "true" value
  if (auto *C = dyn_cast<Constant>(Val: SI->getTrueValue()))
    if (LVI->getPredicateOnEdge(Pred: ICmpInst::ICMP_EQ, V: SI, C, FromBB: From, ToBB: To, CxtI) ==
            LazyValueInfo::False)
      return SI->getFalseValue();

  return nullptr;
}
259
/// Simplify a phi: replace each non-constant incoming value with whatever
/// getValueOnEdge can prove on its edge, then try to fold the whole phi via
/// InstSimplify, and finally fall back to the common-incoming-value
/// simplification.
static bool processPHI(PHINode *P, LazyValueInfo *LVI, DominatorTree *DT,
                       const SimplifyQuery &SQ) {
  bool Changed = false;

  BasicBlock *BB = P->getParent();
  for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
    Value *Incoming = P->getIncomingValue(i);
    // Already-constant incoming values cannot be improved.
    if (isa<Constant>(Val: Incoming)) continue;

    Value *V = getValueOnEdge(LVI, Incoming, From: P->getIncomingBlock(i), To: BB, CxtI: P);
    if (V) {
      P->setIncomingValue(i, V);
      Changed = true;
    }
  }

  // The rewritten incoming values may now make the whole phi redundant.
  if (Value *V = simplifyInstruction(I: P, Q: SQ)) {
    P->replaceAllUsesWith(V);
    P->eraseFromParent();
    Changed = true;
  }

  if (!Changed)
    Changed = simplifyCommonValuePhi(P, LVI, DT);

  if (Changed)
    ++NumPhis;

  return Changed;
}
290
/// Canonicalize a signed relational scalar icmp to the equivalent unsigned
/// predicate (e.g. sgt => ugt) when the LVI-computed operand ranges prove the
/// two forms agree. Controlled by -canonicalize-icmp-predicates-to-unsigned.
static bool processICmp(ICmpInst *Cmp, LazyValueInfo *LVI) {
  if (!CanonicalizeICmpPredicatesToUnsigned)
    return false;

  // Only for signed relational comparisons of scalar integers.
  if (Cmp->getType()->isVectorTy() ||
      !Cmp->getOperand(i_nocapture: 0)->getType()->isIntegerTy())
    return false;

  if (!Cmp->isSigned())
    return false;

  ICmpInst::Predicate UnsignedPred =
      ConstantRange::getEquivalentPredWithFlippedSignedness(
          Pred: Cmp->getPredicate(),
          CR1: LVI->getConstantRangeAtUse(U: Cmp->getOperandUse(i: 0),
                                     /*UndefAllowed*/ true),
          CR2: LVI->getConstantRangeAtUse(U: Cmp->getOperandUse(i: 1),
                                     /*UndefAllowed*/ true));

  // BAD_ICMP_PREDICATE means no unsigned equivalent exists for these ranges.
  if (UnsignedPred == ICmpInst::Predicate::BAD_ICMP_PREDICATE)
    return false;

  ++NumSICmps;
  Cmp->setPredicate(UnsignedPred);

  return true;
}
319
/// See if LazyValueInfo's ability to exploit edge conditions or range
/// information is sufficient to prove this comparison. Even for local
/// conditions, this can sometimes prove conditions instcombine can't by
/// exploiting range information.
static bool constantFoldCmp(CmpInst *Cmp, LazyValueInfo *LVI) {
  Value *Op0 = Cmp->getOperand(i_nocapture: 0);
  Value *Op1 = Cmp->getOperand(i_nocapture: 1);
  LazyValueInfo::Tristate Result =
      LVI->getPredicateAt(Pred: Cmp->getPredicate(), LHS: Op0, RHS: Op1, CxtI: Cmp,
                          /*UseBlockValue=*/true);
  if (Result == LazyValueInfo::Unknown)
    return false;

  ++NumCmps;
  // Tristate True/False maps directly onto an i1 constant of the compare's
  // result type (which handles both scalar and vector compares).
  Constant *TorF =
      ConstantInt::get(Ty: CmpInst::makeCmpResultType(opnd_type: Op0->getType()), V: Result);
  Cmp->replaceAllUsesWith(V: TorF);
  Cmp->eraseFromParent();
  return true;
}
340
341static bool processCmp(CmpInst *Cmp, LazyValueInfo *LVI) {
342 if (constantFoldCmp(Cmp, LVI))
343 return true;
344
345 if (auto *ICmp = dyn_cast<ICmpInst>(Val: Cmp))
346 if (processICmp(Cmp: ICmp, LVI))
347 return true;
348
349 return false;
350}
351
/// Simplify a switch instruction by removing cases which can never fire. If the
/// uselessness of a case could be determined locally then constant propagation
/// would already have figured it out. Instead, walk the predecessors and
/// statically evaluate cases based on information available on that edge. Cases
/// that cannot fire no matter what the incoming edge can safely be removed. If
/// a case fires on every incoming edge then the entire switch can be removed
/// and replaced with a branch to the case destination.
static bool processSwitch(SwitchInst *I, LazyValueInfo *LVI,
                          DominatorTree *DT) {
  // Lazy DTU: edge deletions are queued and flushed when DT is next queried.
  DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy);
  Value *Cond = I->getCondition();
  BasicBlock *BB = I->getParent();

  // Analyse each switch case in turn.
  bool Changed = false;
  // Track how many switch edges target each successor so we only report the
  // dominator edge as deleted once the *last* edge to that block goes away.
  DenseMap<BasicBlock*, int> SuccessorsCount;
  for (auto *Succ : successors(BB))
    SuccessorsCount[Succ]++;

  { // Scope for SwitchInstProfUpdateWrapper. It must not live during
    // ConstantFoldTerminator() as the underlying SwitchInst can be changed.
    SwitchInstProfUpdateWrapper SI(*I);

    for (auto CI = SI->case_begin(), CE = SI->case_end(); CI != CE;) {
      ConstantInt *Case = CI->getCaseValue();
      LazyValueInfo::Tristate State =
          LVI->getPredicateAt(Pred: CmpInst::ICMP_EQ, V: Cond, C: Case, CxtI: I,
                              /* UseBlockValue */ true);

      if (State == LazyValueInfo::False) {
        // This case never fires - remove it.
        BasicBlock *Succ = CI->getCaseSuccessor();
        Succ->removePredecessor(Pred: BB);
        // removeCase invalidates iterators; re-fetch the end iterator.
        CI = SI.removeCase(I: CI);
        CE = SI->case_end();

        // The condition can be modified by removePredecessor's PHI simplification
        // logic.
        Cond = SI->getCondition();

        ++NumDeadCases;
        Changed = true;
        if (--SuccessorsCount[Succ] == 0)
          DTU.applyUpdatesPermissive(Updates: {{DominatorTree::Delete, BB, Succ}});
        continue;
      }
      if (State == LazyValueInfo::True) {
        // This case always fires. Arrange for the switch to be turned into an
        // unconditional branch by replacing the switch condition with the case
        // value.
        SI->setCondition(Case);
        NumDeadCases += SI->getNumCases();
        Changed = true;
        break;
      }

      // Increment the case iterator since we didn't delete it.
      ++CI;
    }
  }

  if (Changed)
    // If the switch has been simplified to the point where it can be replaced
    // by a branch then do so now.
    ConstantFoldTerminator(BB, /*DeleteDeadConditions = */ false,
                           /*TLI = */ nullptr, DTU: &DTU);
  return Changed;
}
420
421// See if we can prove that the given binary op intrinsic will not overflow.
422static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI) {
423 ConstantRange LRange =
424 LVI->getConstantRangeAtUse(U: BO->getOperandUse(i: 0), /*UndefAllowed*/ false);
425 ConstantRange RRange =
426 LVI->getConstantRangeAtUse(U: BO->getOperandUse(i: 1), /*UndefAllowed*/ false);
427 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
428 BinOp: BO->getBinaryOp(), Other: RRange, NoWrapKind: BO->getNoWrapKind());
429 return NWRegion.contains(CR: LRange);
430}
431
/// Set the newly-deduced nsw/nuw flags on \p V (if it is still an
/// Instruction) and bump both the aggregate and per-opcode statistics.
/// \p V may have been constant-folded by the builder, hence the dyn_cast.
static void setDeducedOverflowingFlags(Value *V, Instruction::BinaryOps Opcode,
                                       bool NewNSW, bool NewNUW) {
  // Select the per-opcode counters matching this binop.
  Statistic *OpcNW, *OpcNSW, *OpcNUW;
  switch (Opcode) {
  case Instruction::Add:
    OpcNW = &NumAddNW;
    OpcNSW = &NumAddNSW;
    OpcNUW = &NumAddNUW;
    break;
  case Instruction::Sub:
    OpcNW = &NumSubNW;
    OpcNSW = &NumSubNSW;
    OpcNUW = &NumSubNUW;
    break;
  case Instruction::Mul:
    OpcNW = &NumMulNW;
    OpcNSW = &NumMulNSW;
    OpcNUW = &NumMulNUW;
    break;
  case Instruction::Shl:
    OpcNW = &NumShlNW;
    OpcNSW = &NumShlNSW;
    OpcNUW = &NumShlNUW;
    break;
  default:
    llvm_unreachable("Will not be called with other binops");
  }

  auto *Inst = dyn_cast<Instruction>(Val: V);
  if (NewNSW) {
    ++NumNW;
    ++*OpcNW;
    ++NumNSW;
    ++*OpcNSW;
    if (Inst)
      Inst->setHasNoSignedWrap();
  }
  if (NewNUW) {
    ++NumNW;
    ++*OpcNW;
    ++NumNUW;
    ++*OpcNUW;
    if (Inst)
      Inst->setHasNoUnsignedWrap();
  }
}
478
// Forward declaration: used below to infer no-wrap flags on newly created
// binops; defined later in this file.
static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI);

// See if @llvm.abs argument is always positive/negative, and simplify.
// Notably, INT_MIN can belong to either range, regardless of the NSW,
// because it is negation-invariant.
static bool processAbsIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
  Value *X = II->getArgOperand(i: 0);
  Type *Ty = X->getType();
  // Scalar integers only; LVI does not reason about vectors.
  if (!Ty->isIntegerTy())
    return false;

  // Second argument of llvm.abs: whether INT_MIN input yields poison.
  bool IsIntMinPoison = cast<ConstantInt>(Val: II->getArgOperand(i: 1))->isOne();
  APInt IntMin = APInt::getSignedMinValue(numBits: Ty->getScalarSizeInBits());
  ConstantRange Range = LVI->getConstantRangeAtUse(
      U: II->getOperandUse(i: 0), /*UndefAllowed*/ IsIntMinPoison);

  // Is X in [0, IntMin]? NOTE: INT_MIN is fine!
  if (Range.icmp(Pred: CmpInst::ICMP_ULE, Other: IntMin)) {
    // abs of a non-negative value is the value itself.
    ++NumAbs;
    II->replaceAllUsesWith(V: X);
    II->eraseFromParent();
    return true;
  }

  // Is X in [IntMin, 0]? NOTE: INT_MIN is fine!
  if (Range.getSignedMax().isNonPositive()) {
    // abs of a non-positive value is its negation; the negation may carry
    // nsw only when INT_MIN input is poison anyway.
    IRBuilder<> B(II);
    Value *NegX = B.CreateNeg(V: X, Name: II->getName(), /*HasNUW=*/false,
                              /*HasNSW=*/IsIntMinPoison);
    ++NumAbs;
    II->replaceAllUsesWith(V: NegX);
    II->eraseFromParent();

    // See if we can infer some no-wrap flags.
    if (auto *BO = dyn_cast<BinaryOperator>(Val: NegX))
      processBinOp(BinOp: BO, LVI);

    return true;
  }

  // Argument's range crosses zero.
  // Can we at least tell that the argument is never INT_MIN?
  if (!IsIntMinPoison && !Range.contains(Val: IntMin)) {
    ++NumNSW;
    ++NumSubNSW;
    II->setArgOperand(i: 1, v: ConstantInt::getTrue(Context&: II->getContext()));
    return true;
  }
  return false;
}
529
// See if this min/max intrinsic always picks its one specific operand.
static bool processMinMaxIntrinsic(MinMaxIntrinsic *MM, LazyValueInfo *LVI) {
  // Use the non-strict predicate so equality counts as "LHS wins" too.
  CmpInst::Predicate Pred = CmpInst::getNonStrictPredicate(pred: MM->getPredicate());
  LazyValueInfo::Tristate Result = LVI->getPredicateAt(
      Pred, LHS: MM->getLHS(), RHS: MM->getRHS(), CxtI: MM, /*UseBlockValue=*/true);
  if (Result == LazyValueInfo::Unknown)
    return false;

  ++NumMinMax;
  // True (1) => the intrinsic picks the LHS, i.e. operand !1 == 0;
  // False (0) => it picks the RHS, operand !0 == 1.
  MM->replaceAllUsesWith(V: MM->getOperand(i_nocapture: !Result));
  MM->eraseFromParent();
  return true;
}
543
// Rewrite this with.overflow intrinsic as non-overflowing.
// Precondition (checked by the caller): willNotOverflow proved the operation
// cannot wrap. The intrinsic's {result, i1 overflow} struct is replaced by
// {newop, false}.
static bool processOverflowIntrinsic(WithOverflowInst *WO, LazyValueInfo *LVI) {
  IRBuilder<> B(WO);
  Instruction::BinaryOps Opcode = WO->getBinaryOp();
  // A signed with.overflow implies nsw on the plain binop; unsigned => nuw.
  bool NSW = WO->isSigned();
  bool NUW = !WO->isSigned();

  Value *NewOp =
      B.CreateBinOp(Opc: Opcode, LHS: WO->getLHS(), RHS: WO->getRHS(), Name: WO->getName());
  setDeducedOverflowingFlags(V: NewOp, Opcode, NewNSW: NSW, NewNUW: NUW);

  // Build {poison, false} and insert the real result into slot 0; the
  // overflow bit is a known-false constant.
  StructType *ST = cast<StructType>(Val: WO->getType());
  Constant *Struct = ConstantStruct::get(T: ST,
      V: { PoisonValue::get(T: ST->getElementType(N: 0)),
        ConstantInt::getFalse(Ty: ST->getElementType(N: 1)) });
  Value *NewI = B.CreateInsertValue(Agg: Struct, Val: NewOp, Idxs: 0);
  WO->replaceAllUsesWith(V: NewI);
  WO->eraseFromParent();
  ++NumOverflows;

  // See if we can infer the other no-wrap too.
  if (auto *BO = dyn_cast<BinaryOperator>(Val: NewOp))
    processBinOp(BinOp: BO, LVI);

  return true;
}
570
/// Replace a saturating add/sub intrinsic with a plain binop carrying the
/// implied no-wrap flag. Precondition (checked by the caller): willNotOverflow
/// proved saturation can never occur.
static bool processSaturatingInst(SaturatingInst *SI, LazyValueInfo *LVI) {
  Instruction::BinaryOps Opcode = SI->getBinaryOp();
  // Signed saturating op that never saturates implies nsw; unsigned => nuw.
  bool NSW = SI->isSigned();
  bool NUW = !SI->isSigned();
  BinaryOperator *BinOp = BinaryOperator::Create(
      Op: Opcode, S1: SI->getLHS(), S2: SI->getRHS(), Name: SI->getName(), InsertBefore: SI);
  BinOp->setDebugLoc(SI->getDebugLoc());
  setDeducedOverflowingFlags(V: BinOp, Opcode, NewNSW: NSW, NewNUW: NUW);

  SI->replaceAllUsesWith(V: BinOp);
  SI->eraseFromParent();
  ++NumSaturating;

  // See if we can infer the other no-wrap too.
  if (auto *BO = dyn_cast<BinaryOperator>(Val: BinOp))
    processBinOp(BinOp: BO, LVI);

  return true;
}
590
/// Infer nonnull attributes for the arguments at the specified callsite.
/// Also dispatches the intrinsic-specific folds (abs, min/max, with.overflow,
/// saturating arithmetic) and constant-folds deopt bundle operands.
static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {

  if (CB.getIntrinsicID() == Intrinsic::abs) {
    return processAbsIntrinsic(II: &cast<IntrinsicInst>(Val&: CB), LVI);
  }

  if (auto *MM = dyn_cast<MinMaxIntrinsic>(Val: &CB)) {
    return processMinMaxIntrinsic(MM, LVI);
  }

  if (auto *WO = dyn_cast<WithOverflowInst>(Val: &CB)) {
    // Scalar only, and only when overflow is provably impossible.
    if (WO->getLHS()->getType()->isIntegerTy() && willNotOverflow(BO: WO, LVI)) {
      return processOverflowIntrinsic(WO, LVI);
    }
  }

  if (auto *SI = dyn_cast<SaturatingInst>(Val: &CB)) {
    if (SI->getType()->isIntegerTy() && willNotOverflow(BO: SI, LVI)) {
      return processSaturatingInst(SI, LVI);
    }
  }

  bool Changed = false;

  // Deopt bundle operands are intended to capture state with minimal
  // perturbance of the code otherwise. If we can find a constant value for
  // any such operand and remove a use of the original value, that's
  // desirable since it may allow further optimization of that value (e.g. via
  // single use rules in instcombine). Since deopt uses tend to,
  // idiomatically, appear along rare conditional paths, it's reasonably likely
  // we may have a conditional fact with which LVI can fold.
  if (auto DeoptBundle = CB.getOperandBundle(ID: LLVMContext::OB_deopt)) {
    for (const Use &ConstU : DeoptBundle->Inputs) {
      // Inputs yields const Uses; we intentionally rewrite them in place.
      Use &U = const_cast<Use&>(ConstU);
      Value *V = U.get();
      if (V->getType()->isVectorTy()) continue;
      if (isa<Constant>(Val: V)) continue;

      Constant *C = LVI->getConstant(V, CxtI: &CB);
      if (!C) continue;
      U.set(C);
      Changed = true;
    }
  }

  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;

  for (Value *V : CB.args()) {
    PointerType *Type = dyn_cast<PointerType>(Val: V->getType());
    // Try to mark pointer typed parameters as non-null.  We skip the
    // relatively expensive analysis for constants which are obviously either
    // null or non-null to start with.
    if (Type && !CB.paramHasAttr(ArgNo, Attribute::Kind: NonNull) &&
        !isa<Constant>(Val: V) &&
        LVI->getPredicateAt(Pred: ICmpInst::ICMP_EQ, V,
                            C: ConstantPointerNull::get(T: Type), CxtI: &CB,
                            /*UseBlockValue=*/false) == LazyValueInfo::False)
      ArgNos.push_back(Elt: ArgNo);
    ArgNo++;
  }

  assert(ArgNo == CB.arg_size() && "Call arguments not processed correctly.");

  if (ArgNos.empty())
    return Changed;

  // Attach nonnull to every argument proven non-null above.
  NumNonNull += ArgNos.size();
  AttributeList AS = CB.getAttributes();
  LLVMContext &Ctx = CB.getContext();
  AS = AS.addParamAttribute(Ctx, ArgNos,
                            Attribute::get(Ctx, Attribute::NonNull));
  CB.setAttributes(AS);

  return true;
}
668
669enum class Domain { NonNegative, NonPositive, Unknown };
670
671static Domain getDomain(const ConstantRange &CR) {
672 if (CR.isAllNonNegative())
673 return Domain::NonNegative;
674 if (CR.icmp(Pred: ICmpInst::ICMP_SLE, Other: APInt::getZero(numBits: CR.getBitWidth())))
675 return Domain::NonPositive;
676 return Domain::Unknown;
677}
678
/// Try to shrink a sdiv/srem's width down to the smallest power of two that's
/// sufficient to contain its operands. The narrowed op is computed in the
/// smaller type and sign-extended back to the original width.
static bool narrowSDivOrSRem(BinaryOperator *Instr, const ConstantRange &LCR,
                             const ConstantRange &RCR) {
  assert(Instr->getOpcode() == Instruction::SDiv ||
         Instr->getOpcode() == Instruction::SRem);
  assert(!Instr->getType()->isVectorTy());

  // Find the smallest power of two bitwidth that's sufficient to hold Instr's
  // operands.
  unsigned OrigWidth = Instr->getType()->getIntegerBitWidth();

  // What is the smallest bit width that can accommodate the entire value ranges
  // of both of the operands?
  unsigned MinSignedBits =
      std::max(a: LCR.getMinSignedBits(), b: RCR.getMinSignedBits());

  // sdiv/srem is UB if divisor is -1 and divident is INT_MIN, so unless we can
  // prove that such a combination is impossible, we need to bump the bitwidth.
  if (RCR.contains(Val: APInt::getAllOnes(numBits: OrigWidth)) &&
      LCR.contains(Val: APInt::getSignedMinValue(numBits: MinSignedBits).sext(width: OrigWidth)))
    ++MinSignedBits;

  // Don't shrink below 8 bits wide.
  unsigned NewWidth = std::max<unsigned>(a: PowerOf2Ceil(A: MinSignedBits), b: 8);

  // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
  // two.
  if (NewWidth >= OrigWidth)
    return false;

  ++NumSDivSRemsNarrowed;
  // Truncate both operands, perform the op narrow, then sign-extend back.
  IRBuilder<> B{Instr};
  auto *TruncTy = Type::getIntNTy(C&: Instr->getContext(), N: NewWidth);
  auto *LHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 0), DestTy: TruncTy,
                                     Name: Instr->getName() + ".lhs.trunc");
  auto *RHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 1), DestTy: TruncTy,
                                     Name: Instr->getName() + ".rhs.trunc");
  auto *BO = B.CreateBinOp(Opc: Instr->getOpcode(), LHS, RHS, Name: Instr->getName());
  auto *Sext = B.CreateSExt(V: BO, DestTy: Instr->getType(), Name: Instr->getName() + ".sext");
  // Preserve the exact flag on a narrowed sdiv (srem has no exact flag).
  if (auto *BinOp = dyn_cast<BinaryOperator>(Val: BO))
    if (BinOp->getOpcode() == Instruction::SDiv)
      BinOp->setIsExact(Instr->isExact());

  Instr->replaceAllUsesWith(V: Sext);
  Instr->eraseFromParent();
  return true;
}
727
/// Expand a scalar udiv/urem whose operand ranges show the result is either
/// trivially known (X u< Y) or computable with at most one conditional
/// subtraction / compare (X u< 2*Y), avoiding a hardware division.
static bool expandUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
                             const ConstantRange &YCR) {
  Type *Ty = Instr->getType();
  assert(Instr->getOpcode() == Instruction::UDiv ||
         Instr->getOpcode() == Instruction::URem);
  assert(!Ty->isVectorTy());
  bool IsRem = Instr->getOpcode() == Instruction::URem;

  Value *X = Instr->getOperand(i_nocapture: 0);
  Value *Y = Instr->getOperand(i_nocapture: 1);

  // X u/ Y -> 0 iff X u< Y
  // X u% Y -> X iff X u< Y
  if (XCR.icmp(Pred: ICmpInst::ICMP_ULT, Other: YCR)) {
    Instr->replaceAllUsesWith(V: IsRem ? X : Constant::getNullValue(Ty));
    Instr->eraseFromParent();
    ++NumUDivURemsNarrowedExpanded;
    return true;
  }

  // Given
  //   R  = X u% Y
  // We can represent the modulo operation as a loop/self-recursion:
  //   urem_rec(X, Y):
  //     Z = X - Y
  //     if X u< Y
  //       ret X
  //     else
  //       ret urem_rec(Z, Y)
  // which isn't better, but if we only need a single iteration
  // to compute the answer, this becomes quite good:
  //   R  = X < Y ? X : X - Y    iff X u< 2*Y (w/ unsigned saturation)
  // Now, we do not care about all full multiples of Y in X, they do not change
  // the answer, thus we could rewrite the expression as:
  //   X* = X - (Y * |_ X / Y _|)
  //   R  = X* % Y
  // so we don't need the *first* iteration to return, we just need to
  // know *which* iteration will always return, so we could also rewrite it as:
  //   X* = X - (Y * |_ X / Y _|)
  //   R  = X* % Y                 iff X* u< 2*Y (w/ unsigned saturation)
  // but that does not seem profitable here.

  // Even if we don't know X's range, the divisor may be so large, X can't ever
  // be 2x larger than that. I.e. if divisor is always negative.
  if (!XCR.icmp(Pred: ICmpInst::ICMP_ULT,
                Other: YCR.umul_sat(Other: APInt(YCR.getBitWidth(), 2))) &&
      !YCR.isAllNegative())
    return false;

  IRBuilder<> B(Instr);
  Value *ExpandedOp;
  if (XCR.icmp(Pred: ICmpInst::ICMP_UGE, Other: YCR)) {
    // If X is between Y and 2*Y the result is known.
    if (IsRem)
      ExpandedOp = B.CreateNUWSub(LHS: X, RHS: Y);
    else
      ExpandedOp = ConstantInt::get(Ty: Instr->getType(), V: 1);
  } else if (IsRem) {
    // NOTE: this transformation introduces two uses of X,
    //       but it may be undef so we must freeze it first.
    Value *FrozenX = X;
    if (!isGuaranteedNotToBeUndef(V: X))
      FrozenX = B.CreateFreeze(V: X, Name: X->getName() + ".frozen");
    auto *AdjX = B.CreateNUWSub(LHS: FrozenX, RHS: Y, Name: Instr->getName() + ".urem");
    auto *Cmp =
        B.CreateICmp(P: ICmpInst::ICMP_ULT, LHS: FrozenX, RHS: Y, Name: Instr->getName() + ".cmp");
    ExpandedOp = B.CreateSelect(C: Cmp, True: FrozenX, False: AdjX);
  } else {
    // X u/ Y is 1 when Y u<= X (and X u< 2*Y by the guard above), else 0, so
    // the quotient is just the zero-extended compare.
    auto *Cmp =
        B.CreateICmp(P: ICmpInst::ICMP_UGE, LHS: X, RHS: Y, Name: Instr->getName() + ".cmp");
    ExpandedOp = B.CreateZExt(V: Cmp, DestTy: Ty, Name: Instr->getName() + ".udiv");
  }
  ExpandedOp->takeName(V: Instr);
  Instr->replaceAllUsesWith(V: ExpandedOp);
  Instr->eraseFromParent();
  ++NumUDivURemsNarrowedExpanded;
  return true;
}
806
/// Try to shrink a udiv/urem's width down to the smallest power of two that's
/// sufficient to contain its operands. The narrowed op is computed in the
/// smaller type and zero-extended back to the original width.
static bool narrowUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
                             const ConstantRange &YCR) {
  assert(Instr->getOpcode() == Instruction::UDiv ||
         Instr->getOpcode() == Instruction::URem);
  assert(!Instr->getType()->isVectorTy());

  // Find the smallest power of two bitwidth that's sufficient to hold Instr's
  // operands.

  // What is the smallest bit width that can accommodate the entire value ranges
  // of both of the operands?
  unsigned MaxActiveBits = std::max(a: XCR.getActiveBits(), b: YCR.getActiveBits());
  // Don't shrink below 8 bits wide.
  unsigned NewWidth = std::max<unsigned>(a: PowerOf2Ceil(A: MaxActiveBits), b: 8);

  // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
  // two.
  if (NewWidth >= Instr->getType()->getIntegerBitWidth())
    return false;

  ++NumUDivURemsNarrowed;
  // Truncate both operands, perform the op narrow, then zero-extend back.
  IRBuilder<> B{Instr};
  auto *TruncTy = Type::getIntNTy(C&: Instr->getContext(), N: NewWidth);
  auto *LHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 0), DestTy: TruncTy,
                                     Name: Instr->getName() + ".lhs.trunc");
  auto *RHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 1), DestTy: TruncTy,
                                     Name: Instr->getName() + ".rhs.trunc");
  auto *BO = B.CreateBinOp(Opc: Instr->getOpcode(), LHS, RHS, Name: Instr->getName());
  auto *Zext = B.CreateZExt(V: BO, DestTy: Instr->getType(), Name: Instr->getName() + ".zext");
  // Preserve the exact flag on a narrowed udiv (urem has no exact flag).
  if (auto *BinOp = dyn_cast<BinaryOperator>(Val: BO))
    if (BinOp->getOpcode() == Instruction::UDiv)
      BinOp->setIsExact(Instr->isExact());

  Instr->replaceAllUsesWith(V: Zext);
  Instr->eraseFromParent();
  return true;
}
846
847static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
848 assert(Instr->getOpcode() == Instruction::UDiv ||
849 Instr->getOpcode() == Instruction::URem);
850 if (Instr->getType()->isVectorTy())
851 return false;
852
853 ConstantRange XCR = LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 0),
854 /*UndefAllowed*/ false);
855 // Allow undef for RHS, as we can assume it is division by zero UB.
856 ConstantRange YCR = LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 1),
857 /*UndefAllowed*/ true);
858 if (expandUDivOrURem(Instr, XCR, YCR))
859 return true;
860
861 return narrowUDivOrURem(Instr, XCR, YCR);
862}
863
/// Simplify an srem whose operand sign domains LazyValueInfo can prove:
/// either remove it entirely (when |LHS| < |RHS| always holds) or lower it to
/// a urem on non-negative operands, negating operands/result as required.
static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
                        const ConstantRange &RCR, LazyValueInfo *LVI) {
  assert(SDI->getOpcode() == Instruction::SRem);
  assert(!SDI->getType()->isVectorTy());

  // If |LHS| < |RHS| for every possible pair of operand values, the remainder
  // is simply the dividend itself.
  if (LCR.abs().icmp(Pred: CmpInst::ICMP_ULT, Other: RCR.abs())) {
    SDI->replaceAllUsesWith(V: SDI->getOperand(i_nocapture: 0));
    SDI->eraseFromParent();
    return true;
  }

  struct Operand {
    Value *V;
    Domain D; // Proven sign domain of V (NonNegative / NonPositive / Unknown).
  };
  std::array<Operand, 2> Ops = {._M_elems: {{.V: SDI->getOperand(i_nocapture: 0), .D: getDomain(CR: LCR)},
                                {.V: SDI->getOperand(i_nocapture: 1), .D: getDomain(CR: RCR)}}};
  // Bail out unless the sign of both operands has been proven.
  if (Ops[0].D == Domain::Unknown || Ops[1].D == Domain::Unknown)
    return false;

  // We know domains of both of the operands!
  ++NumSRems;

  // We need operands to be non-negative, so negate each one that isn't.
  for (Operand &Op : Ops) {
    if (Op.D == Domain::NonNegative)
      continue;
    auto *BO =
        BinaryOperator::CreateNeg(Op: Op.V, Name: Op.V->getName() + ".nonneg", InsertBefore: SDI);
    BO->setDebugLoc(SDI->getDebugLoc());
    Op.V = BO;
  }

  auto *URem =
      BinaryOperator::CreateURem(V1: Ops[0].V, V2: Ops[1].V, Name: SDI->getName(), I: SDI);
  URem->setDebugLoc(SDI->getDebugLoc());

  auto *Res = URem;

  // If the dividend was non-positive, we need to negate the result.
  if (Ops[0].D == Domain::NonPositive) {
    Res = BinaryOperator::CreateNeg(Op: Res, Name: Res->getName() + ".neg", InsertBefore: SDI);
    Res->setDebugLoc(SDI->getDebugLoc());
  }

  SDI->replaceAllUsesWith(V: Res);
  SDI->eraseFromParent();

  // Try to simplify our new urem.
  processUDivOrURem(Instr: URem, LVI);

  return true;
}
917
/// See if LazyValueInfo's ability to exploit edge conditions or range
/// information is sufficient to prove the signs of both operands of this SDiv.
/// If this is the case, replace the SDiv with a UDiv. Even for local
/// conditions, this can sometimes prove conditions instcombine can't by
/// exploiting range information.
static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
                        const ConstantRange &RCR, LazyValueInfo *LVI) {
  assert(SDI->getOpcode() == Instruction::SDiv);
  assert(!SDI->getType()->isVectorTy());

  // Check whether the division folds to a constant.
  ConstantRange DivCR = LCR.sdiv(Other: RCR);
  if (const APInt *Elem = DivCR.getSingleElement()) {
    SDI->replaceAllUsesWith(V: ConstantInt::get(Ty: SDI->getType(), V: *Elem));
    SDI->eraseFromParent();
    return true;
  }

  struct Operand {
    Value *V;
    Domain D; // Proven sign domain of V (NonNegative / NonPositive / Unknown).
  };
  std::array<Operand, 2> Ops = {._M_elems: {{.V: SDI->getOperand(i_nocapture: 0), .D: getDomain(CR: LCR)},
                                {.V: SDI->getOperand(i_nocapture: 1), .D: getDomain(CR: RCR)}}};
  // Bail out unless the sign of both operands has been proven.
  if (Ops[0].D == Domain::Unknown || Ops[1].D == Domain::Unknown)
    return false;

  // We know domains of both of the operands!
  ++NumSDivs;

  // We need operands to be non-negative, so negate each one that isn't.
  for (Operand &Op : Ops) {
    if (Op.D == Domain::NonNegative)
      continue;
    auto *BO =
        BinaryOperator::CreateNeg(Op: Op.V, Name: Op.V->getName() + ".nonneg", InsertBefore: SDI);
    BO->setDebugLoc(SDI->getDebugLoc());
    Op.V = BO;
  }

  auto *UDiv =
      BinaryOperator::CreateUDiv(V1: Ops[0].V, V2: Ops[1].V, Name: SDI->getName(), I: SDI);
  UDiv->setDebugLoc(SDI->getDebugLoc());
  // The unsigned division of the (possibly negated) operands is exact iff the
  // original signed division was.
  UDiv->setIsExact(SDI->isExact());

  auto *Res = UDiv;

  // If the operands had two different domains, we need to negate the result.
  if (Ops[0].D != Ops[1].D) {
    Res = BinaryOperator::CreateNeg(Op: Res, Name: Res->getName() + ".neg", InsertBefore: SDI);
    Res->setDebugLoc(SDI->getDebugLoc());
  }

  SDI->replaceAllUsesWith(V: Res);
  SDI->eraseFromParent();

  // Try to simplify our new udiv.
  processUDivOrURem(Instr: UDiv, LVI);

  return true;
}
979
980static bool processSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
981 assert(Instr->getOpcode() == Instruction::SDiv ||
982 Instr->getOpcode() == Instruction::SRem);
983 if (Instr->getType()->isVectorTy())
984 return false;
985
986 ConstantRange LCR =
987 LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 0), /*AllowUndef*/ UndefAllowed: false);
988 // Allow undef for RHS, as we can assume it is division by zero UB.
989 ConstantRange RCR =
990 LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 1), /*AlloweUndef*/ UndefAllowed: true);
991 if (Instr->getOpcode() == Instruction::SDiv)
992 if (processSDiv(SDI: Instr, LCR, RCR, LVI))
993 return true;
994
995 if (Instr->getOpcode() == Instruction::SRem) {
996 if (processSRem(SDI: Instr, LCR, RCR, LVI))
997 return true;
998 }
999
1000 return narrowSDivOrSRem(Instr, LCR, RCR);
1001}
1002
1003static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
1004 if (SDI->getType()->isVectorTy())
1005 return false;
1006
1007 ConstantRange LRange =
1008 LVI->getConstantRangeAtUse(U: SDI->getOperandUse(i: 0), /*UndefAllowed*/ false);
1009 unsigned OrigWidth = SDI->getType()->getIntegerBitWidth();
1010 ConstantRange NegOneOrZero =
1011 ConstantRange(APInt(OrigWidth, (uint64_t)-1, true), APInt(OrigWidth, 1));
1012 if (NegOneOrZero.contains(CR: LRange)) {
1013 // ashr of -1 or 0 never changes the value, so drop the whole instruction
1014 ++NumAShrsRemoved;
1015 SDI->replaceAllUsesWith(V: SDI->getOperand(i_nocapture: 0));
1016 SDI->eraseFromParent();
1017 return true;
1018 }
1019
1020 if (!LRange.isAllNonNegative())
1021 return false;
1022
1023 ++NumAShrsConverted;
1024 auto *BO = BinaryOperator::CreateLShr(V1: SDI->getOperand(i_nocapture: 0), V2: SDI->getOperand(i_nocapture: 1),
1025 Name: "", I: SDI);
1026 BO->takeName(V: SDI);
1027 BO->setDebugLoc(SDI->getDebugLoc());
1028 BO->setIsExact(SDI->isExact());
1029 SDI->replaceAllUsesWith(V: BO);
1030 SDI->eraseFromParent();
1031
1032 return true;
1033}
1034
1035static bool processSExt(SExtInst *SDI, LazyValueInfo *LVI) {
1036 if (SDI->getType()->isVectorTy())
1037 return false;
1038
1039 const Use &Base = SDI->getOperandUse(i: 0);
1040 if (!LVI->getConstantRangeAtUse(U: Base, /*UndefAllowed*/ false)
1041 .isAllNonNegative())
1042 return false;
1043
1044 ++NumSExt;
1045 auto *ZExt = CastInst::CreateZExtOrBitCast(S: Base, Ty: SDI->getType(), Name: "", InsertBefore: SDI);
1046 ZExt->takeName(V: SDI);
1047 ZExt->setDebugLoc(SDI->getDebugLoc());
1048 ZExt->setNonNeg();
1049 SDI->replaceAllUsesWith(V: ZExt);
1050 SDI->eraseFromParent();
1051
1052 return true;
1053}
1054
1055static bool processZExt(ZExtInst *ZExt, LazyValueInfo *LVI) {
1056 if (ZExt->getType()->isVectorTy())
1057 return false;
1058
1059 if (ZExt->hasNonNeg())
1060 return false;
1061
1062 const Use &Base = ZExt->getOperandUse(i: 0);
1063 if (!LVI->getConstantRangeAtUse(U: Base, /*UndefAllowed*/ false)
1064 .isAllNonNegative())
1065 return false;
1066
1067 ++NumZExt;
1068 ZExt->setNonNeg();
1069
1070 return true;
1071}
1072
1073static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI) {
1074 using OBO = OverflowingBinaryOperator;
1075
1076 if (BinOp->getType()->isVectorTy())
1077 return false;
1078
1079 bool NSW = BinOp->hasNoSignedWrap();
1080 bool NUW = BinOp->hasNoUnsignedWrap();
1081 if (NSW && NUW)
1082 return false;
1083
1084 Instruction::BinaryOps Opcode = BinOp->getOpcode();
1085 Value *LHS = BinOp->getOperand(i_nocapture: 0);
1086 Value *RHS = BinOp->getOperand(i_nocapture: 1);
1087
1088 ConstantRange LRange =
1089 LVI->getConstantRange(V: LHS, CxtI: BinOp, /*UndefAllowed*/ false);
1090 ConstantRange RRange =
1091 LVI->getConstantRange(V: RHS, CxtI: BinOp, /*UndefAllowed*/ false);
1092
1093 bool Changed = false;
1094 bool NewNUW = false, NewNSW = false;
1095 if (!NUW) {
1096 ConstantRange NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
1097 BinOp: Opcode, Other: RRange, NoWrapKind: OBO::NoUnsignedWrap);
1098 NewNUW = NUWRange.contains(CR: LRange);
1099 Changed |= NewNUW;
1100 }
1101 if (!NSW) {
1102 ConstantRange NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
1103 BinOp: Opcode, Other: RRange, NoWrapKind: OBO::NoSignedWrap);
1104 NewNSW = NSWRange.contains(CR: LRange);
1105 Changed |= NewNSW;
1106 }
1107
1108 setDeducedOverflowingFlags(V: BinOp, Opcode, NewNSW, NewNUW);
1109
1110 return Changed;
1111}
1112
1113static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
1114 if (BinOp->getType()->isVectorTy())
1115 return false;
1116
1117 // Pattern match (and lhs, C) where C includes a superset of bits which might
1118 // be set in lhs. This is a common truncation idiom created by instcombine.
1119 const Use &LHS = BinOp->getOperandUse(i: 0);
1120 ConstantInt *RHS = dyn_cast<ConstantInt>(Val: BinOp->getOperand(i_nocapture: 1));
1121 if (!RHS || !RHS->getValue().isMask())
1122 return false;
1123
1124 // We can only replace the AND with LHS based on range info if the range does
1125 // not include undef.
1126 ConstantRange LRange =
1127 LVI->getConstantRangeAtUse(U: LHS, /*UndefAllowed=*/false);
1128 if (!LRange.getUnsignedMax().ule(RHS: RHS->getValue()))
1129 return false;
1130
1131 BinOp->replaceAllUsesWith(V: LHS);
1132 BinOp->eraseFromParent();
1133 NumAnd++;
1134 return true;
1135}
1136
/// Walk every reachable block of F in pre-order DFS and apply the per-opcode
/// CVP transforms above, plus terminator-specific handling for switches and
/// returns. Returns true iff anything in the function changed.
static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
                    const SimplifyQuery &SQ) {
  bool FnChanged = false;
  // Visiting in a pre-order depth-first traversal causes us to simplify early
  // blocks before querying later blocks (which require us to analyze early
  // blocks). Eagerly simplifying shallow blocks means there is strictly less
  // work to do for deep blocks. This also means we don't visit unreachable
  // blocks.
  for (BasicBlock *BB : depth_first(G: &F.getEntryBlock())) {
    bool BBChanged = false;
    // Early-inc range: handlers may erase the current instruction.
    for (Instruction &II : llvm::make_early_inc_range(Range&: *BB)) {
      switch (II.getOpcode()) {
      case Instruction::Select:
        BBChanged |= processSelect(S: cast<SelectInst>(Val: &II), LVI);
        break;
      case Instruction::PHI:
        BBChanged |= processPHI(P: cast<PHINode>(Val: &II), LVI, DT, SQ);
        break;
      case Instruction::ICmp:
      case Instruction::FCmp:
        BBChanged |= processCmp(Cmp: cast<CmpInst>(Val: &II), LVI);
        break;
      case Instruction::Call:
      case Instruction::Invoke:
        BBChanged |= processCallSite(CB&: cast<CallBase>(Val&: II), LVI);
        break;
      case Instruction::SRem:
      case Instruction::SDiv:
        BBChanged |= processSDivOrSRem(Instr: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::UDiv:
      case Instruction::URem:
        BBChanged |= processUDivOrURem(Instr: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::AShr:
        BBChanged |= processAShr(SDI: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::SExt:
        BBChanged |= processSExt(SDI: cast<SExtInst>(Val: &II), LVI);
        break;
      case Instruction::ZExt:
        BBChanged |= processZExt(ZExt: cast<ZExtInst>(Val: &II), LVI);
        break;
      case Instruction::Add:
      case Instruction::Sub:
      case Instruction::Mul:
      case Instruction::Shl:
        BBChanged |= processBinOp(BinOp: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::And:
        BBChanged |= processAnd(BinOp: cast<BinaryOperator>(Val: &II), LVI);
        break;
      }
    }

    // Terminators get their own handling after the body has been simplified.
    Instruction *Term = BB->getTerminator();
    switch (Term->getOpcode()) {
    case Instruction::Switch:
      BBChanged |= processSwitch(I: cast<SwitchInst>(Val: Term), LVI, DT);
      break;
    case Instruction::Ret: {
      auto *RI = cast<ReturnInst>(Val: Term);
      // Try to determine the return value if we can. This is mainly here to
      // simplify the writing of unit tests, but also helps to enable IPO by
      // constant folding the return values of callees.
      auto *RetVal = RI->getReturnValue();
      if (!RetVal) break; // handle "ret void"
      if (isa<Constant>(Val: RetVal)) break; // nothing to do
      if (auto *C = getConstantAt(V: RetVal, At: RI, LVI)) {
        ++NumReturns;
        RI->replaceUsesOfWith(From: RetVal, To: C);
        BBChanged = true;
      }
    }
    }

    FnChanged |= BBChanged;
  }

  return FnChanged;
}
1218
1219PreservedAnalyses
1220CorrelatedValuePropagationPass::run(Function &F, FunctionAnalysisManager &AM) {
1221 LazyValueInfo *LVI = &AM.getResult<LazyValueAnalysis>(IR&: F);
1222 DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(IR&: F);
1223
1224 bool Changed = runImpl(F, LVI, DT, SQ: getBestSimplifyQuery(AM, F));
1225
1226 PreservedAnalyses PA;
1227 if (!Changed) {
1228 PA = PreservedAnalyses::all();
1229 } else {
1230 PA.preserve<DominatorTreeAnalysis>();
1231 PA.preserve<LazyValueAnalysis>();
1232 }
1233
1234 // Keeping LVI alive is expensive, both because it uses a lot of memory, and
1235 // because invalidating values in LVI is expensive. While CVP does preserve
1236 // LVI, we know that passes after JumpThreading+CVP will not need the result
1237 // of this analysis, so we forcefully discard it early.
1238 PA.abandon<LazyValueAnalysis>();
1239 return PA;
1240}
1241

// Source: llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp