1//===- llvm/Analysis/ScalarEvolution.h - Scalar Evolution -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The ScalarEvolution class is an LLVM pass which can be used to analyze and
10// categorize scalar expressions in loops. It specializes in recognizing
11// general induction variables, representing them with the abstract and opaque
12// SCEV class. Given this analysis, trip counts of loops and other important
13// properties can be obtained.
14//
15// This analysis is primarily useful for induction variable substitution and
16// strength reduction.
17//
18//===----------------------------------------------------------------------===//
19
20#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
21#define LLVM_ANALYSIS_SCALAREVOLUTION_H
22
23#include "llvm/ADT/APInt.h"
24#include "llvm/ADT/ArrayRef.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/DenseMapInfo.h"
27#include "llvm/ADT/FoldingSet.h"
28#include "llvm/ADT/PointerIntPair.h"
29#include "llvm/ADT/SetVector.h"
30#include "llvm/ADT/SmallPtrSet.h"
31#include "llvm/ADT/SmallVector.h"
32#include "llvm/IR/ConstantRange.h"
33#include "llvm/IR/Instructions.h"
34#include "llvm/IR/PassManager.h"
35#include "llvm/IR/ValueHandle.h"
36#include "llvm/IR/ValueMap.h"
37#include "llvm/Pass.h"
38#include "llvm/Support/Compiler.h"
39#include <cassert>
40#include <cstdint>
41#include <memory>
42#include <optional>
43#include <utility>
44
45namespace llvm {
46
47class OverflowingBinaryOperator;
48class AssumptionCache;
49class BasicBlock;
50class Constant;
51class ConstantInt;
52class DataLayout;
53class DominatorTree;
54class GEPOperator;
55class LLVMContext;
56class Loop;
57class LoopInfo;
58class raw_ostream;
59class ScalarEvolution;
60class SCEVAddRecExpr;
61class SCEVUnknown;
62class StructType;
63class TargetLibraryInfo;
64class Type;
65enum SCEVTypes : unsigned short;
66
67LLVM_ABI extern bool VerifySCEV;
68
69/// This class represents an analyzed expression in the program. These are
70/// opaque objects that the client is not allowed to do much with directly.
71///
72class SCEV : public FoldingSetNode {
73 friend struct FoldingSetTrait<SCEV>;
74
75 /// A reference to an Interned FoldingSetNodeID for this node. The
76 /// ScalarEvolution's BumpPtrAllocator holds the data.
77 FoldingSetNodeIDRef FastID;
78
79 // The SCEV baseclass this node corresponds to
80 const SCEVTypes SCEVType;
81
82protected:
83 // Estimated complexity of this node's expression tree size.
84 const unsigned short ExpressionSize;
85
86 /// This field is initialized to zero and may be used in subclasses to store
87 /// miscellaneous information.
88 unsigned short SubclassData = 0;
89
90public:
91 /// NoWrapFlags are bitfield indices into SubclassData.
92 ///
93 /// Add and Mul expressions may have no-unsigned-wrap <NUW> or
94 /// no-signed-wrap <NSW> properties, which are derived from the IR
95 /// operator. NSW is a misnomer that we use to mean no signed overflow or
96 /// underflow.
97 ///
98 /// AddRec expressions may have a no-self-wraparound <NW> property if, in
99 /// the integer domain, abs(step) * max-iteration(loop) <=
100 /// unsigned-max(bitwidth). This means that the recurrence will never reach
101 /// its start value if the step is non-zero. Computing the same value on
102 /// each iteration is not considered wrapping, and recurrences with step = 0
103 /// are trivially <NW>. <NW> is independent of the sign of step and the
104 /// value the add recurrence starts with.
105 ///
106 /// Note that NUW and NSW are also valid properties of a recurrence, and
107 /// either implies NW. For convenience, NW will be set for a recurrence
108 /// whenever either NUW or NSW are set.
109 ///
110 /// We require that the flag on a SCEV apply to the entire scope in which
  /// that SCEV is defined. A SCEV's scope is the set of locations dominated
  /// by its defining location, which is in turn described by the following
  /// rules:
113 /// * A SCEVUnknown is at the point of definition of the Value.
114 /// * A SCEVConstant is defined at all points.
115 /// * A SCEVAddRec is defined starting with the header of the associated
116 /// loop.
  /// * All other SCEVs are defined at the earliest point at which all of
  ///   their operands are defined.
119 ///
  /// The above rules describe a maximally hoisted form (without regard to
121 /// potential control dependence). A SCEV is defined anywhere a
122 /// corresponding instruction could be defined in said maximally hoisted
123 /// form. Note that SCEVUDivExpr (currently the only expression type which
124 /// can trap) can be defined per these rules in regions where it would trap
125 /// at runtime. A SCEV being defined does not require the existence of any
126 /// instruction within the defined scope.
127 enum NoWrapFlags {
128 FlagAnyWrap = 0, // No guarantee.
129 FlagNW = (1 << 0), // No self-wrap.
130 FlagNUW = (1 << 1), // No unsigned wrap.
131 FlagNSW = (1 << 2), // No signed wrap.
132 NoWrapMask = (1 << 3) - 1
133 };
134
135 explicit SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
136 unsigned short ExpressionSize)
137 : FastID(ID), SCEVType(SCEVTy), ExpressionSize(ExpressionSize) {}
138 SCEV(const SCEV &) = delete;
139 SCEV &operator=(const SCEV &) = delete;
140
141 SCEVTypes getSCEVType() const { return SCEVType; }
142
143 /// Return the LLVM type of this SCEV expression.
144 LLVM_ABI Type *getType() const;
145
146 /// Return operands of this SCEV expression.
147 LLVM_ABI ArrayRef<const SCEV *> operands() const;
148
149 /// Return true if the expression is a constant zero.
150 LLVM_ABI bool isZero() const;
151
152 /// Return true if the expression is a constant one.
153 LLVM_ABI bool isOne() const;
154
155 /// Return true if the expression is a constant all-ones value.
156 LLVM_ABI bool isAllOnesValue() const;
157
158 /// Return true if the specified scev is negated, but not a constant.
159 LLVM_ABI bool isNonConstantNegative() const;
160
  // Returns the estimated size of the mathematical expression represented by
  // this SCEV. The rules for its calculation are as follows:
  // 1) The size of a SCEV without operands (such as constants and SCEVUnknown)
  //    is 1;
  // 2) The size of a SCEV with operands Op1, Op2, ..., OpN is
  //    (1 + Size(Op1) + ... + Size(OpN)).
  // This value gives an estimate of the time needed to traverse this SCEV and
  // all of its operands recursively. We may use it to avoid performing heavy
  // transformations on SCEVs of excessive size for the sake of saving
  // compilation time.
170 unsigned short getExpressionSize() const {
171 return ExpressionSize;
172 }
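
  // Worked example (illustrative, not from the implementation): for an
  // expression (A + B * C), where A, B and C are SCEVUnknowns of size 1, the
  // multiply has size 1 + 1 + 1 = 3 and the whole add has size 1 + 1 + 3 = 5.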
173
174 /// Print out the internal representation of this scalar to the specified
175 /// stream. This should really only be used for debugging purposes.
176 LLVM_ABI void print(raw_ostream &OS) const;
177
178 /// This method is used for debugging.
179 LLVM_ABI void dump() const;
180};
181
182// Specialize FoldingSetTrait for SCEV to avoid needing to compute
183// temporary FoldingSetNodeID values.
184template <> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
185 static void Profile(const SCEV &X, FoldingSetNodeID &ID) { ID = X.FastID; }
186
187 static bool Equals(const SCEV &X, const FoldingSetNodeID &ID, unsigned IDHash,
188 FoldingSetNodeID &TempID) {
189 return ID == X.FastID;
190 }
191
192 static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
193 return X.FastID.ComputeHash();
194 }
195};
196
197inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
198 S.print(OS);
199 return OS;
200}
201
202/// An object of this class is returned by queries that could not be answered.
203/// For example, if you ask for the number of iterations of a linked-list
/// traversal loop, you will get one of these. None of the standard SCEV
/// operations are valid on this class; it is just a marker.
206struct SCEVCouldNotCompute : public SCEV {
207 LLVM_ABI SCEVCouldNotCompute();
208
209 /// Methods for support type inquiry through isa, cast, and dyn_cast:
210 LLVM_ABI static bool classof(const SCEV *S);
211};
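
// A typical client-side check (illustrative sketch; SE and L stand for a
// ScalarEvolution instance and a Loop pointer and are assumptions for the
// example, not part of this file):
//
//   const SCEV *BTC = SE.getBackedgeTakenCount(L);
//   if (isa<SCEVCouldNotCompute>(BTC))
//     return; // The backedge-taken count could not be analyzed.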
212
213/// This class represents an assumption made using SCEV expressions which can
214/// be checked at run-time.
215class SCEVPredicate : public FoldingSetNode {
216 friend struct FoldingSetTrait<SCEVPredicate>;
217
218 /// A reference to an Interned FoldingSetNodeID for this node. The
219 /// ScalarEvolution's BumpPtrAllocator holds the data.
220 FoldingSetNodeIDRef FastID;
221
222public:
223 enum SCEVPredicateKind { P_Union, P_Compare, P_Wrap };
224
225protected:
226 SCEVPredicateKind Kind;
227 ~SCEVPredicate() = default;
228 SCEVPredicate(const SCEVPredicate &) = default;
229 SCEVPredicate &operator=(const SCEVPredicate &) = default;
230
231public:
232 LLVM_ABI SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);
233
234 SCEVPredicateKind getKind() const { return Kind; }
235
236 /// Returns the estimated complexity of this predicate. This is roughly
237 /// measured in the number of run-time checks required.
238 virtual unsigned getComplexity() const { return 1; }
239
240 /// Returns true if the predicate is always true. This means that no
241 /// assumptions were made and nothing needs to be checked at run-time.
242 virtual bool isAlwaysTrue() const = 0;
243
244 /// Returns true if this predicate implies \p N.
245 virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const = 0;
246
247 /// Prints a textual representation of this predicate with an indentation of
248 /// \p Depth.
249 virtual void print(raw_ostream &OS, unsigned Depth = 0) const = 0;
250};
251
252inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
253 P.print(OS);
254 return OS;
255}
256
257// Specialize FoldingSetTrait for SCEVPredicate to avoid needing to compute
258// temporary FoldingSetNodeID values.
259template <>
260struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
261 static void Profile(const SCEVPredicate &X, FoldingSetNodeID &ID) {
262 ID = X.FastID;
263 }
264
265 static bool Equals(const SCEVPredicate &X, const FoldingSetNodeID &ID,
266 unsigned IDHash, FoldingSetNodeID &TempID) {
267 return ID == X.FastID;
268 }
269
270 static unsigned ComputeHash(const SCEVPredicate &X,
271 FoldingSetNodeID &TempID) {
272 return X.FastID.ComputeHash();
273 }
274};
275
276/// This class represents an assumption that the expression LHS Pred RHS
277/// evaluates to true, and this can be checked at run-time.
278class LLVM_ABI SCEVComparePredicate final : public SCEVPredicate {
279 /// We assume that LHS Pred RHS is true.
280 const ICmpInst::Predicate Pred;
281 const SCEV *LHS;
282 const SCEV *RHS;
283
284public:
285 SCEVComparePredicate(const FoldingSetNodeIDRef ID,
286 const ICmpInst::Predicate Pred,
287 const SCEV *LHS, const SCEV *RHS);
288
289 /// Implementation of the SCEVPredicate interface
290 bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override;
291 void print(raw_ostream &OS, unsigned Depth = 0) const override;
292 bool isAlwaysTrue() const override;
293
294 ICmpInst::Predicate getPredicate() const { return Pred; }
295
296 /// Returns the left hand side of the predicate.
297 const SCEV *getLHS() const { return LHS; }
298
299 /// Returns the right hand side of the predicate.
300 const SCEV *getRHS() const { return RHS; }
301
302 /// Methods for support type inquiry through isa, cast, and dyn_cast:
303 static bool classof(const SCEVPredicate *P) {
304 return P->getKind() == P_Compare;
305 }
306};
307
308/// This class represents an assumption made on an AddRec expression. Given an
/// affine AddRec expression {a,+,b}, we assume that it has the NSSW or NUSW
/// flags (defined below) in the first X iterations of the loop, where X is a
/// SCEV expression returned by getPredicatedBackedgeTakenCount().
312///
313/// Note that this does not imply that X is equal to the backedge taken
314/// count. This means that if we have a nusw predicate for i32 {0,+,1} with a
315/// predicated backedge taken count of X, we only guarantee that {0,+,1} has
316/// nusw in the first X iterations. {0,+,1} may still wrap in the loop if we
317/// have more than X iterations.
318class LLVM_ABI SCEVWrapPredicate final : public SCEVPredicate {
319public:
320 /// Similar to SCEV::NoWrapFlags, but with slightly different semantics
321 /// for FlagNUSW. The increment is considered to be signed, and a + b
322 /// (where b is the increment) is considered to wrap if:
323 /// zext(a + b) != zext(a) + sext(b)
324 ///
  /// If Signed is a function that takes an n-bit tuple and maps to the
  /// integer domain as the tuple's value interpreted as two's complement,
  /// and Unsigned is a function that takes an n-bit tuple and maps to the
  /// integer domain as the base-two value of the input tuple, then a + b
  /// has IncrementNUSW iff:
  ///
  ///   0 <= Unsigned(a) + Signed(b) < 2^n
  ///
  /// The IncrementNSSW flag has identical semantics to SCEV::FlagNSW.
  ///
  /// Note that the IncrementNUSW flag is not commutative: if base + inc
  /// has IncrementNUSW, then inc + base doesn't necessarily have this
  /// property. The reason for this is that this is used for sign/zero
  /// extending affine AddRec SCEV expressions when a SCEVWrapPredicate is
  /// assumed. A {base,+,inc} expression is already non-commutative with
  /// regard to base and inc, since it is interpreted as:
  /// (((base + inc) + inc) + inc) ...
342 enum IncrementWrapFlags {
343 IncrementAnyWrap = 0, // No guarantee.
344 IncrementNUSW = (1 << 0), // No unsigned with signed increment wrap.
345 IncrementNSSW = (1 << 1), // No signed with signed increment wrap
346 // (equivalent with SCEV::NSW)
347 IncrementNoWrapMask = (1 << 2) - 1
348 };
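
  // Worked example (illustrative, not from the source): with n = 8, take
  // a = 250 and a signed increment b = -10 (bit pattern 246). Then
  // Unsigned(a) + Signed(b) = 250 - 10 = 240, which lies in [0, 2^8), so
  // a + b has IncrementNUSW even though the plain unsigned sum 250 + 246
  // wraps modulo 256.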
349
350 /// Convenient IncrementWrapFlags manipulation methods.
351 [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
352 clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
353 SCEVWrapPredicate::IncrementWrapFlags OffFlags) {
354 assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
355 assert((OffFlags & IncrementNoWrapMask) == OffFlags &&
356 "Invalid flags value!");
357 return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & ~OffFlags);
358 }
359
360 [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
361 maskFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, int Mask) {
362 assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
363 assert((Mask & IncrementNoWrapMask) == Mask && "Invalid mask value!");
364
365 return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & Mask);
366 }
367
368 [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
369 setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
370 SCEVWrapPredicate::IncrementWrapFlags OnFlags) {
371 assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
372 assert((OnFlags & IncrementNoWrapMask) == OnFlags &&
373 "Invalid flags value!");
374
375 return (SCEVWrapPredicate::IncrementWrapFlags)(Flags | OnFlags);
376 }
377
378 /// Returns the set of SCEVWrapPredicate no wrap flags implied by a
379 /// SCEVAddRecExpr.
380 [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
381 getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE);
382
383private:
384 const SCEVAddRecExpr *AR;
385 IncrementWrapFlags Flags;
386
387public:
388 explicit SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
389 const SCEVAddRecExpr *AR,
390 IncrementWrapFlags Flags);
391
  /// Returns the set of assumed no-overflow flags.
393 IncrementWrapFlags getFlags() const { return Flags; }
394
395 /// Implementation of the SCEVPredicate interface
396 const SCEVAddRecExpr *getExpr() const;
397 bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override;
398 void print(raw_ostream &OS, unsigned Depth = 0) const override;
399 bool isAlwaysTrue() const override;
400
401 /// Methods for support type inquiry through isa, cast, and dyn_cast:
402 static bool classof(const SCEVPredicate *P) {
403 return P->getKind() == P_Wrap;
404 }
405};
406
407/// This class represents a composition of other SCEV predicates, and is the
408/// class that most clients will interact with. This is equivalent to a
409/// logical "AND" of all the predicates in the union.
410///
/// NB! Unlike other SCEVPredicate sub-classes, this class does not live in
/// the ScalarEvolution::Preds folding set. This is why the \c add function
/// is sound.
413class LLVM_ABI SCEVUnionPredicate final : public SCEVPredicate {
414private:
415 using PredicateMap =
416 DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>;
417
418 /// Vector with references to all predicates in this union.
419 SmallVector<const SCEVPredicate *, 16> Preds;
420
421 /// Adds a predicate to this union.
422 void add(const SCEVPredicate *N, ScalarEvolution &SE);
423
424public:
425 SCEVUnionPredicate(ArrayRef<const SCEVPredicate *> Preds,
426 ScalarEvolution &SE);
427
428 ArrayRef<const SCEVPredicate *> getPredicates() const { return Preds; }
429
430 /// Implementation of the SCEVPredicate interface
431 bool isAlwaysTrue() const override;
432 bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override;
433 void print(raw_ostream &OS, unsigned Depth) const override;
434
  /// We estimate the complexity of a union predicate as the number of
  /// predicates in the union.
437 unsigned getComplexity() const override { return Preds.size(); }
438
439 /// Methods for support type inquiry through isa, cast, and dyn_cast:
440 static bool classof(const SCEVPredicate *P) {
441 return P->getKind() == P_Union;
442 }
443};
444
445/// The main scalar evolution driver. Because client code (intentionally)
446/// can't do much with the SCEV objects directly, they must ask this class
447/// for services.
448class ScalarEvolution {
449 friend class ScalarEvolutionsTest;
450
451public:
452 /// An enum describing the relationship between a SCEV and a loop.
453 enum LoopDisposition {
454 LoopVariant, ///< The SCEV is loop-variant (unknown).
455 LoopInvariant, ///< The SCEV is loop-invariant.
456 LoopComputable ///< The SCEV varies predictably with the loop.
457 };
458
459 /// An enum describing the relationship between a SCEV and a basic block.
460 enum BlockDisposition {
461 DoesNotDominateBlock, ///< The SCEV does not dominate the block.
462 DominatesBlock, ///< The SCEV dominates the block.
463 ProperlyDominatesBlock ///< The SCEV properly dominates the block.
464 };
465
466 /// Convenient NoWrapFlags manipulation that hides enum casts and is
467 /// visible in the ScalarEvolution name space.
468 [[nodiscard]] static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags,
469 int Mask) {
470 return (SCEV::NoWrapFlags)(Flags & Mask);
471 }
472 [[nodiscard]] static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags,
473 SCEV::NoWrapFlags OnFlags) {
474 return (SCEV::NoWrapFlags)(Flags | OnFlags);
475 }
476 [[nodiscard]] static SCEV::NoWrapFlags
477 clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) {
478 return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
479 }
480 [[nodiscard]] static bool hasFlags(SCEV::NoWrapFlags Flags,
481 SCEV::NoWrapFlags TestFlags) {
    return TestFlags == maskFlags(Flags, TestFlags);
483 };
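
  // Illustrative sketch (not part of the API): combining and querying no-wrap
  // flags with the static helpers above. The variable names are assumptions
  // for the example.
  //
  //   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  //   Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);   // add NUW
  //   Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNW);    // NUW implies NW
  //   bool HasNUW = ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW); // true
  //   Flags = ScalarEvolution::clearFlags(Flags, SCEV::FlagNUW); // drop NUW again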
484
485 LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
486 AssumptionCache &AC, DominatorTree &DT,
487 LoopInfo &LI);
488 LLVM_ABI ScalarEvolution(ScalarEvolution &&Arg);
489 LLVM_ABI ~ScalarEvolution();
490
491 LLVMContext &getContext() const { return F.getContext(); }
492
493 /// Test if values of the given type are analyzable within the SCEV
494 /// framework. This primarily includes integer types, and it can optionally
495 /// include pointer types if the ScalarEvolution class has access to
496 /// target-specific information.
497 LLVM_ABI bool isSCEVable(Type *Ty) const;
498
499 /// Return the size in bits of the specified type, for which isSCEVable must
500 /// return true.
501 LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const;
502
503 /// Return a type with the same bitwidth as the given type and which
504 /// represents how SCEV will treat the given type, for which isSCEVable must
505 /// return true. For pointer types, this is the pointer-sized integer type.
506 LLVM_ABI Type *getEffectiveSCEVType(Type *Ty) const;
507
508 // Returns a wider type among {Ty1, Ty2}.
509 LLVM_ABI Type *getWiderType(Type *Ty1, Type *Ty2) const;
510
511 /// Return true if there exists a point in the program at which both
512 /// A and B could be operands to the same instruction.
  /// SCEV expressions are generally assumed to correspond to instructions
  /// which could exist in IR. In general, this requires that there exists
515 /// a use point in the program where all operands dominate the use.
516 ///
517 /// Example:
518 /// loop {
519 /// if
520 /// loop { v1 = load @global1; }
521 /// else
522 /// loop { v2 = load @global2; }
523 /// }
  /// No SCEV with operands v1 and v2 can exist in this program.
525 LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B);
526
527 /// Return true if the SCEV is a scAddRecExpr or it contains
528 /// scAddRecExpr. The result will be cached in HasRecMap.
529 LLVM_ABI bool containsAddRecurrence(const SCEV *S);
530
  /// Is the operation \p BinOp between \p LHS and \p RHS provably free of
  /// signed/unsigned overflow (\p Signed)? If \p CtxI is specified, the
533 /// no-overflow fact should be true in the context of this instruction.
534 LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
535 const SCEV *LHS, const SCEV *RHS,
536 const Instruction *CtxI = nullptr);
537
538 /// Parse NSW/NUW flags from add/sub/mul IR binary operation \p Op into
539 /// SCEV no-wrap flags, and deduce flag[s] that aren't known yet.
540 /// Does not mutate the original instruction. Returns std::nullopt if it could
541 /// not deduce more precise flags than the instruction already has, otherwise
542 /// returns proven flags.
543 LLVM_ABI std::optional<SCEV::NoWrapFlags>
544 getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO);
545
546 /// Notify this ScalarEvolution that \p User directly uses SCEVs in \p Ops.
547 LLVM_ABI void registerUser(const SCEV *User, ArrayRef<const SCEV *> Ops);
548
549 /// Return true if the SCEV expression contains an undef value.
550 LLVM_ABI bool containsUndefs(const SCEV *S) const;
551
552 /// Return true if the SCEV expression contains a Value that has been
553 /// optimised out and is now a nullptr.
554 LLVM_ABI bool containsErasedValue(const SCEV *S) const;
555
556 /// Return a SCEV expression for the full generality of the specified
557 /// expression.
558 LLVM_ABI const SCEV *getSCEV(Value *V);
559
560 /// Return an existing SCEV for V if there is one, otherwise return nullptr.
561 LLVM_ABI const SCEV *getExistingSCEV(Value *V);
562
563 LLVM_ABI const SCEV *getConstant(ConstantInt *V);
564 LLVM_ABI const SCEV *getConstant(const APInt &Val);
565 LLVM_ABI const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
566 LLVM_ABI const SCEV *getLosslessPtrToIntExpr(const SCEV *Op,
567 unsigned Depth = 0);
568 LLVM_ABI const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty);
569 LLVM_ABI const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty,
570 unsigned Depth = 0);
571 LLVM_ABI const SCEV *getVScale(Type *Ty);
572 LLVM_ABI const SCEV *getElementCount(Type *Ty, ElementCount EC);
573 LLVM_ABI const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty,
574 unsigned Depth = 0);
575 LLVM_ABI const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
576 unsigned Depth = 0);
577 LLVM_ABI const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty,
578 unsigned Depth = 0);
579 LLVM_ABI const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty,
580 unsigned Depth = 0);
581 LLVM_ABI const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
582 LLVM_ABI const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
583 LLVM_ABI const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
584 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
585 unsigned Depth = 0);
586 const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
587 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
588 unsigned Depth = 0) {
589 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
590 return getAddExpr(Ops, Flags, Depth);
591 }
592 const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
593 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
594 unsigned Depth = 0) {
595 SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
596 return getAddExpr(Ops, Flags, Depth);
597 }
598 LLVM_ABI const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
599 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
600 unsigned Depth = 0);
601 const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
602 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
603 unsigned Depth = 0) {
604 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
605 return getMulExpr(Ops, Flags, Depth);
606 }
607 const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
608 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
609 unsigned Depth = 0) {
610 SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
611 return getMulExpr(Ops, Flags, Depth);
612 }
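
  // Illustrative sketch: building the SCEV for "A + 2 * B" given IR values A
  // and B of the same integer type and a ScalarEvolution instance SE (all
  // names are assumptions for the example).
  //
  //   const SCEV *SA = SE.getSCEV(A);
  //   const SCEV *SB = SE.getSCEV(B);
  //   const SCEV *Two = SE.getConstant(SA->getType(), 2);
  //   const SCEV *Expr = SE.getAddExpr(SA, SE.getMulExpr(Two, SB));
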
613 LLVM_ABI const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
614 LLVM_ABI const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
615 LLVM_ABI const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
616 LLVM_ABI const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
617 const Loop *L, SCEV::NoWrapFlags Flags);
618 LLVM_ABI const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
619 const Loop *L, SCEV::NoWrapFlags Flags);
620 const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
621 const Loop *L, SCEV::NoWrapFlags Flags) {
622 SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
    return getAddRecExpr(NewOp, L, Flags);
624 }
625
626 /// Checks if \p SymbolicPHI can be rewritten as an AddRecExpr under some
  /// Predicates. If successful, return the pair <AddRecExpr, Predicates>.
628 /// The function is intended to be called from PSCEV (the caller will decide
629 /// whether to actually add the predicates and carry out the rewrites).
630 LLVM_ABI std::optional<
631 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
632 createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI);
633
634 /// Returns an expression for a GEP
635 ///
  /// \p GEP The GEP. The indices contained in the GEP itself are ignored;
  /// instead we use IndexExprs.
638 /// \p IndexExprs The expressions for the indices.
639 LLVM_ABI const SCEV *
640 getGEPExpr(GEPOperator *GEP, const SmallVectorImpl<const SCEV *> &IndexExprs);
641 LLVM_ABI const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
642 LLVM_ABI const SCEV *getMinMaxExpr(SCEVTypes Kind,
643 SmallVectorImpl<const SCEV *> &Operands);
644 LLVM_ABI const SCEV *
645 getSequentialMinMaxExpr(SCEVTypes Kind,
646 SmallVectorImpl<const SCEV *> &Operands);
647 LLVM_ABI const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
648 LLVM_ABI const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
649 LLVM_ABI const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
650 LLVM_ABI const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
651 LLVM_ABI const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
652 LLVM_ABI const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
653 LLVM_ABI const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
654 bool Sequential = false);
655 LLVM_ABI const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
656 bool Sequential = false);
657 LLVM_ABI const SCEV *getUnknown(Value *V);
658 LLVM_ABI const SCEV *getCouldNotCompute();
659
660 /// Return a SCEV for the constant 0 of a specific type.
  const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }
662
663 /// Return a SCEV for the constant 1 of a specific type.
  const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
665
666 /// Return a SCEV for the constant \p Power of two.
667 const SCEV *getPowerOfTwo(Type *Ty, unsigned Power) {
668 assert(Power < getTypeSizeInBits(Ty) && "Power out of range");
    return getConstant(APInt::getOneBitSet(getTypeSizeInBits(Ty), Power));
670 }
671
672 /// Return a SCEV for the constant -1 of a specific type.
673 const SCEV *getMinusOne(Type *Ty) {
    return getConstant(Ty, -1, /*isSigned=*/true);
675 }
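
  // For example (illustrative): for a 32-bit integer type Ty, getZero(Ty) and
  // getOne(Ty) yield the constants 0 and 1, getPowerOfTwo(Ty, 4) yields 16,
  // and getMinusOne(Ty) yields the all-ones constant 0xFFFFFFFF (i.e. -1).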
676
677 /// Return an expression for a TypeSize.
678 LLVM_ABI const SCEV *getSizeOfExpr(Type *IntTy, TypeSize Size);
679
680 /// Return an expression for the alloc size of AllocTy that is type IntTy
681 LLVM_ABI const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
682
683 /// Return an expression for the store size of StoreTy that is type IntTy
684 LLVM_ABI const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
685
686 /// Return an expression for offsetof on the given field with type IntTy
687 LLVM_ABI const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy,
688 unsigned FieldNo);
689
690 /// Return the SCEV object corresponding to -V.
691 LLVM_ABI const SCEV *
692 getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
693
694 /// Return the SCEV object corresponding to ~V.
695 LLVM_ABI const SCEV *getNotSCEV(const SCEV *V);
696
697 /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
698 ///
699 /// If the LHS and RHS are pointers which don't share a common base
700 /// (according to getPointerBase()), this returns a SCEVCouldNotCompute.
701 /// To compute the difference between two unrelated pointers, you can
702 /// explicitly convert the arguments using getPtrToIntExpr(), for pointer
703 /// types that support it.
704 LLVM_ABI const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
705 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
706 unsigned Depth = 0);
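
  // Illustrative sketch: computing the byte distance between two pointer
  // values P and Q (P, Q, SE and IntPtrTy are assumptions for the example).
  // If the pointers do not share a common base, convert them explicitly:
  //
  //   const SCEV *Diff = SE.getMinusSCEV(SE.getSCEV(P), SE.getSCEV(Q));
  //   if (isa<SCEVCouldNotCompute>(Diff))
  //     Diff = SE.getMinusSCEV(SE.getPtrToIntExpr(SE.getSCEV(P), IntPtrTy),
  //                            SE.getPtrToIntExpr(SE.getSCEV(Q), IntPtrTy));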
707
708 /// Compute ceil(N / D). N and D are treated as unsigned values.
709 ///
710 /// Since SCEV doesn't have native ceiling division, this generates a
711 /// SCEV expression of the following form:
712 ///
713 /// umin(N, 1) + floor((N - umin(N, 1)) / D)
714 ///
715 /// A denominator of zero or poison is handled the same way as getUDivExpr().
716 LLVM_ABI const SCEV *getUDivCeilSCEV(const SCEV *N, const SCEV *D);
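
  // Worked example (illustrative): with N = 7 and D = 3,
  //   umin(7, 1) + floor((7 - umin(7, 1)) / 3) = 1 + floor(6 / 3) = 3 = ceil(7/3),
  // and with N = 0 the result is 0. The umin term avoids the N + D - 1
  // addition of the naive formulation, which could overflow for large N.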
717
718 /// Return a SCEV corresponding to a conversion of the input value to the
719 /// specified type. If the type must be extended, it is zero extended.
720 LLVM_ABI const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
721 unsigned Depth = 0);
722
723 /// Return a SCEV corresponding to a conversion of the input value to the
724 /// specified type. If the type must be extended, it is sign extended.
725 LLVM_ABI const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty,
726 unsigned Depth = 0);
727
728 /// Return a SCEV corresponding to a conversion of the input value to the
729 /// specified type. If the type must be extended, it is zero extended. The
730 /// conversion must not be narrowing.
731 LLVM_ABI const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
732
733 /// Return a SCEV corresponding to a conversion of the input value to the
734 /// specified type. If the type must be extended, it is sign extended. The
735 /// conversion must not be narrowing.
736 LLVM_ABI const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
737
738 /// Return a SCEV corresponding to a conversion of the input value to the
739 /// specified type. If the type must be extended, it is extended with
740 /// unspecified bits. The conversion must not be narrowing.
741 LLVM_ABI const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
742
743 /// Return a SCEV corresponding to a conversion of the input value to the
744 /// specified type. The conversion must not be widening.
745 LLVM_ABI const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
746
747 /// Promote the operands to the wider of the types using zero-extension, and
748 /// then perform a umax operation with them.
749 LLVM_ABI const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS,
750 const SCEV *RHS);
751
752 /// Promote the operands to the wider of the types using zero-extension, and
753 /// then perform a umin operation with them.
754 LLVM_ABI const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS,
755 const SCEV *RHS,
756 bool Sequential = false);
757
758 /// Promote the operands to the wider of the types using zero-extension, and
759 /// then perform a umin operation with them. N-ary function.
760 LLVM_ABI const SCEV *
761 getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
762 bool Sequential = false);
763
764 /// Transitively follow the chain of pointer-type operands until reaching a
765 /// SCEV that does not have a single pointer operand. This returns a
766 /// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
767 /// cases do exist.
768 LLVM_ABI const SCEV *getPointerBase(const SCEV *V);
769
770 /// Compute an expression equivalent to S - getPointerBase(S).
771 LLVM_ABI const SCEV *removePointerBase(const SCEV *S);
772
  /// Return a SCEV expression for the specified value at the specified scope
  /// in the program. The loop L specifies the scope at which to evaluate the
  /// expression: a null loop means the top level, and a non-null loop means
  /// the point immediately inside that loop.
777 ///
778 /// This method can be used to compute the exit value for a variable defined
779 /// in a loop by querying what the value will hold in the parent loop.
780 ///
781 /// In the case that a relevant loop exit value cannot be computed, the
782 /// original value V is returned.
783 LLVM_ABI const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
784
785 /// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
786 LLVM_ABI const SCEV *getSCEVAtScope(Value *V, const Loop *L);
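
  // Illustrative sketch: querying the value an induction variable IV holds
  // once loop L has finished, as seen from L's parent scope (SE, IV and L are
  // assumptions for the example).
  //
  //   const SCEV *Exit = SE.getSCEVAtScope(IV, L->getParentLoop());
  //   if (SE.isLoopInvariant(Exit, L))
  //     ... Exit describes the value of IV after the loop ...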
787
788 /// Test whether entry to the loop is protected by a conditional between LHS
789 /// and RHS. This is used to help avoid max expressions in loop trip
790 /// counts, and to eliminate casts.
791 LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred,
792 const SCEV *LHS, const SCEV *RHS);
793
794 /// Test whether entry to the basic block is protected by a conditional
795 /// between LHS and RHS.
796 LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
797 CmpPredicate Pred,
798 const SCEV *LHS,
799 const SCEV *RHS);
800
801 /// Test whether the backedge of the loop is protected by a conditional
802 /// between LHS and RHS. This is used to eliminate casts.
803 LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred,
804 const SCEV *LHS, const SCEV *RHS);
805
  /// A version of getTripCountFromExitCount below which always picks an
  /// evaluation type that cannot result in overflow.
808 LLVM_ABI const SCEV *getTripCountFromExitCount(const SCEV *ExitCount);
809
810 /// Convert from an "exit count" (i.e. "backedge taken count") to a "trip
811 /// count". A "trip count" is the number of times the header of the loop
812 /// will execute if an exit is taken after the specified number of backedges
  /// have been taken (e.g. TripCount = ExitCount + 1). Note that the
  /// expression can overflow if ExitCount = UINT_MAX. If EvalTy is not wide
  /// enough to hold the result without overflow, the result wraps with
  /// unsigned 2's-complement semantics, e.g. EC = 255 (i8) gives TC = 0 (i8).
817 LLVM_ABI const SCEV *getTripCountFromExitCount(const SCEV *ExitCount,
818 Type *EvalTy, const Loop *L);
819
820 /// Returns the exact trip count of the loop if we can compute it, and
821 /// the result is a small constant. '0' is used to represent an unknown
822 /// or non-constant trip count. Note that a trip count is simply one more
823 /// than the backedge taken count for the loop.
824 LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L);
825
826 /// Return the exact trip count for this loop if we exit through ExitingBlock.
827 /// '0' is used to represent an unknown or non-constant trip count. Note
828 /// that a trip count is simply one more than the backedge taken count for
829 /// the same exit.
830 /// This "trip count" assumes that control exits via ExitingBlock. More
831 /// precisely, it is the number of times that control will reach ExitingBlock
832 /// before taking the branch. For loops with multiple exits, it may not be
  /// the number of times that the loop header executes if the loop exits
834 /// prematurely via another branch.
835 LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L,
836 const BasicBlock *ExitingBlock);
837
838 /// Returns the upper bound of the loop trip count as a normal unsigned
839 /// value.
840 /// Returns 0 if the trip count is unknown, not constant or requires
841 /// SCEV predicates and \p Predicates is nullptr.
842 LLVM_ABI unsigned getSmallConstantMaxTripCount(
843 const Loop *L,
844 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
845
846 /// Returns the largest constant divisor of the trip count as a normal
847 /// unsigned value, if possible. This means that the actual trip count is
848 /// always a multiple of the returned value. Returns 1 if the trip count is
  /// unknown or not guaranteed to be a multiple of a constant. Will also
850 /// return 1 if the trip count is very large (>= 2^32).
851 /// Note that the argument is an exit count for loop L, NOT a trip count.
852 LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L,
853 const SCEV *ExitCount);
854
855 /// Returns the largest constant divisor of the trip count of the
856 /// loop. Will return 1 if no trip count could be computed, or if a
857 /// divisor could not be found.
858 LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L);
859
860 /// Returns the largest constant divisor of the trip count of this loop as a
861 /// normal unsigned value, if possible. This means that the actual trip
862 /// count is always a multiple of the returned value (don't forget the trip
863 /// count could very well be zero as well!). As explained in the comments
864 /// for getSmallConstantTripCount, this assumes that control exits the loop
865 /// via ExitingBlock.
866 LLVM_ABI unsigned
867 getSmallConstantTripMultiple(const Loop *L, const BasicBlock *ExitingBlock);
868
869 /// The terms "backedge taken count" and "exit count" are used
870 /// interchangeably to refer to the number of times the backedge of a loop
871 /// has executed before the loop is exited.
872 enum ExitCountKind {
873 /// An expression exactly describing the number of times the backedge has
874 /// executed when a loop is exited.
875 Exact,
876 /// A constant which provides an upper bound on the exact trip count.
877 ConstantMaximum,
878 /// An expression which provides an upper bound on the exact trip count.
879 SymbolicMaximum,
880 };
881
882 /// Return the number of times the backedge executes before the given exit
883 /// would be taken; if not exactly computable, return SCEVCouldNotCompute.
  /// For a single exit loop, this value is equivalent to the result of
885 /// getBackedgeTakenCount. The loop is guaranteed to exit (via *some* exit)
886 /// before the backedge is executed (ExitCount + 1) times. Note that there
887 /// is no guarantee about *which* exit is taken on the exiting iteration.
888 LLVM_ABI const SCEV *getExitCount(const Loop *L,
889 const BasicBlock *ExitingBlock,
890 ExitCountKind Kind = Exact);
891
892 /// Same as above except this uses the predicated backedge taken info and
893 /// may require predicates.
894 LLVM_ABI const SCEV *
895 getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock,
896 SmallVectorImpl<const SCEVPredicate *> *Predicates,
897 ExitCountKind Kind = Exact);
898
899 /// If the specified loop has a predictable backedge-taken count, return it,
900 /// otherwise return a SCEVCouldNotCompute object. The backedge-taken count is
901 /// the number of times the loop header will be branched to from within the
  /// loop, assuming there are no abnormal exits like exception throws. This is
903 /// one less than the trip count of the loop, since it doesn't count the first
904 /// iteration, when the header is branched to from outside the loop.
905 ///
906 /// Note that it is not valid to call this method on a loop without a
907 /// loop-invariant backedge-taken count (see
908 /// hasLoopInvariantBackedgeTakenCount).
909 LLVM_ABI const SCEV *getBackedgeTakenCount(const Loop *L,
910 ExitCountKind Kind = Exact);
911
912 /// Similar to getBackedgeTakenCount, except it will add a set of
913 /// SCEV predicates to Predicates that are required to be true in order for
914 /// the answer to be correct. Predicates can be checked with run-time
915 /// checks and can be used to perform loop versioning.
916 LLVM_ABI const SCEV *getPredicatedBackedgeTakenCount(
917 const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Predicates);
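
  // Illustrative sketch (SE and L are assumptions for the example): retrieving
  // a backedge-taken count that is only valid under runtime-checkable
  // predicates.
  //
  //   SmallVector<const SCEVPredicate *, 4> Preds;
  //   const SCEV *BTC = SE.getPredicatedBackedgeTakenCount(L, Preds);
  //   if (!isa<SCEVCouldNotCompute>(BTC))
  //     ... BTC is correct provided all predicates in Preds hold at runtime ...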
918
  /// When successful, this returns a SCEVConstant that is greater than or
  /// equal to (i.e. a "conservative over-approximation" of) the value returned
  /// by getBackedgeTakenCount. If such a value cannot be computed, it returns
  /// the SCEVCouldNotCompute object.
923 const SCEV *getConstantMaxBackedgeTakenCount(const Loop *L) {
    return getBackedgeTakenCount(L, ConstantMaximum);
925 }
926
927 /// Similar to getConstantMaxBackedgeTakenCount, except it will add a set of
928 /// SCEV predicates to Predicates that are required to be true in order for
929 /// the answer to be correct. Predicates can be checked with run-time
930 /// checks and can be used to perform loop versioning.
931 LLVM_ABI const SCEV *getPredicatedConstantMaxBackedgeTakenCount(
932 const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Predicates);
933
  /// When successful, this returns a SCEV that is greater than or equal to
  /// (i.e. a "conservative over-approximation" of) the value returned by
  /// getBackedgeTakenCount. If such a value cannot be computed, it returns the
  /// SCEVCouldNotCompute object.
938 const SCEV *getSymbolicMaxBackedgeTakenCount(const Loop *L) {
    return getBackedgeTakenCount(L, SymbolicMaximum);
940 }
941
942 /// Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of
943 /// SCEV predicates to Predicates that are required to be true in order for
944 /// the answer to be correct. Predicates can be checked with run-time
945 /// checks and can be used to perform loop versioning.
946 LLVM_ABI const SCEV *getPredicatedSymbolicMaxBackedgeTakenCount(
947 const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Predicates);
948
949 /// Return true if the backedge taken count is either the value returned by
950 /// getConstantMaxBackedgeTakenCount or zero.
951 LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L);
952
953 /// Return true if the specified loop has an analyzable loop-invariant
954 /// backedge-taken count.
955 LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
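
  // Illustrative sketch of a typical trip-count query (SE and L are
  // assumptions for the example):
  //
  //   if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
  //     const SCEV *BTC = SE.getBackedgeTakenCount(L);
  //     const SCEV *TC = SE.getTripCountFromExitCount(BTC);
  //     ...
  //   }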
956
  // This method should be called by the client when it has made any change
  // that would invalidate SCEV's answers and wants to remove all loop
  // information held internally by ScalarEvolution. This is intended to be
  // used when forgetting loops individually would be too expensive (i.e. for
  // large loop bodies).
962 LLVM_ABI void forgetAllLoops();
963
964 /// This method should be called by the client when it has changed a loop in
  /// a way that may affect ScalarEvolution's ability to compute a trip count,
966 /// or if the loop is deleted. This call is potentially expensive for large
967 /// loop bodies.
968 LLVM_ABI void forgetLoop(const Loop *L);
969
970 // This method invokes forgetLoop for the outermost loop of the given loop
971 // \p L, making ScalarEvolution forget about all this subtree. This needs to
972 // be done whenever we make a transform that may affect the parameters of the
973 // outer loop, such as exit counts for branches.
974 LLVM_ABI void forgetTopmostLoop(const Loop *L);
975
976 /// This method should be called by the client when it has changed a value
  /// in a way that may affect its value, or which may disconnect it from a
978 /// def-use chain linking it to a loop.
979 LLVM_ABI void forgetValue(Value *V);
980
981 /// Forget LCSSA phi node V of loop L to which a new predecessor was added,
982 /// such that it may no longer be trivial.
983 LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V);
984
985 /// Called when the client has changed the disposition of values in
986 /// this loop.
987 ///
988 /// We don't have a way to invalidate per-loop dispositions. Clear and
989 /// recompute is simpler.
990 LLVM_ABI void forgetLoopDispositions();
991
992 /// Called when the client has changed the disposition of values in
993 /// a loop or block.
994 ///
995 /// We don't have a way to invalidate per-loop/per-block dispositions. Clear
996 /// and recompute is simpler.
997 LLVM_ABI void forgetBlockAndLoopDispositions(Value *V = nullptr);
998
999 /// Determine the minimum number of zero bits that S is guaranteed to end in
1000 /// (at every loop iteration). It is, at the same time, the minimum number
1001 /// of times S is divisible by 2. For example, given {4,+,8} it returns 2.
1002 /// If S is guaranteed to be 0, it returns the bitwidth of S.
1003 LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S);
1004
1005 /// Returns the max constant multiple of S.
1006 LLVM_ABI APInt getConstantMultiple(const SCEV *S);
1007
1008 // Returns the max constant multiple of S. If S is exactly 0, return 1.
1009 LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S);
1010
1011 /// Determine the unsigned range for a particular SCEV.
1012 /// NOTE: This returns a copy of the reference returned by getRangeRef.
1013 ConstantRange getUnsignedRange(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_UNSIGNED);
1015 }
1016
1017 /// Determine the min of the unsigned range for a particular SCEV.
1018 APInt getUnsignedRangeMin(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMin();
1020 }
1021
1022 /// Determine the max of the unsigned range for a particular SCEV.
1023 APInt getUnsignedRangeMax(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMax();
1025 }
1026
1027 /// Determine the signed range for a particular SCEV.
1028 /// NOTE: This returns a copy of the reference returned by getRangeRef.
1029 ConstantRange getSignedRange(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_SIGNED);
1031 }
1032
1033 /// Determine the min of the signed range for a particular SCEV.
1034 APInt getSignedRangeMin(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMin();
1036 }
1037
1038 /// Determine the max of the signed range for a particular SCEV.
1039 APInt getSignedRangeMax(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMax();
1041 }
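
  // Illustrative sketch: proving that every runtime value of a SCEV S fits in
  // 16 bits (SE and S are assumptions for the example).
  //
  //   APInt Max = SE.getUnsignedRangeMax(S);
  //   if (Max.ult(1u << 16))
  //     ... S is always < 2^16 ...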
1042
1043 /// Test if the given expression is known to be negative.
1044 LLVM_ABI bool isKnownNegative(const SCEV *S);
1045
1046 /// Test if the given expression is known to be positive.
1047 LLVM_ABI bool isKnownPositive(const SCEV *S);
1048
1049 /// Test if the given expression is known to be non-negative.
1050 LLVM_ABI bool isKnownNonNegative(const SCEV *S);
1051
1052 /// Test if the given expression is known to be non-positive.
1053 LLVM_ABI bool isKnownNonPositive(const SCEV *S);
1054
1055 /// Test if the given expression is known to be non-zero.
1056 LLVM_ABI bool isKnownNonZero(const SCEV *S);
1057
1058 /// Test if the given expression is known to be a power of 2. OrNegative
1059 /// allows matching negative power of 2s, and OrZero allows matching 0.
1060 LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero = false,
1061 bool OrNegative = false);
1062
1063 /// Check that \p S is a multiple of \p M. When \p S is an AddRecExpr, \p S is
1064 /// a multiple of \p M if \p S starts with a multiple of \p M and at every
1065 /// iteration step \p S only adds multiples of \p M. \p Assumptions records
1066 /// the runtime predicates under which \p S is a multiple of \p M.
1067 LLVM_ABI bool
1068 isKnownMultipleOf(const SCEV *S, uint64_t M,
1069 SmallVectorImpl<const SCEVPredicate *> &Assumptions);
1070
1071 /// Splits SCEV expression \p S into two SCEVs. One of them is obtained from
1072 /// \p S by substitution of all AddRec sub-expression related to loop \p L
1073 /// with initial value of that SCEV. The second is obtained from \p S by
1074 /// substitution of all AddRec sub-expressions related to loop \p L with post
1075 /// increment of this AddRec in the loop \p L. In both cases all other AddRec
1076 /// sub-expressions (not related to \p L) remain the same.
  /// If \p S contains a non-invariant unknown SCEV, the function returns
  /// CouldNotCompute SCEV in both values of the pair.
1079 /// For example, for SCEV S={0, +, 1}<L1> + {0, +, 1}<L2> and loop L=L1
1080 /// the function returns pair:
1081 /// first = {0, +, 1}<L2>
1082 /// second = {1, +, 1}<L1> + {0, +, 1}<L2>
1083 /// We can see that for the first AddRec sub-expression it was replaced with
1084 /// 0 (initial value) for the first element and to {1, +, 1}<L1> (post
1085 /// increment value) for the second one. In both cases AddRec expression
1086 /// related to L2 remains the same.
1087 LLVM_ABI std::pair<const SCEV *, const SCEV *>
1088 SplitIntoInitAndPostInc(const Loop *L, const SCEV *S);
1089
1090 /// We'd like to check the predicate on every iteration of the most dominated
1091 /// loop between loops used in LHS and RHS.
1092 /// To do this we use the following list of steps:
  /// 1. Collect the set S of all loops on which either LHS or RHS depends.
  /// 2. If S is non-empty
  /// a. Let PD be the element of S which is dominated by all other elements.
  /// b. Let E(LHS) be the value of LHS on entry of PD.
  /// To get E(LHS), we should just take LHS and replace all AddRecs that are
  /// attached to PD with their entry values.
  /// Define E(RHS) in the same way.
  /// c. Let B(LHS) be the value of LHS on the backedge of PD.
  /// To get B(LHS), we should just take LHS and replace all AddRecs that are
  /// attached to PD with their backedge values.
1103 /// Define B(RHS) in the same way.
1104 /// d. Note that E(LHS) and E(RHS) are automatically available on entry of PD,
1105 /// so we can assert on that.
1106 /// e. Return true if isLoopEntryGuardedByCond(Pred, E(LHS), E(RHS)) &&
1107 /// isLoopBackedgeGuardedByCond(Pred, B(LHS), B(RHS))
1108 LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS,
1109 const SCEV *RHS);
1110
1111 /// Test if the given expression is known to satisfy the condition described
1112 /// by Pred, LHS, and RHS.
1113 LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS,
1114 const SCEV *RHS);
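
  // Illustrative sketch (SE, LHS and RHS are assumptions for the example):
  //
  //   if (SE.isKnownPredicate(ICmpInst::ICMP_SLT, LHS, RHS))
  //     ... LHS is known to be signed-less-than RHS ...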
1115
1116 /// Check whether the condition described by Pred, LHS, and RHS is true or
1117 /// false. If we know it, return the evaluation of this condition. If neither
1118 /// is proved, return std::nullopt.
1119 LLVM_ABI std::optional<bool>
1120 evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
1121
1122 /// Test if the given expression is known to satisfy the condition described
1123 /// by Pred, LHS, and RHS in the given Context.
1124 LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS,
1125 const SCEV *RHS, const Instruction *CtxI);
1126
1127 /// Check whether the condition described by Pred, LHS, and RHS is true or
1128 /// false in the given \p Context. If we know it, return the evaluation of
1129 /// this condition. If neither is proved, return std::nullopt.
1130 LLVM_ABI std::optional<bool> evaluatePredicateAt(CmpPredicate Pred,
1131 const SCEV *LHS,
1132 const SCEV *RHS,
1133 const Instruction *CtxI);
1134
1135 /// Test if the condition described by Pred, LHS, RHS is known to be true on
  /// every iteration of the loop of the recurrence LHS.
1137 LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred,
1138 const SCEVAddRecExpr *LHS,
1139 const SCEV *RHS);
1140
1141 /// Information about the number of loop iterations for which a loop exit's
1142 /// branch condition evaluates to the not-taken path. This is a temporary
1143 /// pair of exact and max expressions that are eventually summarized in
1144 /// ExitNotTakenInfo and BackedgeTakenInfo.
1145 struct ExitLimit {
1146 const SCEV *ExactNotTaken; // The exit is not taken exactly this many times
1147 const SCEV *ConstantMaxNotTaken; // The exit is not taken at most this many
1148 // times
1149 const SCEV *SymbolicMaxNotTaken;
1150
1151 // Not taken either exactly ConstantMaxNotTaken or zero times
1152 bool MaxOrZero = false;
1153
1154 /// A vector of predicate guards for this ExitLimit. The result is only
1155 /// valid if all of the predicates in \c Predicates evaluate to 'true' at
1156 /// run-time.
1157 SmallVector<const SCEVPredicate *, 4> Predicates;
1158
1159 /// Construct either an exact exit limit from a constant, or an unknown
1160 /// one from a SCEVCouldNotCompute. No other types of SCEVs are allowed
1161 /// as arguments and asserts enforce that internally.
1162 /*implicit*/ LLVM_ABI ExitLimit(const SCEV *E);
1163
1164 LLVM_ABI
1165 ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
1166 const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
1167 ArrayRef<ArrayRef<const SCEVPredicate *>> PredLists = {});
1168
1169 LLVM_ABI ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
1170 const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
1171 ArrayRef<const SCEVPredicate *> PredList);
1172
1173 /// Test whether this ExitLimit contains any computed information, or
1174 /// whether it's all SCEVCouldNotCompute values.
1175 bool hasAnyInfo() const {
      return !isa<SCEVCouldNotCompute>(ExactNotTaken) ||
             !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken);
1178 }
1179
1180 /// Test whether this ExitLimit contains all information.
1181 bool hasFullInfo() const {
      return !isa<SCEVCouldNotCompute>(ExactNotTaken);
1183 }
1184 };
1185
1186 /// Compute the number of times the backedge of the specified loop will
1187 /// execute if its exit condition were a conditional branch of ExitCond.
1188 ///
1189 /// \p ControlsOnlyExit is true if ExitCond directly controls the only exit
1190 /// branch. In this case, we can assume that the loop exits only if the
1191 /// condition is true and can infer that failing to meet the condition prior
1192 /// to integer wraparound results in undefined behavior.
1193 ///
1194 /// If \p AllowPredicates is set, this call will try to use a minimal set of
1195 /// SCEV predicates in order to return an exact answer.
1196 LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond,
1197 bool ExitIfTrue,
1198 bool ControlsOnlyExit,
1199 bool AllowPredicates = false);
1200
  /// A predicate is said to be monotonically increasing if it may go from
  /// being false to being true as the loop iterates, but never the other way
  /// around. A predicate is said to be monotonically decreasing if it may go
  /// from being true to being false as the loop iterates, but never the other
1205 /// way around.
1206 enum MonotonicPredicateType {
1207 MonotonicallyIncreasing,
1208 MonotonicallyDecreasing
1209 };
1210
1211 /// If, for all loop invariant X, the predicate "LHS `Pred` X" is
1212 /// monotonically increasing or decreasing, returns
1213 /// Some(MonotonicallyIncreasing) and Some(MonotonicallyDecreasing)
1214 /// respectively. If we could not prove either of these facts, returns
1215 /// std::nullopt.
1216 LLVM_ABI std::optional<MonotonicPredicateType>
1217 getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
1218 ICmpInst::Predicate Pred);
1219
1220 struct LoopInvariantPredicate {
1221 CmpPredicate Pred;
1222 const SCEV *LHS;
1223 const SCEV *RHS;
1224
1225 LoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
1226 : Pred(Pred), LHS(LHS), RHS(RHS) {}
1227 };
1228 /// If the result of the predicate LHS `Pred` RHS is loop invariant with
1229 /// respect to L, return a LoopInvariantPredicate with LHS and RHS being
1230 /// invariants, available at L's entry. Otherwise, return std::nullopt.
1231 LLVM_ABI std::optional<LoopInvariantPredicate>
1232 getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
1233 const Loop *L, const Instruction *CtxI = nullptr);
1234
1235 /// If the result of the predicate LHS `Pred` RHS is loop invariant with
1236 /// respect to L at the given Context during at least the first MaxIter
1237 /// iterations, return a LoopInvariantPredicate with LHS and RHS being
1238 /// invariants, available at L's entry. Otherwise, return std::nullopt.
1239 /// The predicate should be the loop's exit condition.
1240 LLVM_ABI std::optional<LoopInvariantPredicate>
1241 getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred,
1242 const SCEV *LHS,
1243 const SCEV *RHS, const Loop *L,
1244 const Instruction *CtxI,
1245 const SCEV *MaxIter);
1246
1247 LLVM_ABI std::optional<LoopInvariantPredicate>
1248 getLoopInvariantExitCondDuringFirstIterationsImpl(
1249 CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
1250 const Instruction *CtxI, const SCEV *MaxIter);
1251
1252 /// Simplify LHS and RHS in a comparison with predicate Pred. Return true
1253 /// iff any changes were made. If the operands are provably equal or
1254 /// unequal, LHS and RHS are set to the same value and Pred is set to either
1255 /// ICMP_EQ or ICMP_NE.
1256 LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
1257 const SCEV *&RHS, unsigned Depth = 0);
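// A minimal sketch (assuming SE and two compared Values A and B):
//
//   CmpPredicate P = ICmpInst::ICMP_SLE;
//   const SCEV *SimpleLHS = SE.getSCEV(A), *SimpleRHS = SE.getSCEV(B);
//   if (SE.SimplifyICmpOperands(P, SimpleLHS, SimpleRHS))
//     ; // P, SimpleLHS and SimpleRHS now form an equivalent but simpler
//       // comparison (possibly ICMP_EQ/ICMP_NE if provably (un)equal).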
1258
1259 /// Return the "disposition" of the given SCEV with respect to the given
1260 /// loop.
1261 LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
1262
1263 /// Return true if the value of the given SCEV is unchanging in the
1264 /// specified loop.
1265 LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L);
1266
1267 /// Determine if the SCEV can be evaluated at loop's entry. It is true if it
1268 /// doesn't depend on a SCEVUnknown of an instruction which is dominated by
1269 /// the header of loop L.
1270 LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L);
1271
1272 /// Return true if the given SCEV changes value in a known way in the
1273 /// specified loop. This property being true implies that the value is
1274 /// variant in the loop AND that we can emit an expression to compute the
1275 /// value of the expression at any particular loop iteration.
1276 LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
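// A minimal sketch combining the loop-disposition queries above (assuming SE,
// a loop L and a Value V):
//
//   const SCEV *S = SE.getSCEV(V);
//   if (SE.isLoopInvariant(S, L) && SE.isAvailableAtLoopEntry(S, L))
//     ; // S has the same value on every iteration of L and could be
//       // materialized before the loop is entered.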
1277
1278 /// Return the "disposition" of the given SCEV with respect to the given
1279 /// block.
1280 LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S,
1281 const BasicBlock *BB);
1282
1283 /// Return true if the elements that make up the given SCEV dominate the
1284 /// specified basic block.
1285 LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB);
1286
1287 /// Return true if the elements that make up the given SCEV properly
1288 /// dominate the specified basic block.
1289 LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB);
1290
1291 /// Test whether the given SCEV has Op as a direct or indirect operand.
1292 LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const;
1293
1294 /// Return the size of an element read or written by Inst.
1295 LLVM_ABI const SCEV *getElementSize(Instruction *Inst);
1296
1297 LLVM_ABI void print(raw_ostream &OS) const;
1298 LLVM_ABI void verify() const;
1299 LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
1300 FunctionAnalysisManager::Invalidator &Inv);
1301
1302 /// Return the DataLayout associated with the module this SCEV instance is
1303 /// operating on.
1304 const DataLayout &getDataLayout() const { return DL; }
1305
1306 LLVM_ABI const SCEVPredicate *getEqualPredicate(const SCEV *LHS,
1307 const SCEV *RHS);
1308 LLVM_ABI const SCEVPredicate *getComparePredicate(ICmpInst::Predicate Pred,
1309 const SCEV *LHS,
1310 const SCEV *RHS);
1311
1312 LLVM_ABI const SCEVPredicate *
1313 getWrapPredicate(const SCEVAddRecExpr *AR,
1314 SCEVWrapPredicate::IncrementWrapFlags AddedFlags);
1315
1316 /// Re-writes the SCEV according to the Predicates in \p A.
1317 LLVM_ABI const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
1318 const SCEVPredicate &A);
1319 /// Tries to convert the \p S expression to an AddRec expression,
1320 /// adding additional predicates to \p Preds as required.
1321 LLVM_ABI const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
1322 const SCEV *S, const Loop *L,
1323 SmallVectorImpl<const SCEVPredicate *> &Preds);
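// A minimal sketch (assuming SE, a loop L and a PHINode *Phi):
//
//   SmallVector<const SCEVPredicate *, 4> Preds;
//   if (const SCEVAddRecExpr *AR =
//           SE.convertSCEVToAddRecWithPredicates(SE.getSCEV(Phi), L, Preds))
//     ; // AR describes Phi provided a run-time check is emitted for every
//       // predicate collected in Preds.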
1324
1325 /// Compute \p LHS - \p RHS and returns the result as an APInt if it is a
1326 /// constant, and std::nullopt if it isn't.
1327 ///
1328 /// This is intended to be a cheaper version of getMinusSCEV. We can be
1329 /// frugal here since we just bail out of actually constructing and
1330 /// canonicalizing an expression in the cases where the result isn't going
1331 /// to be a constant.
1332 LLVM_ABI std::optional<APInt> computeConstantDifference(const SCEV *LHS,
1333 const SCEV *RHS);
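// A minimal sketch (assuming SE and two integer Values A and B):
//
//   if (std::optional<APInt> Diff =
//           SE.computeConstantDifference(SE.getSCEV(A), SE.getSCEV(B)))
//     ; // *Diff is the constant value of getSCEV(A) - getSCEV(B).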
1334
1335 /// Update no-wrap flags of an AddRec. This may drop the cached info about
1336 /// this AddRec (such as range info) in case the new flags may potentially
1337 /// sharpen it.
1338 LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);
1339
1340 class LoopGuards {
1341 DenseMap<const SCEV *, const SCEV *> RewriteMap;
1342 bool PreserveNUW = false;
1343 bool PreserveNSW = false;
1344 ScalarEvolution &SE;
1345
1346 LoopGuards(ScalarEvolution &SE) : SE(SE) {}
1347
1348 /// Recursively collect loop guards in \p Guards, starting from
1349 /// block \p Block with predecessor \p Pred. The intended starting point
1350 /// is to collect from a loop header and its predecessor.
1351 static void
1352 collectFromBlock(ScalarEvolution &SE, ScalarEvolution::LoopGuards &Guards,
1353 const BasicBlock *Block, const BasicBlock *Pred,
1354 SmallPtrSetImpl<const BasicBlock *> &VisitedBlocks,
1355 unsigned Depth = 0);
1356
1357 /// Collect loop guards in \p Guards, starting from PHINode \p
1358 /// Phi, by calling \p collectFromBlock on the incoming blocks of
1359 /// \p Phi and trying to merge the found constraints into a single
1360 /// combined one for \p Phi.
1361 static void collectFromPHI(
1362 ScalarEvolution &SE, ScalarEvolution::LoopGuards &Guards,
1363 const PHINode &Phi, SmallPtrSetImpl<const BasicBlock *> &VisitedBlocks,
1364 SmallDenseMap<const BasicBlock *, LoopGuards> &IncomingGuards,
1365 unsigned Depth);
1366
1367 public:
1368 /// Collect rewrite map for loop guards for loop \p L, together with flags
1369 /// indicating if NUW and NSW can be preserved during rewriting.
1370 LLVM_ABI static LoopGuards collect(const Loop *L, ScalarEvolution &SE);
1371
1372 /// Try to apply the collected loop guards to \p Expr.
1373 LLVM_ABI const SCEV *rewrite(const SCEV *Expr) const;
1374 };
1375
1376 /// Try to apply information from loop guards for \p L to \p Expr.
1377 LLVM_ABI const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
1378 LLVM_ABI const SCEV *applyLoopGuards(const SCEV *Expr,
1379 const LoopGuards &Guards);
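// A minimal sketch (assuming SE and a loop L); collecting the guards once and
// reusing them is cheaper than repeatedly calling applyLoopGuards(Expr, L):
//
//   ScalarEvolution::LoopGuards Guards =
//       ScalarEvolution::LoopGuards::collect(L, SE);
//   const SCEV *GuardedBTC =
//       SE.applyLoopGuards(SE.getBackedgeTakenCount(L), Guards);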
1380
1381 /// Return true if the loop has no abnormal exits. That is, if the loop
1382 /// is not infinite, it must exit through an explicit edge in the CFG.
1383 /// (As opposed to either a) throwing out of the function or b) entering a
1384 /// well defined infinite loop in some callee.)
1385 bool loopHasNoAbnormalExits(const Loop *L) {
1386 return getLoopProperties(L).HasNoAbnormalExits;
1387 }
1388
1389 /// Return true if this loop is finite by assumption. That is,
1390 /// to be infinite, it must also be undefined.
1391 LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L);
1392
1393 /// Return the set of Values that, if poison, will definitively result in S
1394 /// being poison as well. The returned set may be incomplete, i.e. there can
1395 /// be additional Values that also result in S being poison.
1396 LLVM_ABI void
1397 getPoisonGeneratingValues(SmallPtrSetImpl<const Value *> &Result,
1398 const SCEV *S);
1399
1400 /// Check whether it is poison-safe to represent the expression S using the
1401 /// instruction I. If such a replacement is performed, the poison flags of
1402 /// instructions in DropPoisonGeneratingInsts must be dropped.
1403 LLVM_ABI bool canReuseInstruction(
1404 const SCEV *S, Instruction *I,
1405 SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts);
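// A minimal sketch (assuming SE, a SCEV *S and an Instruction *I computing
// the same value):
//
//   SmallVector<Instruction *> DropFlags;
//   if (SE.canReuseInstruction(S, I, DropFlags))
//     for (Instruction *DI : DropFlags)
//       DI->dropPoisonGeneratingFlags();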
1406
1407 class FoldID {
1408 const SCEV *Op = nullptr;
1409 const Type *Ty = nullptr;
1410 unsigned short C;
1411
1412 public:
1413 FoldID(SCEVTypes C, const SCEV *Op, const Type *Ty) : Op(Op), Ty(Ty), C(C) {
1414 assert(Op);
1415 assert(Ty);
1416 }
1417
1418 FoldID(unsigned short C) : C(C) {}
1419
1420 unsigned computeHash() const {
1421 return detail::combineHashValue(
1422 C, detail::combineHashValue(reinterpret_cast<uintptr_t>(Op),
1423 reinterpret_cast<uintptr_t>(Ty)));
1424 }
1425
1426 bool operator==(const FoldID &RHS) const {
1427 return std::tie(Op, Ty, C) == std::tie(RHS.Op, RHS.Ty, RHS.C);
1428 }
1429 };
1430
1431private:
1432 /// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
1433 /// Value is deleted.
1434 class LLVM_ABI SCEVCallbackVH final : public CallbackVH {
1435 ScalarEvolution *SE;
1436
1437 void deleted() override;
1438 void allUsesReplacedWith(Value *New) override;
1439
1440 public:
1441 SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
1442 };
1443
1444 friend class SCEVCallbackVH;
1445 friend class SCEVExpander;
1446 friend class SCEVUnknown;
1447
1448 /// The function we are analyzing.
1449 Function &F;
1450
1451 /// Data layout of the module.
1452 const DataLayout &DL;
1453
1454 /// Does the module have any calls to the llvm.experimental.guard intrinsic
1455 /// at all? If this is false, we avoid doing work that will only help if
1456 /// there are guards present in the IR.
1457 bool HasGuards;
1458
1459 /// The target library information for the target we are targeting.
1460 TargetLibraryInfo &TLI;
1461
1462 /// The tracker for \@llvm.assume intrinsics in this function.
1463 AssumptionCache &AC;
1464
1465 /// The dominator tree.
1466 DominatorTree &DT;
1467
1468 /// The loop information for the function we are currently analyzing.
1469 LoopInfo &LI;
1470
1471 /// This SCEV is used to represent unknown trip counts and things.
1472 std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;
1473
1474 /// The type for HasRecMap.
1475 using HasRecMapType = DenseMap<const SCEV *, bool>;
1476
1477 /// This is a cache to record whether a SCEV contains any scAddRecExpr.
1478 HasRecMapType HasRecMap;
1479
1480 /// The type for ExprValueMap.
1481 using ValueSetVector = SmallSetVector<Value *, 4>;
1482 using ExprValueMapType = DenseMap<const SCEV *, ValueSetVector>;
1483
1484 /// ExprValueMap -- This map records the original values from which
1485 /// the SCEV expr is generated.
1486 ExprValueMapType ExprValueMap;
1487
1488 /// The type for ValueExprMap.
1489 using ValueExprMapType =
1490 DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *>>;
1491
1492 /// This is a cache of the values we have analyzed so far.
1493 ValueExprMapType ValueExprMap;
1494
1495 /// This is a cache for expressions that got folded to a different existing
1496 /// SCEV.
1497 DenseMap<FoldID, const SCEV *> FoldCache;
1498 DenseMap<const SCEV *, SmallVector<FoldID, 2>> FoldCacheUser;
1499
1500 /// Mark predicate values currently being processed by isImpliedCond.
1501 SmallPtrSet<const Value *, 6> PendingLoopPredicates;
1502
1503 /// Mark SCEVUnknown Phis currently being processed by getRangeRef.
1504 SmallPtrSet<const PHINode *, 6> PendingPhiRanges;
1505
1506 /// Mark SCEVUnknown Phis currently being processed by getRangeRefIter.
1507 SmallPtrSet<const PHINode *, 6> PendingPhiRangesIter;
1508
1509 // Mark SCEVUnknown Phis currently being processed by isImpliedViaMerge.
1510 SmallPtrSet<const PHINode *, 6> PendingMerges;
1511
1512 /// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
1513 /// conditions dominating the backedge of a loop.
1514 bool WalkingBEDominatingConds = false;
1515
1516 /// Set to true by isKnownPredicateViaSplitting when we're trying to prove a
1517 /// predicate by splitting it into a set of independent predicates.
1518 bool ProvingSplitPredicate = false;
1519
1520 /// Memoized values for the getConstantMultiple method.
1521 DenseMap<const SCEV *, APInt> ConstantMultipleCache;
1522
1523 /// Return the Value set from which the SCEV expr is generated.
1524 ArrayRef<Value *> getSCEVValues(const SCEV *S);
1525
1526 /// Private helper method for the getConstantMultiple method.
1527 APInt getConstantMultipleImpl(const SCEV *S);
1528
1529 /// Information about the number of times a particular loop exit may be
1530 /// reached before exiting the loop.
1531 struct ExitNotTakenInfo {
1532 PoisoningVH<BasicBlock> ExitingBlock;
1533 const SCEV *ExactNotTaken;
1534 const SCEV *ConstantMaxNotTaken;
1535 const SCEV *SymbolicMaxNotTaken;
1536 SmallVector<const SCEVPredicate *, 4> Predicates;
1537
1538 explicit ExitNotTakenInfo(PoisoningVH<BasicBlock> ExitingBlock,
1539 const SCEV *ExactNotTaken,
1540 const SCEV *ConstantMaxNotTaken,
1541 const SCEV *SymbolicMaxNotTaken,
1542 ArrayRef<const SCEVPredicate *> Predicates)
1543 : ExitingBlock(ExitingBlock), ExactNotTaken(ExactNotTaken),
1544 ConstantMaxNotTaken(ConstantMaxNotTaken),
1545 SymbolicMaxNotTaken(SymbolicMaxNotTaken), Predicates(Predicates) {}
1546
1547 bool hasAlwaysTruePredicate() const {
1548 return Predicates.empty();
1549 }
1550 };
1551
1552 /// Information about the backedge-taken count of a loop. This currently
1553 /// includes an exact count and a maximum count.
1554 ///
1555 class BackedgeTakenInfo {
1556 friend class ScalarEvolution;
1557
1558 /// A list of computable exits and their not-taken counts. Loops almost
1559 /// never have more than one computable exit.
1560 SmallVector<ExitNotTakenInfo, 1> ExitNotTaken;
1561
1562 /// Expression indicating the least constant maximum backedge-taken count of
1563 /// the loop that is known, or a SCEVCouldNotCompute. This expression is
1564 /// only valid if the predicates associated with all loop exits are true.
1565 const SCEV *ConstantMax = nullptr;
1566
1567 /// Indicates whether \c ExitNotTaken has an element for every exiting
1568 /// block in the loop.
1569 bool IsComplete = false;
1570
1571 /// Expression indicating the least maximum backedge-taken count of the loop
1572 /// that is known, or a SCEVCouldNotCompute. Lazily computed on first query.
1573 const SCEV *SymbolicMax = nullptr;
1574
1575 /// True iff the backedge is taken either exactly Max or zero times.
1576 bool MaxOrZero = false;
1577
1578 bool isComplete() const { return IsComplete; }
1579 const SCEV *getConstantMax() const { return ConstantMax; }
1580
1581 LLVM_ABI const ExitNotTakenInfo *getExitNotTaken(
1582 const BasicBlock *ExitingBlock,
1583 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const;
1584
1585 public:
1586 BackedgeTakenInfo() = default;
1587 BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
1588 BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;
1589
1590 using EdgeExitInfo = std::pair<BasicBlock *, ExitLimit>;
1591
1592 /// Initialize BackedgeTakenInfo from a list of exact exit counts.
1593 LLVM_ABI BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts,
1594 bool IsComplete, const SCEV *ConstantMax,
1595 bool MaxOrZero);
1596
1597 /// Test whether this BackedgeTakenInfo contains any computed information,
1598 /// or whether it's all SCEVCouldNotCompute values.
1599 bool hasAnyInfo() const {
1600 return !ExitNotTaken.empty() ||
1601 !isa<SCEVCouldNotCompute>(getConstantMax());
1602 }
1603
1604 /// Test whether this BackedgeTakenInfo contains complete information.
1605 bool hasFullInfo() const { return isComplete(); }
1606
1607 /// Return an expression indicating the exact *backedge-taken*
1608 /// count of the loop if it is known or SCEVCouldNotCompute
1609 /// otherwise. If execution makes it to the backedge on every
1610 /// iteration (i.e. there are no abnormal exits like exception
1611 /// throws and thread exits) then this is the number of times the
1612 /// loop header will execute minus one.
1613 ///
1614 /// If the SCEV predicate associated with the answer can be different
1615 /// from AlwaysTrue, we must add a (non-null) Predicates argument.
1616 /// The SCEV predicate associated with the answer will be added to
1617 /// Predicates. A run-time check needs to be emitted for the SCEV
1618 /// predicate in order for the answer to be valid.
1619 ///
1620 /// Note that we should always know if we need to pass a predicate
1621 /// argument or not from the way the ExitCounts vector was computed.
1622 /// If we allowed SCEV predicates to be generated when populating this
1623 /// vector, this information can contain them and therefore a
1624 /// SCEVPredicate argument should be added to getExact.
1625 LLVM_ABI const SCEV *getExact(
1626 const Loop *L, ScalarEvolution *SE,
1627 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const;
1628
1629 /// Return the number of times this loop exit may fall through to the back
1630 /// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
1631 /// this block before this number of iterations, but may exit via another
1632 /// block. If \p Predicates is null the function returns CouldNotCompute if
1633 /// predicates are required, otherwise it fills in the required predicates.
1634 const SCEV *getExact(
1635 const BasicBlock *ExitingBlock, ScalarEvolution *SE,
1636 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const {
1637 if (auto *ENT = getExitNotTaken(ExitingBlock, Predicates))
1638 return ENT->ExactNotTaken;
1639 else
1640 return SE->getCouldNotCompute();
1641 }
1642
1643 /// Get the constant max backedge taken count for the loop.
1644 LLVM_ABI const SCEV *getConstantMax(
1645 ScalarEvolution *SE,
1646 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const;
1647
1648 /// Get the constant max backedge taken count for the particular loop exit.
1649 const SCEV *getConstantMax(
1650 const BasicBlock *ExitingBlock, ScalarEvolution *SE,
1651 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const {
1652 if (auto *ENT = getExitNotTaken(ExitingBlock, Predicates))
1653 return ENT->ConstantMaxNotTaken;
1654 else
1655 return SE->getCouldNotCompute();
1656 }
1657
1658 /// Get the symbolic max backedge taken count for the loop.
1659 LLVM_ABI const SCEV *getSymbolicMax(
1660 const Loop *L, ScalarEvolution *SE,
1661 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
1662
1663 /// Get the symbolic max backedge taken count for the particular loop exit.
1664 const SCEV *getSymbolicMax(
1665 const BasicBlock *ExitingBlock, ScalarEvolution *SE,
1666 SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const {
1667 if (auto *ENT = getExitNotTaken(ExitingBlock, Predicates))
1668 return ENT->SymbolicMaxNotTaken;
1669 else
1670 return SE->getCouldNotCompute();
1671 }
1672
1673 /// Return true if the number of times this backedge is taken is either the
1674 /// value returned by getConstantMax or zero.
1675 LLVM_ABI bool isConstantMaxOrZero(ScalarEvolution *SE) const;
1676 };
1677
1678 /// Cache the backedge-taken count of the loops for this function as they
1679 /// are computed.
1680 DenseMap<const Loop *, BackedgeTakenInfo> BackedgeTakenCounts;
1681
1682 /// Cache the predicated backedge-taken count of the loops for this
1683 /// function as they are computed.
1684 DenseMap<const Loop *, BackedgeTakenInfo> PredicatedBackedgeTakenCounts;
1685
1686 /// Loops whose backedge taken counts directly use this non-constant SCEV.
1687 DenseMap<const SCEV *, SmallPtrSet<PointerIntPair<const Loop *, 1, bool>, 4>>
1688 BECountUsers;
1689
1690 /// This map contains entries for all of the PHI instructions that we
1691 /// attempt to compute constant evolutions for. This allows us to avoid
1692 /// potentially expensive recomputation of these properties. An instruction
1693 /// maps to null if we are unable to compute its exit value.
1694 DenseMap<PHINode *, Constant *> ConstantEvolutionLoopExitValue;
1695
1696 /// This map contains entries for all the expressions that we attempt to
1697 /// compute getSCEVAtScope information for, which can be expensive in
1698 /// extreme cases.
1699 DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
1700 ValuesAtScopes;
1701
1702 /// Reverse map for invalidation purposes: stores which SCEV and which
1703 /// loop this is the value-at-scope of.
1704 DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
1705 ValuesAtScopesUsers;
1706
1707 /// Memoized computeLoopDisposition results.
1708 DenseMap<const SCEV *,
1709 SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
1710 LoopDispositions;
1711
1712 struct LoopProperties {
1713 /// Set to true if the loop contains no instruction that can abnormally exit
1714 /// the loop (i.e. via throwing an exception, by terminating the thread
1715 /// cleanly or by infinite looping in a called function). Strictly
1716 /// speaking, the last one is not leaving the loop, but is identical to
1717 /// leaving the loop for reasoning about undefined behavior.
1718 bool HasNoAbnormalExits;
1719
1720 /// Set to true if the loop contains no instruction that can have side
1721 /// effects (i.e. via throwing an exception, volatile or atomic access).
1722 bool HasNoSideEffects;
1723 };
1724
1725 /// Cache for \c getLoopProperties.
1726 DenseMap<const Loop *, LoopProperties> LoopPropertiesCache;
1727
1728 /// Return a \c LoopProperties instance for \p L, creating one if necessary.
1729 LLVM_ABI LoopProperties getLoopProperties(const Loop *L);
1730
1731 bool loopHasNoSideEffects(const Loop *L) {
1732 return getLoopProperties(L).HasNoSideEffects;
1733 }
1734
1735 /// Compute a LoopDisposition value.
1736 LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
1737
1738 /// Memoized computeBlockDisposition results.
1739 DenseMap<
1740 const SCEV *,
1741 SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
1742 BlockDispositions;
1743
1744 /// Compute a BlockDisposition value.
1745 BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
1746
1747 /// Stores all SCEV that use a given SCEV as its direct operand.
1748 DenseMap<const SCEV *, SmallPtrSet<const SCEV *, 8> > SCEVUsers;
1749
1750 /// Memoized results from getRange
1751 DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
1752
1753 /// Memoized results from getRange
1754 DenseMap<const SCEV *, ConstantRange> SignedRanges;
1755
1756 /// Used to parameterize getRange
1757 enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };
1758
1759 /// Set the memoized range for the given SCEV.
1760 const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
1761 ConstantRange CR) {
1762 DenseMap<const SCEV *, ConstantRange> &Cache =
1763 Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
1764
1765 auto Pair = Cache.insert_or_assign(S, std::move(CR));
1766 return Pair.first->second;
1767 }
1768
1769 /// Determine the range for a particular SCEV.
1770 /// NOTE: This returns a reference to an entry in a cache. It must be
1771 /// copied if it's needed for longer.
1772 LLVM_ABI const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint,
1773 unsigned Depth = 0);
1774
1775 /// Determine the range for a particular SCEV, but evaluates ranges for
1776 /// operands iteratively first.
1777 const ConstantRange &getRangeRefIter(const SCEV *S, RangeSignHint Hint);
1778
1779 /// Determines the range for the affine SCEVAddRecExpr {\p Start,+,\p Step}.
1780 /// Helper for \c getRange.
1781 ConstantRange getRangeForAffineAR(const SCEV *Start, const SCEV *Step,
1782 const APInt &MaxBECount);
1783
1784 /// Determines the range for the affine non-self-wrapping SCEVAddRecExpr {\p
1785 /// Start,+,\p Step}<nw>.
1786 ConstantRange getRangeForAffineNoSelfWrappingAR(const SCEVAddRecExpr *AddRec,
1787 const SCEV *MaxBECount,
1788 unsigned BitWidth,
1789 RangeSignHint SignHint);
1790
1791 /// Try to compute a range for the affine SCEVAddRecExpr {\p Start,+,\p
1792 /// Step} by "factoring out" a ternary expression from the add recurrence.
1793 /// Helper called by \c getRange.
1794 ConstantRange getRangeViaFactoring(const SCEV *Start, const SCEV *Step,
1795 const APInt &MaxBECount);
1796
1797 /// If the unknown expression U corresponds to a simple recurrence, return
1798 /// a constant range which represents the entire recurrence. Note that
1799 /// *add* recurrences with loop invariant steps aren't represented by
1800 /// SCEVUnknowns and thus don't use this mechanism.
1801 ConstantRange getRangeForUnknownRecurrence(const SCEVUnknown *U);
1802
1803 /// We know that there is no SCEV for the specified value. Analyze the
1804 /// expression recursively.
1805 const SCEV *createSCEV(Value *V);
1806
1807 /// We know that there is no SCEV for the specified value. Create a new SCEV
1808 /// for \p V iteratively.
1809 const SCEV *createSCEVIter(Value *V);
1810 /// Collect operands of \p V for which SCEV expressions should be constructed
1811 /// first. Returns a SCEV directly if it can be constructed trivially for \p
1812 /// V.
1813 const SCEV *getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops);
1814
1815 /// Returns SCEV for the first operand of a phi if all phi operands have
1816 /// identical opcodes and operands.
1817 const SCEV *createNodeForPHIWithIdenticalOperands(PHINode *PN);
1818
1819 /// Provide the special handling we need to analyze PHI SCEVs.
1820 const SCEV *createNodeForPHI(PHINode *PN);
1821
1822 /// Helper function called from createNodeForPHI.
1823 const SCEV *createAddRecFromPHI(PHINode *PN);
1824
1825 /// A helper function for createAddRecFromPHI to handle simple cases.
1826 const SCEV *createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
1827 Value *StartValueV);
1828
1829 /// Helper function called from createNodeForPHI.
1830 const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
1831
1832 /// Provide special handling for a select-like instruction (currently this
1833 /// is either a select instruction or a phi node). \p Ty is the type of the
1834 /// instruction being processed, that is assumed equivalent to
1835 /// "Cond ? TrueVal : FalseVal".
1836 std::optional<const SCEV *>
1837 createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty, ICmpInst *Cond,
1838 Value *TrueVal, Value *FalseVal);
1839
1840 /// See if we can model this select-like instruction via umin_seq expression.
1841 const SCEV *createNodeForSelectOrPHIViaUMinSeq(Value *I, Value *Cond,
1842 Value *TrueVal,
1843 Value *FalseVal);
1844
1845 /// Given a value \p V, which is a select-like instruction (currently this is
1846 /// either a select instruction or a phi node), which is assumed equivalent to
1847 /// Cond ? TrueVal : FalseVal
1848 /// see if we can model it as a SCEV expression.
1849 const SCEV *createNodeForSelectOrPHI(Value *V, Value *Cond, Value *TrueVal,
1850 Value *FalseVal);
1851
1852 /// Provide the special handling we need to analyze GEP SCEVs.
1853 const SCEV *createNodeForGEP(GEPOperator *GEP);
1854
1855 /// Implementation code for getSCEVAtScope; called at most once for each
1856 /// SCEV+Loop pair.
1857 const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
1858
1859 /// Return the BackedgeTakenInfo for the given loop, lazily computing new
1860 /// values if the loop hasn't been analyzed yet. The returned result is
1861 /// guaranteed not to be predicated.
1862 BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
1863
1864 /// Similar to getBackedgeTakenInfo, but will add predicates as required
1865 /// with the purpose of returning complete information.
1866 BackedgeTakenInfo &getPredicatedBackedgeTakenInfo(const Loop *L);
1867
1868 /// Compute the number of times the specified loop will iterate.
1869 /// If AllowPredicates is set, we will create new SCEV predicates as
1870 /// necessary in order to return an exact answer.
1871 BackedgeTakenInfo computeBackedgeTakenCount(const Loop *L,
1872 bool AllowPredicates = false);
1873
1874 /// Compute the number of times the backedge of the specified loop will
1875 /// execute if it exits via the specified block. If AllowPredicates is set,
1876 /// this call will try to use a minimal set of SCEV predicates in order to
1877 /// return an exact answer.
1878 ExitLimit computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
1879 bool IsOnlyExit, bool AllowPredicates = false);
1880
1881 // Helper functions for computeExitLimitFromCond to avoid exponential time
1882 // complexity.
1883
1884 class ExitLimitCache {
1885 // It may look like we need to key on the whole (L, ExitIfTrue,
1886 // ControlsOnlyExit, AllowPredicates) tuple, but recursive calls to
1887 // computeExitLimitFromCondCached from computeExitLimitFromCondImpl only
1888 // vary in the \c ExitCond and \c ControlsOnlyExit parameters. We remember
1889 // the initial values of the other parameters to assert our assumption.
1890 SmallDenseMap<PointerIntPair<Value *, 1>, ExitLimit> TripCountMap;
1891
1892 const Loop *L;
1893 bool ExitIfTrue;
1894 bool AllowPredicates;
1895
1896 public:
1897 ExitLimitCache(const Loop *L, bool ExitIfTrue, bool AllowPredicates)
1898 : L(L), ExitIfTrue(ExitIfTrue), AllowPredicates(AllowPredicates) {}
1899
1900 LLVM_ABI std::optional<ExitLimit> find(const Loop *L, Value *ExitCond,
1901 bool ExitIfTrue,
1902 bool ControlsOnlyExit,
1903 bool AllowPredicates);
1904
1905 LLVM_ABI void insert(const Loop *L, Value *ExitCond, bool ExitIfTrue,
1906 bool ControlsOnlyExit, bool AllowPredicates,
1907 const ExitLimit &EL);
1908 };
1909
1910 using ExitLimitCacheTy = ExitLimitCache;
1911
1912 ExitLimit computeExitLimitFromCondCached(ExitLimitCacheTy &Cache,
1913 const Loop *L, Value *ExitCond,
1914 bool ExitIfTrue,
1915 bool ControlsOnlyExit,
1916 bool AllowPredicates);
1917 ExitLimit computeExitLimitFromCondImpl(ExitLimitCacheTy &Cache, const Loop *L,
1918 Value *ExitCond, bool ExitIfTrue,
1919 bool ControlsOnlyExit,
1920 bool AllowPredicates);
1921 std::optional<ScalarEvolution::ExitLimit> computeExitLimitFromCondFromBinOp(
1922 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
1923 bool ControlsOnlyExit, bool AllowPredicates);
1924
1925 /// Compute the number of times the backedge of the specified loop will
1926 /// execute if its exit condition were a conditional branch of the ICmpInst
1927 /// ExitCond and ExitIfTrue. If AllowPredicates is set, this call will try
1928 /// to use a minimal set of SCEV predicates in order to return an exact
1929 /// answer.
1930 ExitLimit computeExitLimitFromICmp(const Loop *L, ICmpInst *ExitCond,
1931 bool ExitIfTrue,
1932 bool IsSubExpr,
1933 bool AllowPredicates = false);
1934
1935 /// Variant of previous which takes the components representing an ICmp
1936 /// as opposed to the ICmpInst itself. Note that the prior version can
1937 /// return more precise results in some cases and is preferred when caller
1938 /// has a materialized ICmp.
1939 ExitLimit computeExitLimitFromICmp(const Loop *L, CmpPredicate Pred,
1940 const SCEV *LHS, const SCEV *RHS,
1941 bool IsSubExpr,
1942 bool AllowPredicates = false);
1943
1944 /// Compute the number of times the backedge of the specified loop will
1945 /// execute if its exit condition were a switch with a single exiting case
1946 /// to ExitingBB.
1947 ExitLimit computeExitLimitFromSingleExitSwitch(const Loop *L,
1948 SwitchInst *Switch,
1949 BasicBlock *ExitingBB,
1950 bool IsSubExpr);
1951
1952 /// Compute the exit limit of a loop that is controlled by a
1953 /// "(IV >> 1) != 0" type comparison. We cannot compute the exact trip
1954 /// count in these cases (since SCEV has no way of expressing them), but we
1955 /// can still sometimes compute an upper bound.
1956 ///
1957 /// Return an ExitLimit for a loop whose backedge is guarded by `LHS Pred
1958 /// RHS`.
1959 ExitLimit computeShiftCompareExitLimit(Value *LHS, Value *RHS, const Loop *L,
1960 ICmpInst::Predicate Pred);
1961
1962 /// If the loop is known to execute a constant number of times (the
1963 /// condition evolves only from constants), try to evaluate a few iterations
1964 /// of the loop until the exit condition gets a value of ExitWhen
1965 /// (true or false). If we cannot evaluate the exit count of the loop,
1966 /// return CouldNotCompute.
1967 const SCEV *computeExitCountExhaustively(const Loop *L, Value *Cond,
1968 bool ExitWhen);
1969
1970 /// Return the number of times an exit condition comparing the specified
1971 /// value to zero will execute. If not computable, return CouldNotCompute.
1972 /// If AllowPredicates is set, this call will try to use a minimal set of
1973 /// SCEV predicates in order to return an exact answer.
1974 ExitLimit howFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr,
1975 bool AllowPredicates = false);
1976
1977 /// Return the number of times an exit condition checking the specified
1978 /// value for nonzero will execute. If not computable, return
1979 /// CouldNotCompute.
1980 ExitLimit howFarToNonZero(const SCEV *V, const Loop *L);
1981
1982 /// Return the number of times an exit condition containing the specified
1983 /// less-than comparison will execute. If not computable, return
1984 /// CouldNotCompute.
1985 ///
1986 /// \p isSigned specifies whether the less-than is signed.
1987 ///
1988 /// \p ControlsOnlyExit is true when the LHS < RHS condition directly controls
1989 /// the branch (loops exits only if condition is true). In this case, we can
1990 /// use NoWrapFlags to skip overflow checks.
1991 ///
1992 /// If \p AllowPredicates is set, this call will try to use a minimal set of
1993 /// SCEV predicates in order to return an exact answer.
1994 ExitLimit howManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
1995 bool isSigned, bool ControlsOnlyExit,
1996 bool AllowPredicates = false);
1997
1998 ExitLimit howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
1999 bool isSigned, bool IsSubExpr,
2000 bool AllowPredicates = false);
2001
2002 /// Return a predecessor of BB (which may not be an immediate predecessor)
2003 /// which has exactly one successor from which BB is reachable, or null if
2004 /// no such block is found.
2005 std::pair<const BasicBlock *, const BasicBlock *>
2006 getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) const;
2007
2008 /// Test whether the condition described by Pred, LHS, and RHS is true
2009 /// whenever the given FoundCondValue value evaluates to true in given
2010 /// Context. If Context is nullptr, then the found predicate is true
2011 /// everywhere. LHS and FoundLHS may have different type width.
2012 LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS,
2013 const SCEV *RHS, const Value *FoundCondValue,
2014 bool Inverse,
2015 const Instruction *Context = nullptr);
2016
2017 /// Test whether the condition described by Pred, LHS, and RHS is true
2018 /// whenever the given FoundCondValue value evaluates to true in given
2019 /// Context. If Context is nullptr, then the found predicate is true
2020 /// everywhere. LHS and FoundLHS must have same type width.
2021 LLVM_ABI bool isImpliedCondBalancedTypes(CmpPredicate Pred, const SCEV *LHS,
2022 const SCEV *RHS,
2023 CmpPredicate FoundPred,
2024 const SCEV *FoundLHS,
2025 const SCEV *FoundRHS,
2026 const Instruction *CtxI);
2027
2028 /// Test whether the condition described by Pred, LHS, and RHS is true
2029 /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
2030 /// true in given Context. If Context is nullptr, then the found predicate is
2031 /// true everywhere.
2032 LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS,
2033 const SCEV *RHS, CmpPredicate FoundPred,
2034 const SCEV *FoundLHS, const SCEV *FoundRHS,
2035 const Instruction *Context = nullptr);
2036
2037 /// Test whether the condition described by Pred, LHS, and RHS is true
2038 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2039 /// true in given Context. If Context is nullptr, then the found predicate is
2040 /// true everywhere.
2041 bool isImpliedCondOperands(CmpPredicate Pred, const SCEV *LHS,
2042 const SCEV *RHS, const SCEV *FoundLHS,
2043 const SCEV *FoundRHS,
2044 const Instruction *Context = nullptr);
2045
2046 /// Test whether the condition described by Pred, LHS, and RHS is true
2047 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2048 /// true. Here LHS is an operation that includes FoundLHS as one of its
2049 /// arguments.
2050 bool isImpliedViaOperations(CmpPredicate Pred, const SCEV *LHS,
2051 const SCEV *RHS, const SCEV *FoundLHS,
2052 const SCEV *FoundRHS, unsigned Depth = 0);
2053
2054 /// Test whether the condition described by Pred, LHS, and RHS is true.
2055 /// Use only simple non-recursive types of checks, such as range analysis etc.
2056 bool isKnownViaNonRecursiveReasoning(CmpPredicate Pred, const SCEV *LHS,
2057 const SCEV *RHS);
2058
2059 /// Test whether the condition described by Pred, LHS, and RHS is true
2060 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2061 /// true.
2062 bool isImpliedCondOperandsHelper(CmpPredicate Pred, const SCEV *LHS,
2063 const SCEV *RHS, const SCEV *FoundLHS,
2064 const SCEV *FoundRHS);
2065
2066 /// Test whether the condition described by Pred, LHS, and RHS is true
2067 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2068 /// true. Utility function used by isImpliedCondOperands. Tries to get
2069 /// cases like "X `sgt` 0 => X - 1 `sgt` -1".
2070 bool isImpliedCondOperandsViaRanges(CmpPredicate Pred, const SCEV *LHS,
2071 const SCEV *RHS, CmpPredicate FoundPred,
2072 const SCEV *FoundLHS,
2073 const SCEV *FoundRHS);
2074
2075 /// Return true if the condition denoted by \p LHS \p Pred \p RHS is implied
2076 /// by a call to @llvm.experimental.guard in \p BB.
2077 bool isImpliedViaGuard(const BasicBlock *BB, CmpPredicate Pred,
2078 const SCEV *LHS, const SCEV *RHS);
2079
2080 /// Test whether the condition described by Pred, LHS, and RHS is true
2081 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2082 /// true.
2083 ///
2084 /// This routine tries to rule out certain kinds of integer overflow, and
2085 /// then tries to reason about arithmetic properties of the predicates.
2086 bool isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred, const SCEV *LHS,
2087 const SCEV *RHS, const SCEV *FoundLHS,
2088 const SCEV *FoundRHS);
2089
2090 /// Test whether the condition described by Pred, LHS, and RHS is true
2091 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2092 /// true.
2093 ///
2094 /// This routine tries to weaken the known condition based on the fact that
2095 /// FoundLHS is an AddRec.
2096 bool isImpliedCondOperandsViaAddRecStart(CmpPredicate Pred, const SCEV *LHS,
2097 const SCEV *RHS,
2098 const SCEV *FoundLHS,
2099 const SCEV *FoundRHS,
2100 const Instruction *CtxI);
2101
2102 /// Test whether the condition described by Pred, LHS, and RHS is true
2103 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2104 /// true.
2105 ///
2106 /// This routine tries to figure out the predicate for Phis which are SCEVUnknown
2107 /// if it is true for every possible incoming value from their respective
2108 /// basic blocks.
2109 bool isImpliedViaMerge(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
2110 const SCEV *FoundLHS, const SCEV *FoundRHS,
2111 unsigned Depth);
2112
2113 /// Test whether the condition described by Pred, LHS, and RHS is true
2114 /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
2115 /// true.
2116 ///
2117 /// This routine tries to reason about shifts.
2118 bool isImpliedCondOperandsViaShift(CmpPredicate Pred, const SCEV *LHS,
2119 const SCEV *RHS, const SCEV *FoundLHS,
2120 const SCEV *FoundRHS);
2121
2122 /// If we know that the specified Phi is in the header of its containing
2123 /// loop, we know the loop executes a constant number of times, and the PHI
2124 /// node is just a recurrence involving constants, fold it.
2125 Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt &BEs,
2126 const Loop *L);
2127
2128 /// Test if the given expression is known to satisfy the condition described
2129 /// by Pred and the known constant ranges of LHS and RHS.
2130 bool isKnownPredicateViaConstantRanges(CmpPredicate Pred, const SCEV *LHS,
2131 const SCEV *RHS);
2132
2133 /// Try to prove the condition described by "LHS Pred RHS" by ruling out
2134 /// integer overflow.
2135 ///
2136 /// For instance, this will return true for "A s< (A + C)<nsw>" if C is
2137 /// positive.
2138 bool isKnownPredicateViaNoOverflow(CmpPredicate Pred, const SCEV *LHS,
2139 const SCEV *RHS);
2140
2141 /// Try to split Pred LHS RHS into logical conjunctions (and's) and try to
2142 /// prove them individually.
2143 bool isKnownPredicateViaSplitting(CmpPredicate Pred, const SCEV *LHS,
2144 const SCEV *RHS);
2145
2146 /// Try to match the Expr as "(L + R)<Flags>".
2147 bool splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R,
2148 SCEV::NoWrapFlags &Flags);
2149
2150 /// Forget predicated/non-predicated backedge taken counts for the given loop.
2151 void forgetBackedgeTakenCounts(const Loop *L, bool Predicated);
2152
2153 /// Drop memoized information for all \p SCEVs.
2154 void forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs);
2155
2156 /// Helper for forgetMemoizedResults.
2157 void forgetMemoizedResultsImpl(const SCEV *S);
2158
2159 /// Iterate over instructions in \p Worklist and their users. Erase entries
2160 /// from ValueExprMap and collect SCEV expressions in \p ToForget
2161 void visitAndClearUsers(SmallVectorImpl<Instruction *> &Worklist,
2162 SmallPtrSetImpl<Instruction *> &Visited,
2163 SmallVectorImpl<const SCEV *> &ToForget);
2164
2165 /// Erase Value from ValueExprMap and ExprValueMap.
2166 void eraseValueFromMap(Value *V);
2167
2168 /// Insert V to S mapping into ValueExprMap and ExprValueMap.
2169 void insertValueToMap(Value *V, const SCEV *S);
2170
2171 /// Return false iff the given SCEV contains a SCEVUnknown with a null
2172 /// value pointer.
2173 bool checkValidity(const SCEV *S) const;
2174
2175 /// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be
2176 /// equal to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}. This is
2177 /// equivalent to proving no signed (resp. unsigned) wrap in
2178 /// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
2179 /// (resp. `SCEVZeroExtendExpr`).
2180 template <typename ExtendOpTy>
2181 bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
2182 const Loop *L);
2183
2184 /// Try to prove NSW or NUW on \p AR relying on ConstantRange manipulation.
2185 SCEV::NoWrapFlags proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR);
2186
2187 /// Try to prove NSW on \p AR by proving facts about conditions known on
2188 /// entry and backedge.
2189 SCEV::NoWrapFlags proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR);
2190
2191 /// Try to prove NUW on \p AR by proving facts about conditions known on
2192 /// entry and backedge.
2193 SCEV::NoWrapFlags proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR);
2194
2195 std::optional<MonotonicPredicateType>
2196 getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
2197 ICmpInst::Predicate Pred);
2198
2199 /// Return SCEV no-wrap flags that can be proven based on reasoning about
2200 /// how poison produced from no-wrap flags on this value (e.g. a nuw add)
2201 /// would trigger undefined behavior on overflow.
2202 SCEV::NoWrapFlags getNoWrapFlagsFromUB(const Value *V);
2203
2204 /// Return a scope which provides an upper bound on the defining scope of
2205 /// 'S'. Specifically, return the first instruction in said bounding scope.
2206 /// Return nullptr if the scope is trivial (function entry).
2207 /// (See scope definition rules associated with flag discussion above)
2208 const Instruction *getNonTrivialDefiningScopeBound(const SCEV *S);
2209
2210 /// Return a scope which provides an upper bound on the defining scope for
2211 /// a SCEV with the operands in Ops. The outparam Precise is set if the
2212 /// bound found is a precise bound (i.e. must be the defining scope.)
2213 const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
2214 bool &Precise);
2215
2216 /// Wrapper around the above for cases which don't care if the bound
2217 /// is precise.
2218 const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops);
2219
2220 /// Given two instructions in the same function, return true if we can
2221 /// prove B must execute given A executes.
2222 bool isGuaranteedToTransferExecutionTo(const Instruction *A,
2223 const Instruction *B);
2224
2225 /// Returns true if \p Op is guaranteed not to cause immediate UB.
2226 bool isGuaranteedNotToCauseUB(const SCEV *Op);
2227
2228 /// Returns true if \p Op is guaranteed to not be poison.
2229 static bool isGuaranteedNotToBePoison(const SCEV *Op);
2230
2231 /// Return true if the SCEV corresponding to \p I is never poison. Proving
2232 /// this is more complex than proving that just \p I is never poison, since
2233 /// SCEV commons expressions across control flow, and you can have cases
2234 /// like:
2235 ///
2236 /// idx0 = a + b;
2237 /// ptr[idx0] = 100;
2238 /// if (<condition>) {
2239 /// idx1 = a +nsw b;
2240 /// ptr[idx1] = 200;
2241 /// }
2242 ///
2243 /// where the SCEV expression (+ a b) is guaranteed to not be poison (and
2244 /// hence not sign-overflow) only if "<condition>" is true. Since both
2245 /// `idx0` and `idx1` will be mapped to the same SCEV expression, (+ a b),
2246 /// it is not okay to annotate (+ a b) with <nsw> in the above example.
2247 bool isSCEVExprNeverPoison(const Instruction *I);
2248
2249 /// This is like \c isSCEVExprNeverPoison but it specifically works for
2250 /// instructions that will get mapped to SCEV add recurrences. Return true
2251 /// if \p I will never generate poison under the assumption that \p I is an
2252 /// add recurrence on the loop \p L.
2253 bool isAddRecNeverPoison(const Instruction *I, const Loop *L);
2254
2255 /// Similar to createAddRecFromPHI, but with the additional flexibility of
2256 /// suggesting runtime overflow checks in case casts are encountered.
2257 /// If successful, the analysis records that for this loop, \p SymbolicPHI,
2258 /// which is the UnknownSCEV currently representing the PHI, can be rewritten
2259 /// into an AddRec, assuming some predicates; the function then returns the
2260 /// AddRec and the predicates as a pair, and caches this pair in
2261 /// PredicatedSCEVRewrites.
2262 /// If the analysis is not successful, a mapping from the \p SymbolicPHI to
2263 /// itself (with no predicates) is recorded, and a nullptr with an empty
2264 /// predicates vector is returned as a pair.
2265 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
2266 createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI);
2267
2268 /// Compute the maximum backedge count based on the range of values
2269 /// permitted by Start, End, and Stride. This is for loops of the form
2270 /// {Start, +, Stride} LT End.
2271 ///
2272 /// Preconditions:
2273 /// * the induction variable is known to be positive.
2274 /// * the induction variable is assumed not to overflow (i.e. either it
2275 /// actually doesn't, or we'd have to immediately execute UB)
2276 /// We *don't* assert these preconditions so please be careful.
2277 const SCEV *computeMaxBECountForLT(const SCEV *Start, const SCEV *Stride,
2278 const SCEV *End, unsigned BitWidth,
2279 bool IsSigned);
2280
2281 /// Verify whether a linear IV with a positive stride can overflow in a
2282 /// less-than comparison, knowing the invariant term of the comparison and
2283 /// the stride.
2284 bool canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);
2285
2286 /// Verify whether a linear IV with a negative stride can overflow in a
2287 /// greater-than comparison, knowing the invariant term of the comparison
2288 /// and the stride.
2289 bool canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);
2290
2291 /// Get add expr already created or create a new one.
2292 const SCEV *getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2293 SCEV::NoWrapFlags Flags);
2294
2295 /// Get mul expr already created or create a new one.
2296 const SCEV *getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2297 SCEV::NoWrapFlags Flags);
2298
2299 // Get addrec expr already created or create a new one.
2300 const SCEV *getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2301 const Loop *L, SCEV::NoWrapFlags Flags);
2302
2303 /// Return x if \p Val is f(x) where f is a 1-1 function.
2304 const SCEV *stripInjectiveFunctions(const SCEV *Val) const;
2305
2306 /// Find all of the loops transitively used in \p S, and fill \p LoopsUsed.
2307 /// A loop is considered "used" by an expression if it contains
2308 /// an add rec on said loop.
2309 void getUsedLoops(const SCEV *S, SmallPtrSetImpl<const Loop *> &LoopsUsed);
2310
2311 /// Try to match the pattern generated by getURemExpr(A, B). If successful,
2312 /// assign A and B to LHS and RHS, respectively.
2313 LLVM_ABI bool matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS);
2314
2315 /// Look for a SCEV expression with type `SCEVType` and operands `Ops` in
2316 /// `UniqueSCEVs`. Return it if found, else nullptr.
2317 SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<const SCEV *> Ops);
2318
2319 /// Get reachable blocks in this function, making limited use of SCEV
2320 /// reasoning about conditions.
2321 void getReachableBlocks(SmallPtrSetImpl<BasicBlock *> &Reachable,
2322 Function &F);
2323
2324 /// Return the given SCEV expression with a new set of operands.
2325 /// This preserves the original nowrap flags.
2326 const SCEV *getWithOperands(const SCEV *S,
2327 SmallVectorImpl<const SCEV *> &NewOps);
2328
2329 FoldingSet<SCEV> UniqueSCEVs;
2330 FoldingSet<SCEVPredicate> UniquePreds;
2331 BumpPtrAllocator SCEVAllocator;
2332
2333 /// This maps loops to a list of addrecs that directly use said loop.
2334 DenseMap<const Loop *, SmallVector<const SCEVAddRecExpr *, 4>> LoopUsers;
2335
2336 /// Cache tentative mappings from UnknownSCEVs in a Loop, to a SCEV expression
2337 /// they can be rewritten into under certain predicates.
2338 DenseMap<std::pair<const SCEVUnknown *, const Loop *>,
2339 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
2340 PredicatedSCEVRewrites;
2341
2342 /// Set of AddRecs for which proving NUW via an induction has already been
2343 /// tried.
2344 SmallPtrSet<const SCEVAddRecExpr *, 16> UnsignedWrapViaInductionTried;
2345
2346 /// Set of AddRecs for which proving NSW via an induction has already been
2347 /// tried.
2348 SmallPtrSet<const SCEVAddRecExpr *, 16> SignedWrapViaInductionTried;
2349
2350 /// The head of a linked list of all SCEVUnknown values that have been
2351 /// allocated. This is used by releaseMemory to locate them all and call
2352 /// their destructors.
2353 SCEVUnknown *FirstUnknown = nullptr;
2354};
2355
2356/// Analysis pass that exposes the \c ScalarEvolution for a function.
2357class ScalarEvolutionAnalysis
2358 : public AnalysisInfoMixin<ScalarEvolutionAnalysis> {
2359 friend AnalysisInfoMixin<ScalarEvolutionAnalysis>;
2360
2361 LLVM_ABI static AnalysisKey Key;
2362
2363public:
2364 using Result = ScalarEvolution;
2365
2366 LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM);
2367};
2368
2369/// Verifier pass for the \c ScalarEvolutionAnalysis results.
2370class ScalarEvolutionVerifierPass
2371 : public PassInfoMixin<ScalarEvolutionVerifierPass> {
2372public:
2373 LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
2374 static bool isRequired() { return true; }
2375};
2376
2377/// Printer pass for the \c ScalarEvolutionAnalysis results.
2378class ScalarEvolutionPrinterPass
2379 : public PassInfoMixin<ScalarEvolutionPrinterPass> {
2380 raw_ostream &OS;
2381
2382public:
2383 explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}
2384
2385 LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
2386
2387 static bool isRequired() { return true; }
2388};
2389
2390class LLVM_ABI ScalarEvolutionWrapperPass : public FunctionPass {
2391 std::unique_ptr<ScalarEvolution> SE;
2392
2393public:
2394 static char ID;
2395
2396 ScalarEvolutionWrapperPass();
2397
2398 ScalarEvolution &getSE() { return *SE; }
2399 const ScalarEvolution &getSE() const { return *SE; }
2400
2401 bool runOnFunction(Function &F) override;
2402 void releaseMemory() override;
2403 void getAnalysisUsage(AnalysisUsage &AU) const override;
2404 void print(raw_ostream &OS, const Module * = nullptr) const override;
2405 void verifyAnalysis() const override;
2406};
2407
2408/// An interface layer with SCEV used to manage how we see SCEV expressions
2409/// for values in the context of existing predicates. We can add new
2410/// predicates, but we cannot remove them.
2411///
2412/// This layer has multiple purposes:
2413/// - provides a simple interface for SCEV versioning.
2414/// - guarantees that the order of transformations applied on a SCEV
2415/// expression for a single Value is consistent across two different
2416/// getSCEV calls. This means that, for example, once we've obtained
2417/// an AddRec expression for a certain value through expression
2418/// rewriting, we will continue to get an AddRec expression for that
2419/// Value.
2420/// - lowers the number of expression rewrites.
2421class PredicatedScalarEvolution {
2422public:
2423 LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);
2424
2425 LLVM_ABI const SCEVPredicate &getPredicate() const;
2426
2427 /// Returns the SCEV expression of V, in the context of the current SCEV
2428 /// predicate. The order of transformations applied on the expression of V
2429 /// returned by ScalarEvolution is guaranteed to be preserved, even when
2430 /// adding new predicates.
2431 LLVM_ABI const SCEV *getSCEV(Value *V);
2432
2433 /// Get the (predicated) backedge count for the analyzed loop.
2434 LLVM_ABI const SCEV *getBackedgeTakenCount();
2435
2436 /// Get the (predicated) symbolic max backedge count for the analyzed loop.
2437 LLVM_ABI const SCEV *getSymbolicMaxBackedgeTakenCount();
2438
2439 /// Returns the upper bound of the loop trip count as a normal unsigned
2440 /// value, or 0 if the trip count is unknown.
2441 LLVM_ABI unsigned getSmallConstantMaxTripCount();
2442
2443 /// Adds a new predicate.
2444 LLVM_ABI void addPredicate(const SCEVPredicate &Pred);
2445
2446 /// Attempts to produce an AddRecExpr for V by adding additional SCEV
2447 /// predicates. If we can't transform the expression into an AddRecExpr, we
2448 /// return nullptr and do not add additional SCEV predicates to the current
2449 /// context.
2450 LLVM_ABI const SCEVAddRecExpr *getAsAddRec(Value *V);
2451
2452 /// Proves that V doesn't overflow by adding a SCEV predicate.
2453 LLVM_ABI void setNoOverflow(Value *V,
2454 SCEVWrapPredicate::IncrementWrapFlags Flags);
2455
2456 /// Returns true if we've proved that V doesn't wrap by means of a SCEV
2457 /// predicate.
2458 LLVM_ABI bool hasNoOverflow(Value *V,
2459 SCEVWrapPredicate::IncrementWrapFlags Flags);
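// A minimal sketch (assuming an existing ScalarEvolution &SE, a Loop *L and a
// Value *Ptr whose evolution should be expressed as an add recurrence):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr))
//     ; // AR is only valid under PSE.getPredicate(); a run-time check for
//       // that predicate must be emitted before relying on AR.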
2460
2461 /// Returns the ScalarEvolution analysis used.
2462 ScalarEvolution *getSE() const { return &SE; }
2463
2464 /// We need to explicitly define the copy constructor because of FlagsMap.
2465 LLVM_ABI PredicatedScalarEvolution(const PredicatedScalarEvolution &);
2466
2467 /// Print the SCEV mappings done by the Predicated Scalar Evolution.
2468 /// The printed text is indented by \p Depth.
2469 LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const;
2470
2471 /// Check if \p AR1 and \p AR2 are equal, while taking into account
2472 /// Equal predicates in Preds.
2473 LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1,
2474 const SCEVAddRecExpr *AR2) const;
2475
2476private:
2477 /// Increments the version number of the predicate. This needs to be called
2478 /// every time the SCEV predicate changes.
2479 void updateGeneration();
2480
2481 /// Holds a SCEV and the version number of the SCEV predicate used to
2482 /// perform the rewrite of the expression.
2483 using RewriteEntry = std::pair<unsigned, const SCEV *>;
2484
2485 /// Maps a SCEV to the rewrite result of that SCEV at a certain version
2486 /// number. If this number doesn't match the current Generation, we will
2487 /// need to do a rewrite. To preserve the transformation order of previous
2488 /// rewrites, we will rewrite the previous result instead of the original
2489 /// SCEV.
2490 DenseMap<const SCEV *, RewriteEntry> RewriteMap;
2491
2492 /// Records what NoWrap flags we've added to a Value *.
2493 ValueMap<Value *, SCEVWrapPredicate::IncrementWrapFlags> FlagsMap;
2494
2495 /// The ScalarEvolution analysis.
2496 ScalarEvolution &SE;
2497
2498 /// The analyzed Loop.
2499 const Loop &L;
2500
2501 /// The SCEVPredicate that forms our context. We will rewrite all
2502 /// expressions assuming that this predicate is true.
2503 std::unique_ptr<SCEVUnionPredicate> Preds;
2504
2505 /// Marks the version of the SCEV predicate used. When rewriting a SCEV
2506 /// expression we mark it with the version of the predicate. We use this to
2507 /// figure out if the predicate has changed from the last rewrite of the
2508 /// SCEV. If so, we need to perform a new rewrite.
2509 unsigned Generation = 0;
2510
2511 /// The backedge taken count.
2512 const SCEV *BackedgeCount = nullptr;
2513
2514 /// The symbolic max backedge taken count.
2515 const SCEV *SymbolicMaxBackedgeCount = nullptr;
2516
2517 /// The constant max trip count for the loop.
2518 std::optional<unsigned> SmallConstantMaxTripCount;
2519};
2520
2521template <> struct DenseMapInfo<ScalarEvolution::FoldID> {
2522 static inline ScalarEvolution::FoldID getEmptyKey() {
2523 ScalarEvolution::FoldID ID(0);
2524 return ID;
2525 }
2526 static inline ScalarEvolution::FoldID getTombstoneKey() {
2527 ScalarEvolution::FoldID ID(1);
2528 return ID;
2529 }
2530
2531 static unsigned getHashValue(const ScalarEvolution::FoldID &Val) {
2532 return Val.computeHash();
2533 }
2534
2535 static bool isEqual(const ScalarEvolution::FoldID &LHS,
2536 const ScalarEvolution::FoldID &RHS) {
2537 return LHS == RHS;
2538 }
2539};
2540
2541} // end namespace llvm
2542
2543#endif // LLVM_ANALYSIS_SCALAREVOLUTION_H
2544
