//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionPrecedenceTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");

STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached the gvn-max-block-speculations cut-off "
          "preventing further exploration");

static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
                                            cl::init(true));
static cl::opt<bool>
GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
                                cl::init(false));
static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100),
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

// This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600),
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

struct llvm::GVNPass::Expression {
  uint32_t opcode;
  bool commutative = false;
  // The type is not necessarily the result type of the expression, it may be
  // any additional type needed to disambiguate the expression.
  Type *type = nullptr;
  SmallVector<uint32_t, 4> varargs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
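    // ~0U and ~1U are reserved sentinel opcodes (DenseMap's empty and
    // tombstone keys below); sentinels with equal opcodes compare equal
    // regardless of type or operands.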
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

namespace llvm {

template <> struct DenseMapInfo<GVNPass::Expression> {
  static inline GVNPass::Expression getEmptyKey() { return ~0U; }
  static inline GVNPass::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVNPass::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVNPass::Expression &LHS,
                      const GVNPass::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum class ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal, // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal, // An UndefValue representing a value from a dead block (which
              // is not yet physically removed from the CFG).
    SelectVal, // A pointer select which is loaded from and for which the load
               // can be replaced by a value select.
  };

  /// Val - The value that is live out of the block.
  Value *Val;
  /// Kind of the live-out value.
  ValType Kind;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset = 0;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = V;
    Res.Kind = ValType::SimpleVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = MI;
    Res.Kind = ValType::MemIntrin;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = Load;
    Res.Kind = ValType::LoadVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val = nullptr;
    Res.Kind = ValType::UndefVal;
    Res.Offset = 0;
    return Res;
  }

  static AvailableValue getSelect(SelectInst *Sel) {
    AvailableValue Res;
    Res.Val = Sel;
    Res.Kind = ValType::SelectVal;
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Kind == ValType::SimpleVal; }
  bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; }
  bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; }
  bool isUndefValue() const { return Kind == ValType::UndefVal; }
  bool isSelectValue() const { return Kind == ValType::SelectVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val;
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val);
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val);
  }

  SelectInst *getSelectValue() const {
    assert(isSelectValue() && "Wrong accessor");
    return cast<SelectInst>(Val);
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
                                  GVNPass &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB = nullptr;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel) {
    return get(BB, AvailableValue::getSelect(Sel));
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
    return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
// ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
    // gc.relocate is a 'special' call: its second and third operands are
    // not real values, but indices into the statepoint's argument list.
    // Use the referred-to values for the purposes of identity.
    e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
    e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
    e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
  } else {
    for (Use &Op : I->operands())
      e.varargs.push_back(lookupOrAdd(Op));
  }
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since the commutative operands are the first two operands it is
    // more efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (auto *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
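    // Encode the (possibly swapped) predicate in the low byte of the opcode
    // so compares that differ only in predicate still get distinct
    // expressions.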
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
    e.varargs.append(E->idx_begin(), E->idx_end());
  } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
    ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
    e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
  }

  return e;
}

GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
    unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVNPass::Expression
GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI is an extract from one of our with.overflow intrinsics. Synthesize
    // a semantically equivalent expression instead of an extract value
    // expression.
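    // E.g. 'extractvalue (sadd.with.overflow %a, %b), 0' gets the same value
    // number as a plain 'add %a, %b'.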
    e.opcode = WO->getBinaryOp();
    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
    return e;
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Use &Op : EI->operands())
    e.varargs.push_back(lookupOrAdd(Op));

  append_range(e.varargs, EI->indices());

  return e;
}

GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
  Expression E;
  Type *PtrTy = GEP->getType()->getScalarType();
  const DataLayout &DL = GEP->getModule()->getDataLayout();
  unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
  MapVector<Value *, APInt> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (PtrTy->isOpaquePointerTy() &&
      GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
    // For opaque pointers, convert into offset representation, to recognize
    // equivalent address calculations that use different type encoding.
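    // E.g. 'gep i32, ptr %p, i64 1' and 'gep i8, ptr %p, i64 4' both reduce
    // to {%p, +4} (assuming a 4-byte i32) and get the same value number.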
    LLVMContext &Context = GEP->getContext();
    E.opcode = GEP->getOpcode();
    E.type = nullptr;
    E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
    for (const auto &Pair : VariableOffsets) {
      E.varargs.push_back(lookupOrAdd(Pair.first));
      E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
    }
    if (!ConstantOffset.isZero())
      E.varargs.push_back(
          lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
  } else {
    // If converting to offset representation fails (for typed pointers and
    // scalable vectors), fall back to type-based implementation:
    E.opcode = GEP->getOpcode();
    E.type = GEP->getSourceElementType();
    for (Use &Op : GEP->operands())
      E.varargs.push_back(lookupOrAdd(Op));
  }
  return E;
}

//===----------------------------------------------------------------------===//
// ValueTable External Functions
//===----------------------------------------------------------------------===//

GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
GVNPass::ValueTable &
GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;

/// add - Insert a value into the table with a specified value number.
void GVNPass::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  } else if (MD && AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }
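    // The expression has been seen before; a read-only call may only reuse
    // that number if memory dependence proves an identical dominating call
    // observes the same memory state, so consult MemDep first.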

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      // For masked load/store intrinsics, the local_dep may actually be
      // a normal load or store instruction.
      CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());

      if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->arg_size() != C->arg_size()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// Returns true if a value number exists for the specified value.
bool GVNPass::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

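  // Non-instruction values (arguments, constants, globals) are opaque to
  // expression hashing and get a fresh number; since LLVM uniques Constant
  // objects, repeated uses of the same constant still share one number.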
  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::AddrSpaceCast:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::Freeze:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
      exp = createExpr(I);
      break;
    case Instruction::GetElementPtr:
      exp = createGEPExpr(cast<GetElementPtrInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVNPass::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVNPass::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
// GVN Pass
//===----------------------------------------------------------------------===//

bool GVNPass::isPREEnabled() const {
  return Options.AllowPRE.value_or(GVNEnablePRE);
}

bool GVNPass::isLoadPREEnabled() const {
  return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE);
}

bool GVNPass::isLoadInLoopPREEnabled() const {
  return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE);
}

bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
  return Options.AllowLoadPRESplitBackedge.value_or(
      GVNEnableSplitBackedgeInLoadPRE);
}

bool GVNPass::isMemDepEnabled() const {
  return Options.AllowMemDep.value_or(GVNEnableMemDep);
}

PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *MemDep =
      isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                         MSSA ? &MSSA->getMSSA() : nullptr);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  if (LI)
    PA.preserve<LoopAnalysis>();
  return PA;
}

void GVNPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  if (Options.AllowPRE != None)
    OS << (Options.AllowPRE.value() ? "" : "no-") << "pre;";
  if (Options.AllowLoadPRE != None)
    OS << (Options.AllowLoadPRE.value() ? "" : "no-") << "load-pre;";
  if (Options.AllowLoadPRESplitBackedge != None)
    OS << (Options.AllowLoadPRESplitBackedge.value() ? "" : "no-")
       << "split-backedge-load-pre;";
  if (Options.AllowMemDep != None)
    OS << (Options.AllowMemDep.value() ? "" : "no-") << "memdep";
  OS << ">";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (auto &I : d) {
    errs() << I.first << "\n";
    I.second->dump();
  }
  errs() << "}\n";
}
#endif

enum class AvailabilityState : char {
  /// We know the block *is not* fully available. This is a fixpoint.
  Unavailable = 0,
  /// We know the block *is* fully available. This is a fixpoint.
  Available = 1,
  /// We do not know whether the block is fully available or not,
  /// but we are currently speculating that it will be.
  /// If it would have turned out that the block was, in fact, not fully
  /// available, this would have been cleaned up into an Unavailable.
  SpeculativelyAvailable = 2,
};

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
/// 0) we know the block *is not* fully available.
/// 1) we know the block *is* fully available.
/// 2) we do not know whether the block is fully available or not, but we are
/// currently speculating that it will be.
static bool IsValueFullyAvailableInBlock(
    BasicBlock *BB,
    DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
  SmallVector<BasicBlock *, 32> Worklist;
  Optional<BasicBlock *> UnavailableBB;

  // The number of times we didn't find an entry for a block in a map and
  // optimistically inserted an entry marking block as speculatively available.
  unsigned NumNewNewSpeculativelyAvailableBBs = 0;

#ifndef NDEBUG
  SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
  SmallVector<BasicBlock *, 32> AvailableBBs;
#endif

  Worklist.emplace_back(BB);
  while (!Worklist.empty()) {
    BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
    // Optimistically assume that the block is Speculatively Available and check
    // to see if we already know about this block in one lookup.
    std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
        FullyAvailableBlocks.try_emplace(
            CurrBB, AvailabilityState::SpeculativelyAvailable);
    AvailabilityState &State = IV.first->second;

    // Did the entry already exist for this block?
    if (!IV.second) {
      if (State == AvailabilityState::Unavailable) {
        UnavailableBB = CurrBB;
        break; // Backpropagate unavailability info.
      }

#ifndef NDEBUG
      AvailableBBs.emplace_back(CurrBB);
#endif
      continue; // Don't recurse further, but continue processing worklist.
    }

    // No entry found for block.
    ++NumNewNewSpeculativelyAvailableBBs;
    bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;

    // If we have exhausted our budget, mark this block as unavailable.
    // Also, if this block has no predecessors, the value isn't live-in here.
    if (OutOfBudget || pred_empty(CurrBB)) {
      MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
      State = AvailabilityState::Unavailable;
      UnavailableBB = CurrBB;
      break; // Backpropagate unavailability info.
    }

    // Tentatively consider this block as speculatively available.
#ifndef NDEBUG
    NewSpeculativelyAvailableBBs.insert(CurrBB);
#endif
    // And further recurse into block's predecessors, in depth-first order!
    Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
  }

#if LLVM_ENABLE_STATS
  IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
      NumNewNewSpeculativelyAvailableBBs);
#endif

  // If the block isn't marked as fixpoint yet
  // (the Unavailable and Available states are fixpoints)
  auto MarkAsFixpointAndEnqueueSuccessors =
      [&](BasicBlock *BB, AvailabilityState FixpointState) {
        auto It = FullyAvailableBlocks.find(BB);
        if (It == FullyAvailableBlocks.end())
          return; // Never queried this block, leave as-is.
        switch (AvailabilityState &State = It->second) {
        case AvailabilityState::Unavailable:
        case AvailabilityState::Available:
          return; // Don't backpropagate further, continue processing worklist.
        case AvailabilityState::SpeculativelyAvailable: // Fix it!
          State = FixpointState;
#ifndef NDEBUG
          assert(NewSpeculativelyAvailableBBs.erase(BB) &&
                 "Found a speculatively available successor leftover?");
#endif
          // Queue successors for further processing.
          Worklist.append(succ_begin(BB), succ_end(BB));
          return;
        }
      };

  if (UnavailableBB) {
    // Okay, we have encountered an unavailable block.
    // Mark speculatively available blocks reachable from UnavailableBB as
    // unavailable as well. Paths are terminated when they reach blocks not in
    // FullyAvailableBlocks or they are not marked as speculatively available.
    Worklist.clear();
    Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Unavailable);
  }

#ifndef NDEBUG
  Worklist.clear();
  for (BasicBlock *AvailableBB : AvailableBBs)
    Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Available);

  assert(NewSpeculativelyAvailableBBs.empty() &&
         "Must have fixed all the new speculatively available blocks.");
#endif

  return !UnavailableBB;
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate Load. This returns the value
/// that should be used at Load's definition site.
static Value *
ConstructSSAForLoadSet(LoadInst *Load,
                       SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                       GVNPass &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               Load->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
920 "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
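  // SSAUpdater inserts any PHIs required to merge the per-block values; the
  // PHIs it creates are collected in NewPHIs.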
  SSAUpdate.Initialize(Load->getType(), Load->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (AV.AV.isUndefValue())
      continue;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it as
    // SSAUpdater will resolve the value to the relevant phi which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == Load->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
}

static LoadInst *findDominatingLoad(Value *Ptr, Type *LoadTy, SelectInst *Sel,
                                    DominatorTree &DT) {
  for (Value *U : Ptr->users()) {
    auto *LI = dyn_cast<LoadInst>(U);
    if (LI && LI->getType() == LoadTy && LI->getParent() == Sel->getParent() &&
        DT.dominates(LI, Sel))
      return LI;
  }
  return nullptr;
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
                                                Instruction *InsertPt,
                                                GVNPass &gvn) const {
  Value *Res;
  Type *LoadTy = Load->getType();
  const DataLayout &DL = Load->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << " " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *CoercedLoad = getCoercedLoadValue();
    if (CoercedLoad->getType() == LoadTy && Offset == 0) {
      Res = CoercedLoad;
    } else {
      Res = getLoadValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that GVN
      // tracks. It is potentially possible to remove the load from the table,
      // but then all of the operations based on it would need to be rehashed.
      // Just leave the dead load around.
      gvn.getMemDep().removeInstruction(CoercedLoad);
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << " " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << " " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else if (isSelectValue()) {
    // Introduce a new value select for a load from an eligible pointer select.
    SelectInst *Sel = getSelectValue();
    LoadInst *L1 = findDominatingLoad(Sel->getOperand(1), LoadTy, Sel,
                                      gvn.getDominatorTree());
    LoadInst *L2 = findDominatingLoad(Sel->getOperand(2), LoadTy, Sel,
                                      gvn.getDominatorTree());
    assert(L1 && L2 &&
           "must be able to obtain dominating loads for both value operands of "
           "the select");
    Res = SelectInst::Create(Sel->getCondition(), L1, L2, "", Sel);
  } else {
    llvm_unreachable("Should not materialize value from dead block");
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Assuming To can be reached from both From and Between, does Between lie on
/// every path from From to To?
static bool liesBetween(const Instruction *From, Instruction *Between,
                        const Instruction *To, DominatorTree *DT) {
  if (From->getParent() == Between->getParent())
    return DT->dominates(From, Between);
  SmallSet<BasicBlock *, 1> Exclusion;
  Exclusion.insert(Between->getParent());
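  // If removing Between's block disconnects From from To, every path from
  // From to To must pass through Between.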
  return !isPotentiallyReachable(From, To, &Exclusion, DT);
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  User *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
  R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : Load->getPointerOperand()->users()) {
    if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
        cast<Instruction>(U)->getFunction() == Load->getFunction() &&
        DT->dominates(cast<Instruction>(U), Load)) {
      // Use the most immediately dominating value
      if (OtherAccess) {
        if (DT->dominates(cast<Instruction>(OtherAccess), cast<Instruction>(U)))
          OtherAccess = U;
        else
          assert(U == OtherAccess || DT->dominates(cast<Instruction>(U),
                                                   cast<Instruction>(OtherAccess)));
      } else
        OtherAccess = U;
    }
  }

  if (!OtherAccess) {
    // There is no dominating use, check if we can find a closest non-dominating
    // use that lies between any other potentially available use and Load.
    for (auto *U : Load->getPointerOperand()->users()) {
      if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
          cast<Instruction>(U)->getFunction() == Load->getFunction() &&
          isPotentiallyReachable(cast<Instruction>(U), Load, nullptr, DT)) {
        if (OtherAccess) {
          if (liesBetween(cast<Instruction>(OtherAccess), cast<Instruction>(U),
                          Load, DT)) {
            OtherAccess = U;
          } else if (!liesBetween(cast<Instruction>(U),
                                  cast<Instruction>(OtherAccess), Load, DT)) {
            // These uses are both partially available at Load were it not for
            // the clobber, but neither lies strictly after the other.
            OtherAccess = nullptr;
            break;
          } // else: keep current OtherAccess since it lies between U and Load
        } else {
          OtherAccess = U;
        }
      }
    }
  }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

/// Check if a load from pointer-select \p Address in \p DepBB can be converted
/// to a value select. The following conditions need to be satisfied:
/// 1. The pointer select (\p Address) must be defined in \p DepBB.
/// 2. Both value operands of the pointer select must be loaded in the same
///    basic block, before the pointer select.
/// 3. There must be no instructions between the found loads and \p End that may
///    clobber the loads.
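///
/// Illustrative example (names hypothetical):
///   %v1 = load i32, ptr %a
///   %v2 = load i32, ptr %b
///   %sel = select i1 %c, ptr %a, ptr %b
///   %res = load i32, ptr %sel
/// Here %res can be rewritten as 'select i1 %c, i32 %v1, i32 %v2'.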
static Optional<AvailableValue>
tryToConvertLoadOfPtrSelect(BasicBlock *DepBB, BasicBlock::iterator End,
                            Value *Address, Type *LoadTy, DominatorTree &DT,
                            AAResults *AA) {

  auto *Sel = dyn_cast_or_null<SelectInst>(Address);
  if (!Sel || DepBB != Sel->getParent())
    return None;

  LoadInst *L1 = findDominatingLoad(Sel->getOperand(1), LoadTy, Sel, DT);
  LoadInst *L2 = findDominatingLoad(Sel->getOperand(2), LoadTy, Sel, DT);
  if (!L1 || !L2)
    return None;

  // Ensure there are no accesses that may modify the locations referenced by
  // either L1 or L2 between L1, L2 and the specified End iterator.
  Instruction *EarlierLoad = L1->comesBefore(L2) ? L1 : L2;
  MemoryLocation L1Loc = MemoryLocation::get(L1);
  MemoryLocation L2Loc = MemoryLocation::get(L2);
  if (any_of(make_range(EarlierLoad->getIterator(), End), [&](Instruction &I) {
        return isModSet(AA->getModRefInfo(&I, L1Loc)) ||
               isModSet(AA->getModRefInfo(&I, L2Loc));
      }))
    return None;

  return AvailableValue::getSelect(Sel);
}

bool GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
                                      Value *Address, AvailableValue &Res) {
  if (!DepInfo.isDef() && !DepInfo.isClobber()) {
    assert(isa<SelectInst>(Address));
    if (auto R = tryToConvertLoadOfPtrSelect(
            Load->getParent(), Load->getIterator(), Address, Load->getType(),
            getDominatorTree(), getAliasAnalysis())) {
      Res = *R;
      return true;
    }
    return false;
  }

  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(Load->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = Load->getModule()->getDataLayout();

  Instruction *DepInst = DepInfo.getInst();
  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
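    // E.g. a wider 'store i64 %v, ptr %p' can satisfy a narrower
    // 'load i32, ptr %p'; VNCoercion extracts the covered bits.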
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //   load i32* P
    //   load i8* (P+1)
    // if we have this, replace the latter with an extraction from the former.
    if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLoad != Load && Address &&
          Load->isAtomic() <= DepLoad->isAtomic()) {
        Type *LoadType = Load->getType();
        int Offset = -1;

        // If MD reported clobber, check it was nested.
        if (DepInfo.isClobber() &&
            canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
          const auto ClobberOff = MD->getClobberOffset(DepLoad);
          // GVN cannot handle a negative offset.
          Offset = (ClobberOff == None || *ClobberOff < 0) ? -1 : *ClobberOff;
        }
        if (Offset == -1)
          Offset =
              analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLoad, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !Load->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }

    // Nothing known about this clobber, have to be conservative
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
        dbgs() << " is clobbered by " << *DepInst << '\n';);
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(Load, DepInfo, DT, ORE);

    return false;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading the alloca -> undef.
  // Loading immediately after lifetime begin -> undef.
  if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst)) {
    Res = AvailableValue::get(UndefValue::get(Load->getType()));
    return true;
  }

  if (Constant *InitVal =
          getInitialValueOfAllocation(DepInst, TLI, Load->getType())) {
    Res = AvailableValue::get(InitVal);
    return true;
  }

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is convertible to
    // the loaded value, we can reuse it.
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
                                         DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < Load->isAtomic())
      return false;

    Res = AvailableValue::get(S->getValueOperand());
    return true;
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < Load->isAtomic())
      return false;

    Res = AvailableValue::getLoad(LD);
    return true;
  }

  // Unknown def - must be conservative
  LLVM_DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
      dbgs() << " has unknown def " << *DepInst << '\n';);
  return false;
}

void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // Treat a dead dependent mem-op as a load that evaluates the same
      // value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
    Value *Address = Deps[i].getAddress();

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      if (auto R = tryToConvertLoadOfPtrSelect(
              DepBB, DepBB->end(), Address, Load->getType(), getDominatorTree(),
              getAliasAnalysis())) {
        ValuesPerBlock.push_back(
            AvailableValueInBlock::get(DepBB, std::move(*R)));
        continue;
      }
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    AvailableValue AV;
    if (AnalyzeLoadAvailability(Load, DepInfo, Address, AV)) {
      // Subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
    MapVector<BasicBlock *, Value *> &AvailableLoads) {
  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

    auto *NewLoad =
        new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
                     Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
                     Load->getSyncScopeID(), UnavailableBlock->getTerminator());
    NewLoad->setDebugLoc(Load->getDebugLoc());
    if (MSSAU) {
      auto *MSSA = MSSAU->getMemorySSA();
      // Get the defining access of the original load or use the load if it is a
      // MemoryDef (e.g. because it is volatile). The inserted loads are
      // guaranteed to load from the same definition.
      auto *LoadAcc = MSSA->getMemoryAccess(Load);
      auto *DefiningAcc =
          isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
      auto *NewAccess = MSSAU->createMemoryAccessInBB(
          NewLoad, DefiningAcc, NewLoad->getParent(),
          MemorySSA::BeforeTerminator);
      if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
        MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      else
        MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
    }

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags = Load->getAAMetadata();
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
      if (LI &&
          LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
        NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(
        AvailableValueInBlock::get(UnavailableBlock, NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
  Load->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(Load);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(Load);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
           << "load eliminated by PRE";
  });
}

bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                             UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = Load->getParent();
  BasicBlock *TmpBB = LoadBB;

  // Check that there are no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass the
  // execution to the following instruction, then moving through it may become
  // invalid. For example:
  //
  // int arr[LEN];
  // int index = ???;
  // ...
  // guard(0 <= index && index < LEN);
  // use(arr[index]);
  //
  // It is illegal to move the array access to any point above the guard,
  // because if the index is out of bounds we should deoptimize rather than
  // access the array.
  // Check that there is no guard in this block above our instruction.
  bool MustEnsureSafetyOfSpeculativeExecution =
      ICF->isDominatedByICFIFromSameBlock(Load);

  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    MustEnsureSafetyOfSpeculativeExecution =
        MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
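  // These seeded entries are the known fixpoints from which the tri-state
  // search in IsValueFullyAvailableInBlock starts.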
1473
1474 SmallVector<BasicBlock *, 4> CriticalEdgePred;
1475 for (BasicBlock *Pred : predecessors(LoadBB)) {
1476 // If any predecessor block is an EH pad that does not allow non-PHI
1477 // instructions before the terminator, we can't PRE the load.
1478 if (Pred->getTerminator()->isEHPad()) {
1479 LLVM_DEBUG(
1480 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1481 << Pred->getName() << "': " << *Load << '\n');
1482 return false;
1483 }
1484
1485 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1486 continue;
1487 }
1488
1489 if (Pred->getTerminator()->getNumSuccessors() != 1) {
1490 if (isa<IndirectBrInst>(Pred->getTerminator())) {
1491 LLVM_DEBUG(
1492 dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1493 << Pred->getName() << "': " << *Load << '\n');
1494 return false;
1495 }
1496
1497 if (LoadBB->isEHPad()) {
1498 LLVM_DEBUG(
1499 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1500 << Pred->getName() << "': " << *Load << '\n');
1501 return false;
1502 }
1503
1504 // Do not split backedge as it will break the canonical loop form.
1505 if (!isLoadPRESplitBackedgeEnabled())
1506 if (DT->dominates(LoadBB, Pred)) {
1507 LLVM_DEBUG(
1508 dbgs()
1509 << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1510 << Pred->getName() << "': " << *Load << '\n');
1511 return false;
1512 }
1513
1514 CriticalEdgePred.push_back(Pred);
1515 } else {
1516 // Only add the predecessors that will not be split for now.
1517 PredLoads[Pred] = nullptr;
1518 }
1519 }
1520
1521 // Decide whether PRE is profitable for this load.
1522 unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1523 assert(NumUnavailablePreds != 0 &&
1524 "Fully available value should already be eliminated!");
1525
1526 // If this load is unavailable in multiple predecessors, reject it.
1527 // FIXME: If we could restructure the CFG, we could make a common pred with
1528 // all the preds that don't have an available Load and insert a new load into
1529 // that one block.
1530 if (NumUnavailablePreds != 1)
1531 return false;
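
  // What survives this check is, as a minimal sketch (hypothetical names),
  // the shape where the value is available in one predecessor (say, via a
  // dominating store) and unavailable in exactly one other:
  //
  //      P1 (available)   P2 (unavailable)
  //               \         /
  //            LoadBB:  %v = load i32, ptr %p
  //
  // We insert a reload in P2 and merge both values with a phi in LoadBB.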

  // Now we know where we will insert the load. We must ensure that it is safe
  // to speculatively execute the load at that point.
  if (MustEnsureSafetyOfSpeculativeExecution) {
    if (CriticalEdgePred.size())
      if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), DT))
        return false;
    for (auto &PL : PredLoads)
      if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), DT))
        return false;
  }

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');
  }
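
  // After splitting, each former critical edge Pred->LoadBB goes through a
  // fresh block whose sole successor is LoadBB, so a reload placed there runs
  // exactly when control would have taken that edge and on no other path.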

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = Load->getModule()->getDataLayout();
  SmallVector<Instruction *, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
    // We do the translation for each edge we skipped by going from Load's block
    // to LoadBB; otherwise we might miss pieces needing translation.
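    //
    // For illustration (hypothetical names): if the pointer in LoadBB is
    //   %p = phi ptr [ %a, %Pred1 ], [ %b, %Pred2 ]
    // then its translation into %Pred1 is simply %a. If it is instead a GEP
    // whose operands are phis, translation may have to insert a translated
    // GEP into the predecessor; such new instructions are collected in
    // NewInsts so they can be erased if PRE ultimately fails.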

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred, so we can insert code to materialize the pointer
    // if it is not available.
    Value *LoadPtr = Load->getPointerOperand();
    BasicBlock *Cur = Load->getParent();
    while (Cur != LoadBB) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.PHITranslateWithInsertion(
          Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
      if (!LoadPtr) {
        CanDoPRE = false;
        break;
      }
      Cur = Cur->getSinglePredecessor();
    }

    if (LoadPtr) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
                                                  NewInsts);
    }
    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (!LoadPtr) {
      LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                        << *Load->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      // Erase instructions generated by the failed PHI translation before
      // trying to number them. PHI translation might insert instructions
      // in basic blocks other than the current one, and we delete them
      // directly, as markInstructionForDeletion only allows removing from the
      // current basic block.
      NewInsts.pop_back_val()->eraseFromParent();
    }
    // HINT: Don't revert the edge-splitting, as the following transformation
    // may also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  // Do it.
  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
  LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
                                           << " INSTS: " << *NewInsts.back()
                                           << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // Instructions that have been inserted in predecessor(s) to materialize
    // the load address do not retain their original debug locations. Doing
    // so could lead to confusing (but correct) source attributions.
    I->updateLocationAfterHoist();

    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
  ++NumPRELoad;
  return true;
}

bool GVNPass::performLoopLoadPRE(LoadInst *Load,
                                 AvailValInBlkVect &ValuesPerBlock,
                                 UnavailBlkVect &UnavailableBlocks) {
  if (!LI)
    return false;

  const Loop *L = LI->getLoopFor(Load->getParent());
  // TODO: Generalize to other loop blocks that dominate the latch.
  if (!L || L->getHeader() != Load->getParent())
    return false;

  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  if (!Preheader || !Latch)
    return false;

  Value *LoadPtr = Load->getPointerOperand();
  // Must be available in the preheader.
  if (!L->isLoopInvariant(LoadPtr))
    return false;

  // We plan to hoist the load to the preheader without introducing a new
  // fault. To do that, we need to prove that the loop cannot be side-exited
  // between first entering the loop header and executing the load.
  if (ICF->isDominatedByICFIFromSameBlock(Load))
    return false;
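
  // A rough sketch of the intended transformation (hypothetical pseudo-IR):
  // the load is hoisted out of the header, with a compensating reload placed
  // after the single in-loop clobbering block:
  //
  //   preheader:
  //     %pre = load i32, ptr %p      ; hoisted copy
  //   header:
  //     %v = phi ...                 ; merges %pre and %re via SSA update
  //     ...
  //   blocker:                       ; the one unavailable block in the loop
  //     ...                          ; clobbering instruction(s)
  //     %re = load i32, ptr %p       ; reload after the clobber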

  BasicBlock *LoopBlock = nullptr;
  for (auto *Blocker : UnavailableBlocks) {
    // Blockers from outside the loop are handled in the preheader.
    if (!L->contains(Blocker))
      continue;

    // Only allow one loop block. The loop header is executed at least as
    // frequently as any other loop block, and likely much more frequently. But
    // in case of multiple loop blocks, we need extra information (such as
    // block frequency info) to understand whether it is profitable to PRE into
    // multiple loop blocks.
    if (LoopBlock)
      return false;

    // Do not sink into inner loops. This may be unprofitable.
    if (L != LI->getLoopFor(Blocker))
      return false;

    // Blocks that dominate the latch execute on every single iteration, maybe
    // except the last one. So PREing into these blocks doesn't make much sense
    // in most cases. But the blocks that do not necessarily execute on each
    // iteration are sometimes much colder than the header, and this is when
    // PRE is potentially profitable.
    if (DT->dominates(Blocker, Latch))
      return false;

    // Make sure that the terminator itself doesn't clobber.
    if (Blocker->getTerminator()->mayWriteToMemory())
      return false;

    LoopBlock = Blocker;
  }

  if (!LoopBlock)
    return false;

  // Make sure the memory at this pointer cannot be freed, so that we can
  // safely reload from it after the clobber.
  if (LoadPtr->canBeFreed())
    return false;

  // TODO: Support critical edge splitting if blocker has more than 1
  // successor.
  MapVector<BasicBlock *, Value *> AvailableLoads;
  AvailableLoads[LoopBlock] = LoadPtr;
  AvailableLoads[Preheader] = LoadPtr;

  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads);
  ++NumPRELoopLoad;
  return true;
}

static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
           << "load of type " << NV("Type", Load->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}

/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVNPass::processNonLocalLoad(LoadInst *Load) {
  // Non-local speculations are not allowed under ASan or HWASan.
  if (Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(Load, Deps);

  // If we had to process more than MaxNumDeps blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > MaxNumDeps)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
               dbgs() << " has unknown dependencies\n";);
    return false;
  }

  bool Changed = false;
  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP =
          dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
    for (Use &U : GEP->indices())
      if (Instruction *I = dyn_cast<Instruction>(U.get()))
        Changed |= performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return Changed;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
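  //
  // For example (a sketch with hypothetical names), if both predecessors
  // store to the load's address:
  //
  //   BB1: store i32 %x, ptr %p       BB2: store i32 %y, ptr %p
  //               \                       /
  //           LoadBB: %v = load i32, ptr %p
  //
  // the load is fully redundant, and %v can be replaced by
  //   phi i32 [ %x, %BB1 ], [ %y, %BB2 ]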
  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
    Load->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(Load);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // If instruction I has debug info, then we should not update it.
      // Also, if I has a null DebugLoc, then it is still potentially incorrect
      // to propagate Load's DebugLoc because Load may not post-dominate I.
      if (Load->getDebugLoc() && Load->getParent() == I->getParent())
        I->setDebugLoc(Load->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(Load);
    ++NumGVNLoad;
    reportLoadElim(Load, V, ORE);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!isPREEnabled() || !isLoadPREEnabled())
    return Changed;
  if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
    return Changed;

  if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
      PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
    return true;

  return Changed;
}

static bool impliesEquivalanceIfTrue(CmpInst *Cmp) {
  if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
    return true;

  // Floating point comparisons can be equal, but not equivalent. Cases:
  // NaNs for unordered operators
  // +0.0 vs -0.0 for all operators
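  // For example, "fcmp oeq double 0.0, -0.0" is true, yet the two operands
  // are not interchangeable: 1.0/0.0 is +inf while 1.0/-0.0 is -inf.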
  if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
      (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
       Cmp->getFastMathFlags().noNaNs())) {
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    // If we can prove either side non-zero, then equality must imply
    // equivalence.
    // FIXME: We should do this optimization if 'no signed zeros' is
    // applicable via an instruction-level fast-math-flag or some other
    // indicator that relaxed FP semantics are being used.
    if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
      return true;
    if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
      return true;
    // TODO: Handle vector floating point constants.
  }
  return false;
}

static bool impliesEquivalanceIfFalse(CmpInst *Cmp) {
  if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
    return true;

  // Floating point comparisons can be equal, but not equivalent. Cases:
  // NaNs for unordered operators
  // +0.0 vs -0.0 for all operators
  if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
       Cmp->getFastMathFlags().noNaNs()) ||
      Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    // If we can prove either side non-zero, then equality must imply
    // equivalence.
    // FIXME: We should do this optimization if 'no signed zeros' is
    // applicable via an instruction-level fast-math-flag or some other
    // indicator that relaxed FP semantics are being used.
    if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
      return true;
    if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
      return true;
    // TODO: Handle vector floating point constants.
  }
  return false;
}

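/// Return true if \p V has at least one user that is an instruction in \p BB.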
static bool hasUsersIn(Value *V, BasicBlock *BB) {
  for (User *U : V->users())
    if (isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB)
      return true;
  return false;
}

bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      // Insert a new store-to-null instruction before the assume to indicate
      // that this code is not reachable. FIXME: We could insert an unreachable
      // instruction directly because we can modify the CFG.
      auto *NewS = new StoreInst(PoisonValue::get(Int8Ty),
                                 Constant::getNullValue(Int8Ty->getPointerTo()),
                                 IntrinsicI);
      if (MSSAU) {
        const MemoryUseOrDef *FirstNonDom = nullptr;
        const auto *AL =
            MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());

        // If there are accesses in the current basic block, find the first one
        // that does not come before NewS. The new memory access is inserted
        // after the found access or before the terminator if no such access is
        // found.
        if (AL) {
          for (const auto &Acc : *AL) {
            if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
              if (!Current->getMemoryInst()->comesBefore(NewS)) {
                FirstNonDom = Current;
                break;
              }
          }
        }

        // This added store is to null, so it will never be executed; we can
        // just use the LiveOnEntry def as its defining access.
        auto *NewDef =
            FirstNonDom ? MSSAU->createMemoryAccessBefore(
                              NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
                              const_cast<MemoryUseOrDef *>(FirstNonDom))
                        : MSSAU->createMemoryAccessInBB(
                              NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
                              NewS->getParent(), MemorySSA::BeforeTerminator);

        MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
      }
    }
    if (isAssumeWithEmptyBundle(*IntrinsicI))
      markInstructionForDeletion(IntrinsicI);
    return false;
  } else if (isa<Constant>(V)) {
    // If it's not false and is a constant, it must evaluate to true. This
    // means our assume is assume(true), and thus, pointless, and we don't
    // want to do anything more here.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);

    // This property is only true in dominated successors;
    // propagateEquality will check dominance for us.
    Changed |= propagateEquality(V, True, Edge, false);
  }

  // We can replace the assumed value with true, which covers cases like this:
  //   call void @llvm.assume(i1 %cmp)
  //   br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
  ReplaceOperandsWithMap[V] = True;

  // Similarly, after assume(!NotV) we know that NotV == false.
  Value *NotV;
  if (match(V, m_Not(m_Value(NotV))))
    ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
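
  // For example (hypothetical IR):
  //   %not = xor i1 %v, true
  //   call void @llvm.assume(i1 %not)
  //   br i1 %v, ...              ; %v can be replaced by false here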

  // If we find an equality fact, canonicalize all dominated uses in this block
  // to one of the two values. We heuristically choose the "oldest" of the
  // two where age is determined by value number. (Note that propagateEquality
  // above handles the cross-block case.)
  //
  // Key cases to cover are:
  // 1)
  // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
  // call void @llvm.assume(i1 %cmp)
  // ret float %0 ; will change it to ret float 3.000000e+00
  // 2)
  // %load = load float, float* %addr
  // %cmp = fcmp oeq float %load, %0
  // call void @llvm.assume(i1 %cmp)
  // ret float %load ; will change it to ret float %0
  if (auto *CmpI = dyn_cast<CmpInst>(V)) {
    if (impliesEquivalanceIfTrue(CmpI)) {
      Value *CmpLHS = CmpI->getOperand(0);
      Value *CmpRHS = CmpI->getOperand(1);
      // Heuristically pick the better replacement -- the choice of heuristic
      // isn't terribly important here, but the fact we canonicalize on some
      // replacement is for exposing other simplifications.
      // TODO: pull this out as a helper function and reuse w/ existing
      // (slightly different) logic.
      if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
        std::swap(CmpLHS, CmpRHS);
      if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
        std::swap(CmpLHS, CmpRHS);
      if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
          (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
        // Move the 'oldest' value to the right-hand side, using the value
        // number as a proxy for age.
        uint32_t LVN = VN.lookupOrAdd(CmpLHS);
        uint32_t RVN = VN.lookupOrAdd(CmpRHS);
        if (LVN < RVN)
          std::swap(CmpLHS, CmpRHS);
      }

      // Handle the degenerate case where we either haven't pruned a dead path
      // or removed a trivial assume yet.
      if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
        return Changed;

      LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
                        << *CmpLHS << " with "
                        << *CmpRHS << " in block "
                        << IntrinsicI->getParent()->getName() << "\n");

      // Set up the replacement map - this handles uses within the same block.
      if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
        ReplaceOperandsWithMap[CmpLHS] = CmpRHS;

      // NOTE: The non-block-local cases are handled by the call to
      // propagateEquality above; this block is just about handling the
      // block-local cases. TODO: There's a bunch of logic in propagateEquality
      // which isn't duplicated for the block-local case; can we share it
      // somehow?
    }
  }
  return Changed;
}

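/// Replace all uses of \p I with \p Repl. patchReplacementInstruction first
/// adjusts \p Repl (e.g. intersecting metadata such as !range or !nonnull)
/// so the replacement does not carry guarantees that were only established
/// for the original instruction.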
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVNPass::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access.
  if (!L->isUnordered())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // Find the dependency of the load: a prior access to the same pointer.
  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  Value *Address = L->getPointerOperand();
  // Only handle the local case below.
  if (!Dep.isDef() && !Dep.isClobber() && !isa<SelectInst>(Address)) {
    // This might be a NonFuncLocal or an Unknown.
    LLVM_DEBUG(
        // Fast print dep; using operator<< on Instruction is too slow.
        dbgs() << "GVN: load "; L->printAsOperand(dbgs());
        dbgs() << " has unknown dependence\n";);
    return false;
  }

  AvailableValue AV;
  if (AnalyzeLoadAvailability(L, Dep, Address, AV)) {
    Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);

    // Replace the load!
    patchAndReplaceAllUsesWith(L, AvailableValue);
    markInstructionForDeletion(L);
    if (MSSAU)
      MSSAU->removeMemoryAccess(L);
    ++NumGVNLoad;
    reportLoadElim(L, AvailableValue, ORE);
    // Tell MDA to reexamine the reused pointer since we might have more
    // information after forwarding it.
    if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(AvailableValue);
    return true;
  }

  return false;
}

/// Return a pair whose first field is the value number of \p Exp and whose
/// second field indicates whether that value number was newly created.
std::pair<uint32_t, bool>
GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}

/// Return whether all the values associated with the same \p Num are
/// defined in \p BB.
bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                         GVNPass &Gvn) {
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals && Vals->BB == BB)
    Vals = Vals->Next;
  return !Vals;
}

/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           uint32_t Num, GVNPass &Gvn) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}

// Return true if the value numbers \p Num and \p NewNum have equal values.
// Return false if the result is unknown.
bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
                                           const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           GVNPass &Gvn) {
  CallInst *Call = nullptr;
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals) {
    Call = dyn_cast<CallInst>(Vals->Val);
    if (Call && Call->getParent() == PhiBlock)
      break;
    Vals = Vals->Next;
  }

  // If we didn't find a call in PhiBlock, conservatively treat the result as
  // unknown; in particular, Call may be null if the last leader was not a
  // call.
  if (!Call)
    return false;

  if (AA->doesNotAccessMemory(Call))
    return true;

  if (!MD || !AA->onlyReadsMemory(Call))
    return false;

  MemDepResult local_dep = MD->getDependency(Call);
  if (!local_dep.isNonLocal())
    return false;

  const MemoryDependenceResults::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(Call);

  // Check to see if the Call has no function-local clobber.
  for (const NonLocalDepEntry &D : deps) {
    if (D.getResult().isNonFuncLocal())
      return true;
  }
  return false;
}

/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                               const BasicBlock *PhiBlock,
                                               uint32_t Num, GVNPass &Gvn) {
  if (PHINode *PN = NumberingPhi[Num]) {
    for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
      if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
        if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
          return TransVal;
    }
    return Num;
  }

  // If any value associated with \p Num is defined in a BB other than
  // PhiBlock, it cannot depend on a phi in PhiBlock without going through a
  // backedge. We can exit early in that case to save compile time.
  if (!areAllValsInBB(Num, PhiBlock, Gvn))
    return Num;

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
    return Num;
  Expression Exp = Expressions[ExprIdx[Num]];

  for (unsigned i = 0; i < Exp.varargs.size(); i++) {
    // For InsertValue and ExtractValue, some varargs are index numbers
    // instead of value numbers. Those index numbers should not be
    // translated.
    if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
        (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
        (i > 1 && Exp.opcode == Instruction::ShuffleVector))
      continue;
    Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
  }

  if (Exp.commutative) {
    assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
    if (Exp.varargs[0] > Exp.varargs[1]) {
      std::swap(Exp.varargs[0], Exp.varargs[1]);
      uint32_t Opcode = Exp.opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.opcode = (Opcode << 8) |
                     CmpInst::getSwappedPredicate(
                         static_cast<CmpInst::Predicate>(Exp.opcode & 255));
    }
  }
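
  // For example (hypothetical value numbers): if the translated expression is
  // (icmp sgt V7, V3), the canonicalization above swaps the operands to
  // (V3, V7) and rewrites the predicate to slt, so the expression can match
  // an existing (icmp slt V3, V7).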

  if (uint32_t NewNum = expressionNumbering[Exp]) {
    if (Exp.opcode == Instruction::Call && NewNum != Num)
      return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
    return NewNum;
  }
  return Num;
}

/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
/// again.
void GVNPass::ValueTable::eraseTranslateCacheEntry(
    uint32_t Num, const BasicBlock &CurrBlock) {
  for (const BasicBlock *Pred : predecessors(&CurrBlock))
    PhiTranslateTable.erase({Num, Pred});
}

// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
// question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val) return nullptr;

  Value *Val = nullptr;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val)) return Val;
  }

  LeaderTableEntry *Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val)) return Next->Val;
      if (!Val) Val = Next->Val;
    }

    Next = Next->Next;
  }

  return Val;
}

/// There is an edge from 'Src' to 'Dst'. Return
/// true if every path from the entry block to 'Dst' passes via this edge. In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // While in theory it is interesting to consider the case in which Dst has
  // more than one predecessor, because Dst might be part of a loop which is
  // only reachable from Src, in practice it is pointless since at the time
  // GVN runs all such loops have preheaders, which means that Dst will have
  // been changed to have only one predecessor, namely Src.
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  assert((!Pred || Pred == E.getStart()) &&
         "No edge between these basic blocks!");
  return Pred != nullptr;
}

void GVNPass::assignBlockRPONumber(Function &F) {
  BlockRPONumber.clear();
  uint32_t NextBlockNumber = 1;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    BlockRPONumber[BB] = NextBlockNumber++;
  InvalidBlockRPONumbers = false;
}

bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
  bool Changed = false;
  for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
    Value *Operand = Instr->getOperand(OpNum);
    auto it = ReplaceOperandsWithMap.find(Operand);
    if (it != ReplaceOperandsWithMap.end()) {
      LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
                        << *it->second << " in instruction " << *Instr << '\n');
      Instr->setOperand(OpNum, it->second);
      Changed = true;
    }
  }
  return Changed;
}

/// The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
/// If DominatesByEdge is false, then it means that we will propagate the RHS
/// value starting from the end of Root.Start.
bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
                                const BasicBlockEdge &Root,
                                bool DominatesByEdge) {
  SmallVector<std::pair<Value*, Value*>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  // For speed, compute a conservative fast approximation to
  // DT->dominates(Root, Root.getEnd());
  const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);

  while (!Worklist.empty()) {
    std::pair<Value*, Value*> Item = Worklist.pop_back_val();
    LHS = Item.first; RHS = Item.second;

    if (LHS == RHS)
      continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS))
      continue;

    // Prefer a constant on the right-hand side, or an Argument if no constants.
    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");

    // If there is no obvious reason to prefer the left-hand side over the
    // right-hand side, ensure the longest lived term is on the right-hand
    // side, so the shortest lived term will be replaced by the longest lived.
    // This tends to expose more simplifications.
    uint32_t LVN = VN.lookupOrAdd(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      // Move the 'oldest' value to the right-hand side, using the value number
      // as a proxy for age.
      uint32_t RVN = VN.lookupOrAdd(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
    // the invariant that instructions only occur in the leader table for their
    // own value number (this is used by removeFromLeaderTable), do not do this
    // if RHS is an instruction (if an instruction in the scope is morphed into
    // LHS then it will be turned into RHS by the next GVN iteration anyway, so
    // using the leader table is about compiling faster, not optimizing
    // better). The leader table only tracks basic blocks, not edges; only add
    // to it in the simple case where the edge dominates the end.
    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

    // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
    // LHS always has at least one use that is not dominated by Root, this will
    // never do anything if LHS has only one use.
    if (!LHS->hasOneUse()) {
      unsigned NumReplacements =
          DominatesByEdge
              ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
              : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());

      Changed |= NumReplacements > 0;
      NumGVNEqProp += NumReplacements;
      // Cached information for anything that uses LHS will be invalid.
      if (MD)
        MD->invalidateCachedPointerInfo(LHS);
    }

    // Now try to deduce additional equalities from this one. For example, if
    // the known equality was "(A != B)" == "false" then it follows that A and
    // B are equal in the scope. Only boolean equalities with an explicit true
    // or false RHS are currently supported.
    if (!RHS->getType()->isIntegerTy(1))
      // Not a boolean equality - bail out.
      continue;
    ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
    if (!CI)
      // RHS neither 'true' nor 'false' - bail out.
      continue;
    // Whether RHS equals 'true' (as an i1, the all-ones value -1). Otherwise
    // it equals 'false'.
    bool isKnownTrue = CI->isMinusOne();
    bool isKnownFalse = !isKnownTrue;

    // If "A && B" is known true then both A and B are known true. If "A || B"
    // is known false then both A and B are known false.
    Value *A, *B;
    if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(