//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  return BA >= Alignment && Offset.isAligned(BA);
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
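    //
    // For example, if Base is dereferenceable for 32 bytes and aligned to 8
    // bytes, then the GEP at constant Offset 16 is dereferenceable for a
    // 16-byte access and, since 16 is a multiple of 8, remains 8-byte
    // aligned.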

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull ||
        isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI))) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  /// TODO: Refactor this function to be able to search independently for
  /// dereferenceability and alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume.
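    ///
    /// For example, a dominating assume such as:
    ///   call void @llvm.assume(i1 true)
    ///       ["align"(ptr %p, i64 16), "dereferenceable"(ptr %p, i64 64)]
    /// lets us answer the query for a 16-byte-aligned access of up to 64
    /// bytes through %p.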
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false;  // Other assumes may have better information, so
                             // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.
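  //
  // For example, an argument declared `ptr align 16 dereferenceable(64) %p`
  // is known to point at 64 dereferenceable bytes, so an access of at most
  // 64 bytes at a compatible alignment through %p can be answered here.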

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT,
                                             AssumptionCache *AC) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;

  // TODO: Handle overlapping accesses.
  // We should be computing AccessSize as (TC - 1) * Step + EltSize.
  if (EltSize.sgt(Step->getAPInt()))
    return false;

  // Compute the total access size for access patterns with unit stride and
  // patterns with gaps. For patterns with unit stride, Step and EltSize are
  // the same. For patterns with gaps (i.e. non-unit stride), we are accessing
  // EltSize bytes at every Step.
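  //
  // For example, a load of an i32 element (EltSize = 4) with a constant Step
  // of 16 bytes and a maximum trip count of 8 requires the first
  // AccessSize = 8 * 16 = 128 bytes starting at the AddRec's start value to
  // be dereferenceable.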
  APInt AccessSize = TC * Step->getAPInt();

  assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
         "implied by addrec definition");
  Value *Base = nullptr;
  if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
    Base = StartS->getValue();
  } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
    // Handle (NewBase + offset) as start value.
    const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
    if (StartS->getNumOperands() == 2 && Offset && NewBase) {
      // For the moment, restrict ourselves to the case where the offset is a
      // multiple of the requested alignment and the base is aligned.
      // TODO: generalize if a case is found which warrants it.
      if (Offset->getAPInt().urem(Alignment.value()) != 0)
        return false;
      Base = NewBase->getValue();
      bool Overflow = false;
      AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
      if (Overflow)
        return false;
    }
  }

  if (!Base)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
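//
// For example, with the same base %p, a 4-byte load at offset 4 (range
// [4, 8)) and a 4-byte store at offset 8 (range [8, 12)) do not overlap, so
// the store can be ignored while scanning for an available value.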
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

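    // For example, memset(%p, 0xAB, 8) makes an i16 load from %p available as
    // the splat constant 0xABAB (the memset byte replicated to the load
    // width).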
    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getModule()->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
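//
// Such uses never dereference the pointer, they only compare or convert it,
// so e.g. a pointer whose only user is `icmp eq ptr %p, %q` can be replaced
// even when the replacement value is not known to be dereferenceable.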
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}

// Returns true if `To` is a null pointer, a constant dereferenceable pointer,
// or both pointers have the same underlying object.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  if (getUnderlyingObject(From) == getUnderlyingObject(To))
    return true;
  return false;
}

bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!To->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}