//===- LoadStoreOpt.cpp ----------- Generic memory optimizations -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the LoadStoreOpt optimization pass.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/LoadStoreOpt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>

#define DEBUG_TYPE "loadstore-opt"

using namespace llvm;
using namespace ore;
using namespace MIPatternMatch;

STATISTIC(NumStoresMerged, "Number of stores merged");

const unsigned MaxStoreSizeToForm = 128;

char LoadStoreOpt::ID = 0;
INITIALIZE_PASS_BEGIN(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
                      false, false)
INITIALIZE_PASS_END(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
                    false, false)

LoadStoreOpt::LoadStoreOpt(std::function<bool(const MachineFunction &)> F)
    : MachineFunctionPass(ID), DoNotRunPass(F) {}

LoadStoreOpt::LoadStoreOpt()
    : LoadStoreOpt([](const MachineFunction &) { return false; }) {}

void LoadStoreOpt::init(MachineFunction &MF) {
  this->MF = &MF;
  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  TLI = MF.getSubtarget().getTargetLowering();
  LI = MF.getSubtarget().getLegalizerInfo();
  Builder.setMF(MF);
  IsPreLegalizer = !MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Legalized);
  InstsToErase.clear();
}

void LoadStoreOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AAResultsWrapperPass>();
  AU.setPreservesAll();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

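// Illustrative only (hypothetical MIR, not taken from a test): given
//   %offset:_(s64) = G_CONSTANT i64 8
//   %ptr:_(p0) = G_PTR_ADD %base, %offset
// getPointerInfo(%ptr) returns BaseReg = %base, IndexReg = %offset and
// Offset = 8. A pointer that is not defined by a G_PTR_ADD is treated as its
// own base with an offset of zero.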
BaseIndexOffset GISelAddressing::getPointerInfo(Register Ptr,
                                                MachineRegisterInfo &MRI) {
  BaseIndexOffset Info;
  Register PtrAddRHS;
  if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(Info.BaseReg), m_Reg(PtrAddRHS)))) {
    Info.BaseReg = Ptr;
    Info.IndexReg = Register();
    Info.IsIndexSignExt = false;
    return Info;
  }

  auto RHSCst = getIConstantVRegValWithLookThrough(PtrAddRHS, MRI);
  if (RHSCst)
    Info.Offset = RHSCst->Value.getSExtValue();

  // Just recognize a simple case for now. In future we'll need to match
  // indexing patterns for base + index + constant.
  Info.IndexReg = PtrAddRHS;
  Info.IsIndexSignExt = false;
  return Info;
}

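// A sketch of the same-base reasoning below, with made-up numbers: if both
// accesses share a base register, with the first access at offset 0 storing
// 4 bytes and the second at offset 4, then PtrDiff = 4 and Size1 (4) <=
// PtrDiff, so the accesses are provably disjoint (IsAlias = false, returns
// true). With the second access at offset 2 instead, PtrDiff = 2 < Size1, so
// they provably overlap (IsAlias = true, returns true).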
bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
                                               const MachineInstr &MI2,
                                               bool &IsAlias,
                                               MachineRegisterInfo &MRI) {
  auto *LdSt1 = dyn_cast<GLoadStore>(&MI1);
  auto *LdSt2 = dyn_cast<GLoadStore>(&MI2);
  if (!LdSt1 || !LdSt2)
    return false;

  BaseIndexOffset BasePtr0 = getPointerInfo(LdSt1->getPointerReg(), MRI);
  BaseIndexOffset BasePtr1 = getPointerInfo(LdSt2->getPointerReg(), MRI);

  if (!BasePtr0.BaseReg.isValid() || !BasePtr1.BaseReg.isValid())
    return false;

  LocationSize Size1 = LdSt1->getMemSize();
  LocationSize Size2 = LdSt2->getMemSize();

  int64_t PtrDiff;
  if (BasePtr0.BaseReg == BasePtr1.BaseReg) {
    PtrDiff = BasePtr1.Offset - BasePtr0.Offset;
    // If the size of memory access is unknown, do not use it to do analysis.
    // One example of unknown size memory access is to load/store scalable
    // vector objects on the stack.
    // BasePtr1 is PtrDiff away from BasePtr0. They alias if none of the
    // following situations arise:
    if (PtrDiff >= 0 && Size1.hasValue() && !Size1.isScalable()) {
      // [----BasePtr0----]
      //                        [---BasePtr1--]
      // ========PtrDiff========>
      IsAlias = !((int64_t)Size1.getValue() <= PtrDiff);
      return true;
    }
    if (PtrDiff < 0 && Size2.hasValue() && !Size2.isScalable()) {
      //                     [----BasePtr0----]
      // [---BasePtr1--]
      // =====(-PtrDiff)====>
      IsAlias = !((PtrDiff + (int64_t)Size2.getValue()) <= 0);
      return true;
    }
    return false;
  }

  // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
  // able to calculate their relative offset if at least one arises
  // from an alloca. However, these allocas cannot overlap and we
  // can infer there is no alias.
  auto *Base0Def = getDefIgnoringCopies(BasePtr0.BaseReg, MRI);
  auto *Base1Def = getDefIgnoringCopies(BasePtr1.BaseReg, MRI);
  if (!Base0Def || !Base1Def)
    return false; // Couldn't tell anything.

  if (Base0Def->getOpcode() != Base1Def->getOpcode())
    return false;

  if (Base0Def->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    MachineFrameInfo &MFI = Base0Def->getMF()->getFrameInfo();
    // If the bases have the same frame index but we couldn't find a
    // constant offset, (indices are different) be conservative.
    if (Base0Def != Base1Def &&
        (!MFI.isFixedObjectIndex(Base0Def->getOperand(1).getIndex()) ||
         !MFI.isFixedObjectIndex(Base1Def->getOperand(1).getIndex()))) {
      IsAlias = false;
      return true;
    }
  }

  // This implementation is a lot more primitive than the SDAG one for now.
  // FIXME: what about constant pools?
  if (Base0Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
    auto GV0 = Base0Def->getOperand(1).getGlobal();
    auto GV1 = Base1Def->getOperand(1).getGlobal();
    if (GV0 != GV1) {
      IsAlias = false;
      return true;
    }
  }

  // Can't tell anything about aliasing.
  return false;
}

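// Rough shape of the precise-overlap query handed to AA further down, with
// invented values: two 4-byte accesses on the same IR value with MMO offsets
// 0 and 8 give MinOffset = 0, Overlap0 = 4 and Overlap1 = 12, i.e. each
// MemoryLocation spans from the common MinOffset to the end of its own
// access, which is what lets AA prove the two locations disjoint.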
bool GISelAddressing::instMayAlias(const MachineInstr &MI,
                                   const MachineInstr &Other,
                                   MachineRegisterInfo &MRI,
                                   AliasAnalysis *AA) {
  struct MemUseCharacteristics {
    bool IsVolatile;
    bool IsAtomic;
    Register BasePtr;
    int64_t Offset;
    LocationSize NumBytes;
    MachineMemOperand *MMO;
  };

  auto getCharacteristics =
      [&](const MachineInstr *MI) -> MemUseCharacteristics {
    if (const auto *LS = dyn_cast<GLoadStore>(MI)) {
      Register BaseReg;
      int64_t Offset = 0;
      // No pre/post-inc addressing modes are considered here, unlike in SDAG.
      if (!mi_match(LS->getPointerReg(), MRI,
                    m_GPtrAdd(m_Reg(BaseReg), m_ICst(Offset)))) {
        BaseReg = LS->getPointerReg();
        Offset = 0;
      }

      LocationSize Size = LS->getMMO().getSize();
      return {LS->isVolatile(), LS->isAtomic(), BaseReg,
              Offset /*base offset*/, Size, &LS->getMMO()};
    }
    // FIXME: support recognizing lifetime instructions.
    // Default.
    return {false /*isvolatile*/,
            /*isAtomic*/ false,
            Register(),
            (int64_t)0 /*offset*/,
            LocationSize::beforeOrAfterPointer() /*size*/,
            (MachineMemOperand *)nullptr};
  };
  MemUseCharacteristics MUC0 = getCharacteristics(&MI),
                        MUC1 = getCharacteristics(&Other);

  // If they are to the same address, then they must be aliases.
  if (MUC0.BasePtr.isValid() && MUC0.BasePtr == MUC1.BasePtr &&
      MUC0.Offset == MUC1.Offset)
    return true;

  // If they are both volatile then they cannot be reordered.
  if (MUC0.IsVolatile && MUC1.IsVolatile)
    return true;

  // Be conservative about atomics for the moment
  // TODO: This is way overconservative for unordered atomics (see D66309)
  if (MUC0.IsAtomic && MUC1.IsAtomic)
    return true;

  // If one operation reads from invariant memory, and the other may store, they
  // cannot alias.
  if (MUC0.MMO && MUC1.MMO) {
    if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
        (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
      return false;
  }

  // If NumBytes is scalable and offset is not 0, conservatively return may
  // alias
  if ((MUC0.NumBytes.isScalable() && MUC0.Offset != 0) ||
      (MUC1.NumBytes.isScalable() && MUC1.Offset != 0))
    return true;

  const bool BothNotScalable =
      !MUC0.NumBytes.isScalable() && !MUC1.NumBytes.isScalable();

  // Try to prove that there is aliasing, or that there is no aliasing. Either
  // way, we can return now. If nothing can be proved, proceed with more tests.
  bool IsAlias;
  if (BothNotScalable &&
      GISelAddressing::aliasIsKnownForLoadStore(MI, Other, IsAlias, MRI))
    return IsAlias;

  // The following all rely on MMO0 and MMO1 being valid.
  if (!MUC0.MMO || !MUC1.MMO)
    return true;

  // FIXME: port the alignment based alias analysis from SDAG's isAlias().
  int64_t SrcValOffset0 = MUC0.MMO->getOffset();
  int64_t SrcValOffset1 = MUC1.MMO->getOffset();
  LocationSize Size0 = MUC0.NumBytes;
  LocationSize Size1 = MUC1.NumBytes;
  if (AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() && Size0.hasValue() &&
      Size1.hasValue()) {
    // Use alias analysis information.
    int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
    int64_t Overlap0 =
        Size0.getValue().getKnownMinValue() + SrcValOffset0 - MinOffset;
    int64_t Overlap1 =
        Size1.getValue().getKnownMinValue() + SrcValOffset1 - MinOffset;
    LocationSize Loc0 =
        Size0.isScalable() ? Size0 : LocationSize::precise(Overlap0);
    LocationSize Loc1 =
        Size1.isScalable() ? Size1 : LocationSize::precise(Overlap1);

    if (AA->isNoAlias(
            MemoryLocation(MUC0.MMO->getValue(), Loc0, MUC0.MMO->getAAInfo()),
            MemoryLocation(MUC1.MMO->getValue(), Loc1, MUC1.MMO->getAAInfo())))
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}

/// Returns true if the instruction creates an unavoidable hazard that
/// forces a boundary between store merge candidates.
static bool isInstHardMergeHazard(MachineInstr &MI) {
  return MI.hasUnmodeledSideEffects() || MI.hasOrderedMemoryRef();
}

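// Worked example of the splitting loop below (sizes invented): with seven
// adjacent s8 stores and legal scalar stores up to s32, NumPow2 = 4, so the
// first iteration merges four stores into one s32 store; the next iteration
// sees three remaining stores, merges two into an s16 store, and the final
// lone s8 store is left as is.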
bool LoadStoreOpt::mergeStores(SmallVectorImpl<GStore *> &StoresToMerge) {
  // Try to merge all the stores in the vector, splitting into separate segments
  // as necessary.
  assert(StoresToMerge.size() > 1 && "Expected multiple stores to merge");
  LLT OrigTy = MRI->getType(StoresToMerge[0]->getValueReg());
  LLT PtrTy = MRI->getType(StoresToMerge[0]->getPointerReg());
  unsigned AS = PtrTy.getAddressSpace();
  // Ensure the legal store info is computed for this address space.
  initializeStoreMergeTargetInfo(AS);
  const auto &LegalSizes = LegalStoreSizes[AS];

#ifndef NDEBUG
  for (auto *StoreMI : StoresToMerge)
    assert(MRI->getType(StoreMI->getValueReg()) == OrigTy);
#endif

  const auto &DL = MF->getFunction().getParent()->getDataLayout();
  bool AnyMerged = false;
  do {
    unsigned NumPow2 = llvm::bit_floor(StoresToMerge.size());
    unsigned MaxSizeBits = NumPow2 * OrigTy.getSizeInBits().getFixedValue();
    // Compute the biggest store we can generate to handle the number of stores.
    unsigned MergeSizeBits;
    for (MergeSizeBits = MaxSizeBits; MergeSizeBits > 1; MergeSizeBits /= 2) {
      LLT StoreTy = LLT::scalar(MergeSizeBits);
      EVT StoreEVT =
          getApproximateEVTForLLT(StoreTy, DL, MF->getFunction().getContext());
      if (LegalSizes.size() > MergeSizeBits && LegalSizes[MergeSizeBits] &&
          TLI->canMergeStoresTo(AS, StoreEVT, *MF) &&
          (TLI->isTypeLegal(StoreEVT)))
        break; // We can generate a MergeSize bits store.
    }
    if (MergeSizeBits <= OrigTy.getSizeInBits())
      return AnyMerged; // No greater merge.

    unsigned NumStoresToMerge = MergeSizeBits / OrigTy.getSizeInBits();
    // Perform the actual merging.
    SmallVector<GStore *, 8> SingleMergeStores(
        StoresToMerge.begin(), StoresToMerge.begin() + NumStoresToMerge);
    AnyMerged |= doSingleStoreMerge(SingleMergeStores);
    StoresToMerge.erase(StoresToMerge.begin(),
                        StoresToMerge.begin() + NumStoresToMerge);
  } while (StoresToMerge.size() > 1);
  return AnyMerged;
}

bool LoadStoreOpt::isLegalOrBeforeLegalizer(const LegalityQuery &Query,
                                            MachineFunction &MF) const {
  auto Action = LI->getAction(Query).Action;
  // If the instruction is unsupported, it can't be legalized at all.
  if (Action == LegalizeActions::Unsupported)
    return false;
  return IsPreLegalizer || Action == LegalizeAction::Legal;
}

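// Sketch of the constant path below, with made-up values: merging two s16
// stores of 0x1122 (Stores[0], the lowest address) and 0x3344 builds the
// wide s32 constant 0x33441122, since each narrow value is inserted at bit
// position Idx * 16, and a single s32 G_STORE is emitted at the first
// store's pointer.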
bool LoadStoreOpt::doSingleStoreMerge(SmallVectorImpl<GStore *> &Stores) {
  assert(Stores.size() > 1);
  // We know that all the stores are consecutive and there are no aliasing
  // operations in the range. However, the values that are being stored may be
  // generated anywhere before each store. To ensure we have the values
  // available, we materialize the wide value and new store at the place of the
  // final store in the merge sequence.
  GStore *FirstStore = Stores[0];
  const unsigned NumStores = Stores.size();
  LLT SmallTy = MRI->getType(FirstStore->getValueReg());
  LLT WideValueTy =
      LLT::scalar(NumStores * SmallTy.getSizeInBits().getFixedValue());

  // For each store, compute pairwise merged debug locs.
  DebugLoc MergedLoc = Stores.front()->getDebugLoc();
  for (auto *Store : drop_begin(Stores))
    MergedLoc = DILocation::getMergedLocation(MergedLoc, Store->getDebugLoc());

  Builder.setInstr(*Stores.back());
  Builder.setDebugLoc(MergedLoc);

  // If all of the store values are constants, then create a wide constant
  // directly. Otherwise, we need to generate some instructions to merge the
  // existing values together into a wider type.
  SmallVector<APInt, 8> ConstantVals;
  for (auto *Store : Stores) {
    auto MaybeCst =
        getIConstantVRegValWithLookThrough(Store->getValueReg(), *MRI);
    if (!MaybeCst) {
      ConstantVals.clear();
      break;
    }
    ConstantVals.emplace_back(MaybeCst->Value);
  }

  Register WideReg;
  auto *WideMMO =
      MF->getMachineMemOperand(&FirstStore->getMMO(), 0, WideValueTy);
  if (ConstantVals.empty()) {
    // Mimic the SDAG behaviour here and don't try to do anything for unknown
    // values. In future, we should also support the cases of loads and
    // extracted vector elements.
    return false;
  }

  assert(ConstantVals.size() == NumStores);
  // Check if our wide constant is legal.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {WideValueTy}}, *MF))
    return false;
  APInt WideConst(WideValueTy.getSizeInBits(), 0);
  for (unsigned Idx = 0; Idx < ConstantVals.size(); ++Idx) {
    // Insert the smaller constant into the corresponding position in the
    // wider one.
    WideConst.insertBits(ConstantVals[Idx], Idx * SmallTy.getSizeInBits());
  }
  WideReg = Builder.buildConstant(WideValueTy, WideConst).getReg(0);
  auto NewStore =
      Builder.buildStore(WideReg, FirstStore->getPointerReg(), *WideMMO);
  (void)NewStore;
  LLVM_DEBUG(dbgs() << "Merged " << Stores.size()
                    << " stores into merged store: " << *NewStore);
  LLVM_DEBUG(for (auto *MI : Stores) dbgs() << " " << *MI;);
  NumStoresMerged += Stores.size();

  MachineOptimizationRemarkEmitter MORE(*MF, nullptr);
  MORE.emit([&]() {
    MachineOptimizationRemark R(DEBUG_TYPE, "MergedStore",
                                FirstStore->getDebugLoc(),
                                FirstStore->getParent());
    R << "Merged " << NV("NumMerged", Stores.size()) << " stores of "
      << NV("OrigWidth", SmallTy.getSizeInBytes())
      << " bytes into a single store of "
      << NV("NewWidth", WideValueTy.getSizeInBytes()) << " bytes";
    return R;
  });

  for (auto *MI : Stores)
    InstsToErase.insert(MI);
  return true;
}

bool LoadStoreOpt::processMergeCandidate(StoreMergeCandidate &C) {
  if (C.Stores.size() < 2) {
    C.reset();
    return false;
  }

  LLVM_DEBUG(dbgs() << "Checking store merge candidate with " << C.Stores.size()
                    << " stores, starting with " << *C.Stores[0]);
  // We know that the stores in the candidate are adjacent.
  // Now we need to check if any potential aliasing instructions recorded
  // during the search alias with load/stores added to the candidate after.
  // For example, if we have the candidate:
  //   C.Stores = [ST1, ST2, ST3, ST4]
  // and after seeing ST2 we saw a load LD1, which did not alias with ST1 or
  // ST2, then we would have recorded it into the PotentialAliases structure
  // with the associated index value of "1". Then we see ST3 and ST4 and add
  // them to the candidate group. We know that LD1 does not alias with ST1 or
  // ST2, since we already did that check. However we don't yet know if it
  // may alias ST3 and ST4, so we perform those checks now.
  SmallVector<GStore *> StoresToMerge;

  auto DoesStoreAliasWithPotential = [&](unsigned Idx, GStore &CheckStore) {
    for (auto AliasInfo : reverse(C.PotentialAliases)) {
      MachineInstr *PotentialAliasOp = AliasInfo.first;
      unsigned PreCheckedIdx = AliasInfo.second;
      if (static_cast<unsigned>(Idx) < PreCheckedIdx) {
        // Once our store index is lower than the index associated with the
        // potential alias, we know that we've already checked for this alias
        // and all of the earlier potential aliases too.
        return false;
      }
      // Need to check this alias.
      if (GISelAddressing::instMayAlias(CheckStore, *PotentialAliasOp, *MRI,
                                        AA)) {
        LLVM_DEBUG(dbgs() << "Potential alias " << *PotentialAliasOp
                          << " detected\n");
        return true;
      }
    }
    return false;
  };
  // Start from the last store in the group, and check if it aliases with any
  // of the potential aliasing operations in the list.
  for (int StoreIdx = C.Stores.size() - 1; StoreIdx >= 0; --StoreIdx) {
    auto *CheckStore = C.Stores[StoreIdx];
    if (DoesStoreAliasWithPotential(StoreIdx, *CheckStore))
      continue;
    StoresToMerge.emplace_back(CheckStore);
  }

  LLVM_DEBUG(dbgs() << StoresToMerge.size()
                    << " stores remaining after alias checks. Merging...\n");

  // Now we've checked for aliasing hazards, merge any stores left.
  C.reset();
  if (StoresToMerge.size() < 2)
    return false;
  return mergeStores(StoresToMerge);
}

bool LoadStoreOpt::operationAliasesWithCandidate(MachineInstr &MI,
                                                 StoreMergeCandidate &C) {
  if (C.Stores.empty())
    return false;
  return llvm::any_of(C.Stores, [&](MachineInstr *OtherMI) {
    return instMayAlias(MI, *OtherMI, *MRI, AA);
  });
}

void LoadStoreOpt::StoreMergeCandidate::addPotentialAlias(MachineInstr &MI) {
  PotentialAliases.emplace_back(std::make_pair(&MI, Stores.size() - 1));
}

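// Illustrative only: walking bottom-up, an s32 store to base+8 starts a
// candidate with CurrentLowestOffset = 8; the next eligible s32 store must
// write exactly to base+4, then base+0, and so on. A store that fails these
// checks (different base, different size, non-adjacent offset) is rejected
// here and instead treated by the caller as a potential aliasing hazard.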
bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
                                       StoreMergeCandidate &C) {
  // Check if the given store writes to an adjacent address, and other
  // requirements.
  LLT ValueTy = MRI->getType(StoreMI.getValueReg());
  LLT PtrTy = MRI->getType(StoreMI.getPointerReg());

  // Only handle scalars.
  if (!ValueTy.isScalar())
    return false;

  // Don't allow truncating stores for now.
  if (StoreMI.getMemSizeInBits() != ValueTy.getSizeInBits())
    return false;

  // Avoid adding volatile or ordered stores to the candidate. We already have a
  // check for this in instMayAlias() but that only gets called later between
  // potential aliasing hazards.
  if (!StoreMI.isSimple())
    return false;

  Register StoreAddr = StoreMI.getPointerReg();
  auto BIO = getPointerInfo(StoreAddr, *MRI);
  Register StoreBase = BIO.BaseReg;
  uint64_t StoreOffCst = BIO.Offset;
  if (C.Stores.empty()) {
    // This is the first store of the candidate.
    // If the offset can't possibly allow for a lower addressed store with the
    // same base, don't bother adding it.
    if (StoreOffCst < ValueTy.getSizeInBytes())
      return false;
    C.BasePtr = StoreBase;
    C.CurrentLowestOffset = StoreOffCst;
    C.Stores.emplace_back(&StoreMI);
    LLVM_DEBUG(dbgs() << "Starting a new merge candidate group with: "
                      << StoreMI);
    return true;
  }

  // Check the store is the same size as the existing ones in the candidate.
  if (MRI->getType(C.Stores[0]->getValueReg()).getSizeInBits() !=
      ValueTy.getSizeInBits())
    return false;

  if (MRI->getType(C.Stores[0]->getPointerReg()).getAddressSpace() !=
      PtrTy.getAddressSpace())
    return false;

  // There are other stores in the candidate. Check that the store address
  // writes to the next lowest adjacent address.
  if (C.BasePtr != StoreBase)
    return false;
  if ((C.CurrentLowestOffset - ValueTy.getSizeInBytes()) !=
      static_cast<uint64_t>(StoreOffCst))
    return false;

  // This writes to an adjacent address. Allow it.
  C.Stores.emplace_back(&StoreMI);
  C.CurrentLowestOffset = C.CurrentLowestOffset - ValueTy.getSizeInBytes();
  LLVM_DEBUG(dbgs() << "Candidate added store: " << StoreMI);
  return true;
}

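// A possible trace of the walk below (program order invented): for
//   ST base+0; ST base+4; LD other; ST base+8; ST base+12
// the bottom-up walk first collects base+12 and base+8 into a candidate. If
// LD other may alias those stores, the candidate is processed immediately;
// otherwise the load is merely recorded as a potential alias and base+4 and
// base+0 keep extending the same candidate.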
bool LoadStoreOpt::mergeBlockStores(MachineBasicBlock &MBB) {
  bool Changed = false;
  // Walk through the block bottom-up, looking for merging candidates.
  StoreMergeCandidate Candidate;
  for (MachineInstr &MI : llvm::reverse(MBB)) {
    if (InstsToErase.contains(&MI))
      continue;

    if (auto *StoreMI = dyn_cast<GStore>(&MI)) {
      // We have a G_STORE. Add it to the candidate if it writes to an adjacent
      // address.
      if (!addStoreToCandidate(*StoreMI, Candidate)) {
        // Store wasn't eligible to be added. May need to record it as a
        // potential alias.
        if (operationAliasesWithCandidate(*StoreMI, Candidate)) {
          Changed |= processMergeCandidate(Candidate);
          continue;
        }
        Candidate.addPotentialAlias(*StoreMI);
      }
      continue;
    }

    // If we don't have any stores yet, this instruction can't pose a problem.
    if (Candidate.Stores.empty())
      continue;

    // We're dealing with some other kind of instruction.
    if (isInstHardMergeHazard(MI)) {
      Changed |= processMergeCandidate(Candidate);
      Candidate.Stores.clear();
      continue;
    }

    if (!MI.mayLoadOrStore())
      continue;

    if (operationAliasesWithCandidate(MI, Candidate)) {
      // We have a potential alias, so process the current candidate if we can
      // and then continue looking for a new candidate.
      Changed |= processMergeCandidate(Candidate);
      continue;
    }

    // Record this instruction as a potential alias for future stores that are
    // added to the candidate.
    Candidate.addPotentialAlias(MI);
  }

  // Process any candidate left after finishing searching the entire block.
  Changed |= processMergeCandidate(Candidate);

  // Erase instructions now that we're no longer iterating over the block.
  for (auto *MI : InstsToErase)
    MI->eraseFromParent();
  InstsToErase.clear();
  return Changed;
}

/// Check if the store \p Store is a truncstore that can be merged. That is,
/// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty
/// Register then it does not need to match and SrcVal is set to the source
/// value found.
/// On match, returns the start byte offset of the \p SrcVal that is being
/// stored.
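///
/// For illustration (hypothetical MIR): with a wide s32 %y,
///   %x:_(s32) = G_LSHR %y, 16
///   %z:_(s8) = G_TRUNC %x
///   G_STORE %z, ...
/// matches with a byte offset of 2 (ShiftAmt 16 divided by the 8-bit narrow
/// width), while a store of G_TRUNC %y with no shift matches with offset 0.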
static std::optional<int64_t>
getTruncStoreByteOffset(GStore &Store, Register &SrcVal,
                        MachineRegisterInfo &MRI) {
  Register TruncVal;
  if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
    return std::nullopt;

  // The shift amount must be a constant multiple of the narrow type.
  // It is translated to the offset address in the wide source value "y".
  //
  // x = G_LSHR y, ShiftAmtC
  // s8 z = G_TRUNC x
  // store z, ...
  Register FoundSrcVal;
  int64_t ShiftAmt;
  if (!mi_match(TruncVal, MRI,
                m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
                         m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
    if (!SrcVal.isValid() || TruncVal == SrcVal) {
      if (!SrcVal.isValid())
        SrcVal = TruncVal;
      return 0; // If it's the lowest index store.
    }
    return std::nullopt;
  }

  unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
  if (ShiftAmt % NarrowBits != 0)
    return std::nullopt;
  const unsigned Offset = ShiftAmt / NarrowBits;

  if (SrcVal.isValid() && FoundSrcVal != SrcVal)
    return std::nullopt;

  if (!SrcVal.isValid())
    SrcVal = FoundSrcVal;
  else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
    return std::nullopt;
  return Offset;
}

/// Match a pattern where a wide type scalar value is stored by several narrow
/// stores. Fold it into a single store or a BSWAP and a store if the targets
/// supports it.
///
/// Assuming little endian target:
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 0) & 0xFF;
///  p[1] = (val >> 8) & 0xFF;
///  p[2] = (val >> 16) & 0xFF;
///  p[3] = (val >> 24) & 0xFF;
/// =>
///  *((i32)p) = val;
///
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 24) & 0xFF;
///  p[1] = (val >> 16) & 0xFF;
///  p[2] = (val >> 8) & 0xFF;
///  p[3] = (val >> 0) & 0xFF;
/// =>
///  *((i32)p) = BSWAP(val);
bool LoadStoreOpt::mergeTruncStore(GStore &StoreMI,
                                   SmallPtrSetImpl<GStore *> &DeletedStores) {
  LLT MemTy = StoreMI.getMMO().getMemoryType();

  // We only handle merging simple stores of 1-4 bytes.
  if (!MemTy.isScalar())
    return false;
  switch (MemTy.getSizeInBits()) {
  case 8:
  case 16:
  case 32:
    break;
  default:
    return false;
  }
  if (!StoreMI.isSimple())
    return false;

  // We do a simple search for mergeable stores prior to this one.
  // Any potential alias hazard along the way terminates the search.
  SmallVector<GStore *> FoundStores;

  // We're looking for:
  // 1) a (store(trunc(...)))
  // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get
  //    the partial value stored.
  // 3) where the offsets form either a little or big-endian sequence.

  auto &LastStore = StoreMI;

  // The single base pointer that all stores must use.
  Register BaseReg;
  int64_t LastOffset;
  if (!mi_match(LastStore.getPointerReg(), *MRI,
                m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) {
    BaseReg = LastStore.getPointerReg();
    LastOffset = 0;
  }

  GStore *LowestIdxStore = &LastStore;
  int64_t LowestIdxOffset = LastOffset;

  Register WideSrcVal;
  auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, *MRI);
  if (!LowestShiftAmt)
    return false; // Didn't match a trunc.
  assert(WideSrcVal.isValid());

  LLT WideStoreTy = MRI->getType(WideSrcVal);
  // The wide type might not be a multiple of the memory type, e.g. s48 and s32.
  if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0)
    return false;
  const unsigned NumStoresRequired =
      WideStoreTy.getSizeInBits() / MemTy.getSizeInBits();

  SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX);
  OffsetMap[*LowestShiftAmt] = LastOffset;
  FoundStores.emplace_back(&LastStore);

  const int MaxInstsToCheck = 10;
  int NumInstsChecked = 0;
  for (auto II = ++LastStore.getReverseIterator();
       II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
       ++II) {
    NumInstsChecked++;
    GStore *NewStore;
    if ((NewStore = dyn_cast<GStore>(&*II))) {
      if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple())
        break;
    } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
      break;
    } else {
      continue; // This is a safe instruction we can look past.
    }

    Register NewBaseReg;
    int64_t MemOffset;
    // Check we're storing to the same base + some offset.
    if (!mi_match(NewStore->getPointerReg(), *MRI,
                  m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) {
      NewBaseReg = NewStore->getPointerReg();
      MemOffset = 0;
    }
    if (BaseReg != NewBaseReg)
      break;

    auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, *MRI);
    if (!ShiftByteOffset)
      break;
    if (MemOffset < LowestIdxOffset) {
      LowestIdxOffset = MemOffset;
      LowestIdxStore = NewStore;
    }

    // Map the offset in the store and the offset in the combined value, and
    // early return if it has been set before.
    if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
        OffsetMap[*ShiftByteOffset] != INT64_MAX)
      break;
    OffsetMap[*ShiftByteOffset] = MemOffset;

    FoundStores.emplace_back(NewStore);
    // Reset counter since we've found a matching inst.
    NumInstsChecked = 0;
    if (FoundStores.size() == NumStoresRequired)
      break;
  }

  if (FoundStores.size() != NumStoresRequired) {
    if (FoundStores.size() == 1)
      return false;
    // We didn't find enough stores to merge into the size of the original
    // source value, but we may be able to generate a smaller store if we
    // truncate the source value.
    WideStoreTy = LLT::scalar(FoundStores.size() * MemTy.getScalarSizeInBits());
  }

  unsigned NumStoresFound = FoundStores.size();

  const auto &DL = LastStore.getMF()->getDataLayout();
  auto &C = LastStore.getMF()->getFunction().getContext();
  // Check that a store of the wide type is both allowed and fast on the target
  unsigned Fast = 0;
  bool Allowed = TLI->allowsMemoryAccess(
      C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
  if (!Allowed || !Fast)
    return false;

  // Check if the pieces of the value are going to the expected places in memory
  // to merge the stores.
  unsigned NarrowBits = MemTy.getScalarSizeInBits();
  auto checkOffsets = [&](bool MatchLittleEndian) {
    if (MatchLittleEndian) {
      for (unsigned i = 0; i != NumStoresFound; ++i)
        if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    } else { // MatchBigEndian by reversing loop counter.
      for (unsigned i = 0, j = NumStoresFound - 1; i != NumStoresFound;
           ++i, --j)
        if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    }
    return true;
  };

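  // For example (values invented): four s8 stores whose OffsetMap entries are
  // {0, 1, 2, 3} relative to LowestIdxOffset match little endian, while
  // {3, 2, 1, 0} match big endian and need a G_BSWAP on an LE target. For two
  // s16 halves stored in reversed order, rotating the wide value by half its
  // width swaps the halves, hence the G_ROTR special case below.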
  // Check if the offsets line up for the native data layout of this target.
  bool NeedBswap = false;
  bool NeedRotate = false;
  if (!checkOffsets(DL.isLittleEndian())) {
    // Special-case: check if byte offsets line up for the opposite endian.
    if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
      NeedBswap = true;
    else if (NumStoresFound == 2 && checkOffsets(DL.isBigEndian()))
      NeedRotate = true;
    else
      return false;
  }

  if (NeedBswap &&
      !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}, *MF))
    return false;
  if (NeedRotate &&
      !isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ROTR, {WideStoreTy, WideStoreTy}}, *MF))
    return false;

  Builder.setInstrAndDebugLoc(StoreMI);

  if (WideStoreTy != MRI->getType(WideSrcVal))
    WideSrcVal = Builder.buildTrunc(WideStoreTy, WideSrcVal).getReg(0);

  if (NeedBswap) {
    WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
  } else if (NeedRotate) {
    assert(WideStoreTy.getSizeInBits() % 2 == 0 &&
           "Unexpected type for rotate");
    auto RotAmt =
        Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2);
    WideSrcVal =
        Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0);
  }

  Builder.buildStore(WideSrcVal, LowestIdxStore->getPointerReg(),
                     LowestIdxStore->getMMO().getPointerInfo(),
                     LowestIdxStore->getMMO().getAlign());

  // Erase the old stores.
  for (auto *ST : FoundStores) {
    ST->eraseFromParent();
    DeletedStores.insert(ST);
  }
  return true;
}

bool LoadStoreOpt::mergeTruncStoresBlock(MachineBasicBlock &BB) {
  bool Changed = false;
  SmallVector<GStore *, 16> Stores;
  SmallPtrSet<GStore *, 8> DeletedStores;
  // Walk up the block so we can see the most eligible stores.
  for (MachineInstr &MI : llvm::reverse(BB))
    if (auto *StoreMI = dyn_cast<GStore>(&MI))
      Stores.emplace_back(StoreMI);

  for (auto *StoreMI : Stores) {
    if (DeletedStores.count(StoreMI))
      continue;
    if (mergeTruncStore(*StoreMI, DeletedStores))
      Changed = true;
  }
  return Changed;
}

bool LoadStoreOpt::mergeFunctionStores(MachineFunction &MF) {
  bool Changed = false;
  for (auto &BB : MF) {
    Changed |= mergeBlockStores(BB);
    Changed |= mergeTruncStoresBlock(BB);
  }

  // Erase all dead instructions left over by the merging.
  if (Changed) {
    for (auto &BB : MF) {
      for (auto &I : make_early_inc_range(make_range(BB.rbegin(), BB.rend()))) {
        if (isTriviallyDead(I, *MRI))
          I.eraseFromParent();
      }
    }
  }

  return Changed;
}

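// Illustrative cache contents (hypothetical target): if only s8, s16, s32 and
// s64 scalar stores are legal in address space 0, LegalStoreSizes[0] ends up
// with exactly bits 8, 16, 32 and 64 set, and mergeStores() will then never
// try to form, say, a 128-bit store.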
void LoadStoreOpt::initializeStoreMergeTargetInfo(unsigned AddrSpace) {
  // Query the legalizer info to record what store types are legal.
  // We record this because we don't want to bother trying to merge stores into
  // illegal ones, which would just result in being split again.

  if (LegalStoreSizes.count(AddrSpace)) {
    assert(LegalStoreSizes[AddrSpace].any());
    return; // Already cached sizes for this address space.
  }

  // Need to reserve at least MaxStoreSizeToForm + 1 bits.
  BitVector LegalSizes(MaxStoreSizeToForm * 2);
  const auto &LI = *MF->getSubtarget().getLegalizerInfo();
  const auto &DL = MF->getFunction().getParent()->getDataLayout();
  Type *IRPtrTy = PointerType::get(MF->getFunction().getContext(), AddrSpace);
  LLT PtrTy = getLLTForType(*IRPtrTy, DL);
  // We assume that we're not going to be generating any stores wider than
  // MaxStoreSizeToForm bits for now.
  for (unsigned Size = 2; Size <= MaxStoreSizeToForm; Size *= 2) {
    LLT Ty = LLT::scalar(Size);
    SmallVector<LegalityQuery::MemDesc, 2> MemDescrs(
        {{Ty, Ty.getSizeInBits(), AtomicOrdering::NotAtomic}});
    SmallVector<LLT> StoreTys({Ty, PtrTy});
    LegalityQuery Q(TargetOpcode::G_STORE, StoreTys, MemDescrs);
    LegalizeActionStep ActionStep = LI.getAction(Q);
    if (ActionStep.Action == LegalizeActions::Legal)
      LegalSizes.set(Size);
  }
  assert(LegalSizes.any() && "Expected some store sizes to be legal!");
  LegalStoreSizes[AddrSpace] = LegalSizes;
}

bool LoadStoreOpt::runOnMachineFunction(MachineFunction &MF) {
  // If the ISel pipeline failed, do not bother running that pass.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;

  LLVM_DEBUG(dbgs() << "Begin memory optimizations for: " << MF.getName()
                    << '\n');

  init(MF);
  bool Changed = false;
  Changed |= mergeFunctionStores(MF);

  LegalStoreSizes.clear();
  return Changed;
}
