//===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

using namespace llvm;
25
// Data layout used for every module these tests build; chosen to match a
// typical 64-bit target so alias analysis sees realistic type sizes.
const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
27
28/// There's a lot of common setup between these tests. This fixture helps reduce
29/// that. Tests should mock up a function, store it in F, and then call
30/// setupAnalyses().
31class MemorySSATest : public testing::Test {
32protected:
33 // N.B. Many of these members depend on each other (e.g. the Module depends on
34 // the Context, etc.). So, order matters here (and in TestAnalyses).
35 LLVMContext C;
36 Module M;
37 IRBuilder<> B;
38 DataLayout DL;
39 TargetLibraryInfoImpl TLII;
40 TargetLibraryInfo TLI;
41 Function *F;
42
43 // Things that we need to build after the function is created.
44 struct TestAnalyses {
45 DominatorTree DT;
46 AssumptionCache AC;
47 AAResults AA;
48 BasicAAResult BAA;
49 // We need to defer MSSA construction until AA is *entirely* set up, which
50 // requires calling addAAResult. Hence, we just use a pointer here.
51 std::unique_ptr<MemorySSA> MSSA;
52 MemorySSAWalker *Walker;
53
54 TestAnalyses(MemorySSATest &Test)
55 : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
56 BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
57 AA.addAAResult(AAResult&: BAA);
58 MSSA = std::make_unique<MemorySSA>(args&: *Test.F, args: &AA, args: &DT);
59 Walker = MSSA->getWalker();
60 }
61 };
62
63 std::unique_ptr<TestAnalyses> Analyses;
64
65 void setupAnalyses() {
66 assert(F);
67 Analyses.reset(p: new TestAnalyses(*this));
68 }
69
70public:
71 MemorySSATest()
72 : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
73};
74
75TEST_F(MemorySSATest, CreateALoad) {
76 // We create a diamond where there is a store on one side, and then after
77 // building MemorySSA, create a load after the merge point, and use it to test
78 // updating by creating an access for the load.
79 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
80 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
81 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
82 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
83 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
84 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
85 B.SetInsertPoint(Entry);
86 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
87 B.SetInsertPoint(Left);
88 Argument *PointerArg = &*F->arg_begin();
89 B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
90 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Left);
91 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Right);
92
93 setupAnalyses();
94 MemorySSA &MSSA = *Analyses->MSSA;
95 MemorySSAUpdater Updater(&MSSA);
96 // Add the load
97 B.SetInsertPoint(Merge);
98 LoadInst *LoadInst = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
99
100 // MemoryPHI should already exist.
101 MemoryPhi *MP = MSSA.getMemoryAccess(BB: Merge);
102 EXPECT_NE(MP, nullptr);
103
104 // Create the load memory acccess
105 MemoryUse *LoadAccess = cast<MemoryUse>(Val: Updater.createMemoryAccessInBB(
106 I: LoadInst, Definition: MP, BB: Merge, Point: MemorySSA::Beginning));
107 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
108 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
109 MSSA.verifyMemorySSA();
110}
111TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
112 // We create a diamond, then build memoryssa with no memory accesses, and
113 // incrementally update it by inserting a store in the, entry, a load in the
114 // merge point, then a store in the branch, another load in the merge point,
115 // and then a store in the entry.
116 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
117 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
118 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
119 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
120 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
121 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
122 B.SetInsertPoint(Entry);
123 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
124 B.SetInsertPoint(TheBB: Left, IP: Left->begin());
125 Argument *PointerArg = &*F->arg_begin();
126 B.SetInsertPoint(Left);
127 B.CreateBr(Dest: Merge);
128 B.SetInsertPoint(Right);
129 B.CreateBr(Dest: Merge);
130
131 setupAnalyses();
132 MemorySSA &MSSA = *Analyses->MSSA;
133 MemorySSAUpdater Updater(&MSSA);
134 // Add the store
135 B.SetInsertPoint(TheBB: Entry, IP: Entry->begin());
136 StoreInst *EntryStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
137 MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
138 I: EntryStore, Definition: nullptr, BB: Entry, Point: MemorySSA::Beginning);
139 Updater.insertDef(Def: cast<MemoryDef>(Val: EntryStoreAccess));
140
141 // Add the load
142 B.SetInsertPoint(TheBB: Merge, IP: Merge->begin());
143 LoadInst *FirstLoad = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
144
145 // MemoryPHI should not already exist.
146 MemoryPhi *MP = MSSA.getMemoryAccess(BB: Merge);
147 EXPECT_EQ(MP, nullptr);
148
149 // Create the load memory access
150 MemoryUse *FirstLoadAccess = cast<MemoryUse>(Val: Updater.createMemoryAccessInBB(
151 I: FirstLoad, Definition: nullptr, BB: Merge, Point: MemorySSA::Beginning));
152 Updater.insertUse(Use: FirstLoadAccess);
153 // Should just have a load using the entry access, because it should discover
154 // the phi is trivial
155 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);
156
157 // Create a store on the left
158 // Add the store
159 B.SetInsertPoint(TheBB: Left, IP: Left->begin());
160 StoreInst *LeftStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
161 MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
162 I: LeftStore, Definition: nullptr, BB: Left, Point: MemorySSA::Beginning);
163 Updater.insertDef(Def: cast<MemoryDef>(Val: LeftStoreAccess), RenameUses: false);
164
165 // MemoryPHI should exist after adding LeftStore.
166 MP = MSSA.getMemoryAccess(BB: Merge);
167 EXPECT_NE(MP, nullptr);
168
169 // Add the second load
170 B.SetInsertPoint(TheBB: Merge, IP: Merge->begin());
171 LoadInst *SecondLoad = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
172
173 // Create the load memory access
174 MemoryUse *SecondLoadAccess = cast<MemoryUse>(Val: Updater.createMemoryAccessInBB(
175 I: SecondLoad, Definition: nullptr, BB: Merge, Point: MemorySSA::Beginning));
176 Updater.insertUse(Use: SecondLoadAccess);
177 // Now the load should be a phi of the entry store and the left store
178 MemoryPhi *MergePhi =
179 dyn_cast<MemoryPhi>(Val: SecondLoadAccess->getDefiningAccess());
180 EXPECT_NE(MergePhi, nullptr);
181 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
182 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
183 // Now create a store below the existing one in the entry
184 B.SetInsertPoint(TheBB: Entry, IP: --Entry->end());
185 StoreInst *SecondEntryStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
186 MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
187 I: SecondEntryStore, Definition: nullptr, BB: Entry, Point: MemorySSA::End);
188 // Insert it twice just to test renaming
189 Updater.insertDef(Def: cast<MemoryDef>(Val: SecondEntryStoreAccess), RenameUses: false);
190 EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
191 Updater.insertDef(Def: cast<MemoryDef>(Val: SecondEntryStoreAccess), RenameUses: true);
192 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
193 // and make sure the phi below it got updated, despite being blocks away
194 MergePhi = dyn_cast<MemoryPhi>(Val: SecondLoadAccess->getDefiningAccess());
195 EXPECT_NE(MergePhi, nullptr);
196 EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
197 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
198 MSSA.verifyMemorySSA();
199}
200
201TEST_F(MemorySSATest, CreateALoadUpdater) {
202 // We create a diamond, then build memoryssa with no memory accesses, and
203 // incrementally update it by inserting a store in one of the branches, and a
204 // load in the merge point
205 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
206 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
207 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
208 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
209 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
210 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
211 B.SetInsertPoint(Entry);
212 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
213 B.SetInsertPoint(TheBB: Left, IP: Left->begin());
214 Argument *PointerArg = &*F->arg_begin();
215 B.SetInsertPoint(Left);
216 B.CreateBr(Dest: Merge);
217 B.SetInsertPoint(Right);
218 B.CreateBr(Dest: Merge);
219
220 setupAnalyses();
221 MemorySSA &MSSA = *Analyses->MSSA;
222 MemorySSAUpdater Updater(&MSSA);
223 B.SetInsertPoint(TheBB: Left, IP: Left->begin());
224 // Add the store
225 StoreInst *SI = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
226 MemoryAccess *StoreAccess =
227 Updater.createMemoryAccessInBB(I: SI, Definition: nullptr, BB: Left, Point: MemorySSA::Beginning);
228 Updater.insertDef(Def: cast<MemoryDef>(Val: StoreAccess));
229
230 // MemoryPHI should be created when inserting the def
231 MemoryPhi *MP = MSSA.getMemoryAccess(BB: Merge);
232 EXPECT_NE(MP, nullptr);
233
234 // Add the load
235 B.SetInsertPoint(TheBB: Merge, IP: Merge->begin());
236 LoadInst *LoadInst = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
237
238 // Create the load memory acccess
239 MemoryUse *LoadAccess = cast<MemoryUse>(Val: Updater.createMemoryAccessInBB(
240 I: LoadInst, Definition: nullptr, BB: Merge, Point: MemorySSA::Beginning));
241 Updater.insertUse(Use: LoadAccess);
242 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
243 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
244 MSSA.verifyMemorySSA();
245}
246
247TEST_F(MemorySSATest, SinkLoad) {
248 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
249 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
250 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
251 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
252 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
253 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
254 B.SetInsertPoint(Entry);
255 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
256 B.SetInsertPoint(TheBB: Left, IP: Left->begin());
257 Argument *PointerArg = &*F->arg_begin();
258 B.SetInsertPoint(Left);
259 B.CreateBr(Dest: Merge);
260 B.SetInsertPoint(Right);
261 B.CreateBr(Dest: Merge);
262
263 // Load in left block
264 B.SetInsertPoint(TheBB: Left, IP: Left->begin());
265 LoadInst *LoadInst1 = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
266 // Store in merge block
267 B.SetInsertPoint(TheBB: Merge, IP: Merge->begin());
268 B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
269
270 setupAnalyses();
271 MemorySSA &MSSA = *Analyses->MSSA;
272 MemorySSAUpdater Updater(&MSSA);
273
274 // Mimic sinking of a load:
275 // - clone load
276 // - insert in "exit" block
277 // - insert in mssa
278 // - remove from original block
279
280 LoadInst *LoadInstClone = cast<LoadInst>(Val: LoadInst1->clone());
281 LoadInstClone->insertInto(ParentBB: Merge, It: Merge->begin());
282 MemoryAccess * NewLoadAccess =
283 Updater.createMemoryAccessInBB(I: LoadInstClone, Definition: nullptr,
284 BB: LoadInstClone->getParent(),
285 Point: MemorySSA::Beginning);
286 Updater.insertUse(Use: cast<MemoryUse>(Val: NewLoadAccess));
287 MSSA.verifyMemorySSA();
288 Updater.removeMemoryAccess(MSSA.getMemoryAccess(I: LoadInst1));
289 MSSA.verifyMemorySSA();
290}
291
292TEST_F(MemorySSATest, MoveAStore) {
293 // We create a diamond where there is a in the entry, a store on one side, and
294 // a load at the end. After building MemorySSA, we test updating by moving
295 // the store from the side block to the entry block. This destroys the old
296 // access.
297 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
298 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
299 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
300 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
301 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
302 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
303 B.SetInsertPoint(Entry);
304 Argument *PointerArg = &*F->arg_begin();
305 StoreInst *EntryStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
306 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
307 B.SetInsertPoint(Left);
308 StoreInst *SideStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
309 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Left);
310 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Right);
311 B.SetInsertPoint(Merge);
312 B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
313 setupAnalyses();
314 MemorySSA &MSSA = *Analyses->MSSA;
315 MemorySSAUpdater Updater(&MSSA);
316 // Move the store
317 SideStore->moveBefore(MovePos: Entry->getTerminator());
318 MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(I: EntryStore);
319 MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(I: SideStore);
320 MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
321 I: SideStore, Definition: EntryStoreAccess, InsertPt: EntryStoreAccess);
322 EntryStoreAccess->replaceAllUsesWith(V: NewStoreAccess);
323 Updater.removeMemoryAccess(SideStoreAccess);
324 MSSA.verifyMemorySSA();
325}
326
327TEST_F(MemorySSATest, MoveAStoreUpdater) {
328 // We create a diamond where there is a in the entry, a store on one side, and
329 // a load at the end. After building MemorySSA, we test updating by moving
330 // the store from the side block to the entry block. This destroys the old
331 // access.
332 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
333 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
334 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
335 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
336 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
337 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
338 B.SetInsertPoint(Entry);
339 Argument *PointerArg = &*F->arg_begin();
340 StoreInst *EntryStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
341 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
342 B.SetInsertPoint(Left);
343 auto *SideStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
344 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Left);
345 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Right);
346 B.SetInsertPoint(Merge);
347 auto *MergeLoad = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
348 setupAnalyses();
349 MemorySSA &MSSA = *Analyses->MSSA;
350 MemorySSAUpdater Updater(&MSSA);
351
352 // Move the store
353 SideStore->moveBefore(MovePos: Entry->getTerminator());
354 auto *EntryStoreAccess = MSSA.getMemoryAccess(I: EntryStore);
355 auto *SideStoreAccess = MSSA.getMemoryAccess(I: SideStore);
356 auto *NewStoreAccess = Updater.createMemoryAccessAfter(
357 I: SideStore, Definition: EntryStoreAccess, InsertPt: EntryStoreAccess);
358 // Before, the load will point to a phi of the EntryStore and SideStore.
359 auto *LoadAccess = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: MergeLoad));
360 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
361 MemoryPhi *MergePhi = cast<MemoryPhi>(Val: LoadAccess->getDefiningAccess());
362 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
363 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
364 Updater.removeMemoryAccess(SideStoreAccess);
365 Updater.insertDef(Def: cast<MemoryDef>(Val: NewStoreAccess));
366 // After it's a phi of the new side store access.
367 EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
368 EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
369 MSSA.verifyMemorySSA();
370}
371
372TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
373 // We create a diamond where there is a in the entry, a store on one side, and
374 // a load at the end. After building MemorySSA, we test updating by moving
375 // the store from the side block to the entry block. This does not destroy
376 // the old access.
377 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
378 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
379 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
380 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
381 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
382 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
383 B.SetInsertPoint(Entry);
384 Argument *PointerArg = &*F->arg_begin();
385 StoreInst *EntryStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
386 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
387 B.SetInsertPoint(Left);
388 auto *SideStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
389 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Left);
390 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Right);
391 B.SetInsertPoint(Merge);
392 auto *MergeLoad = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
393 setupAnalyses();
394 MemorySSA &MSSA = *Analyses->MSSA;
395 MemorySSAUpdater Updater(&MSSA);
396
397 // Move the store
398 auto *EntryStoreAccess = MSSA.getMemoryAccess(I: EntryStore);
399 auto *SideStoreAccess = MSSA.getMemoryAccess(I: SideStore);
400 // Before, the load will point to a phi of the EntryStore and SideStore.
401 auto *LoadAccess = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: MergeLoad));
402 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
403 MemoryPhi *MergePhi = cast<MemoryPhi>(Val: LoadAccess->getDefiningAccess());
404 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
405 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
406 SideStore->moveBefore(BB&: *EntryStore->getParent(), I: ++EntryStore->getIterator());
407 Updater.moveAfter(What: SideStoreAccess, Where: EntryStoreAccess);
408 // After, it's a phi of the side store.
409 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
410 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
411
412 MSSA.verifyMemorySSA();
413}
414
415TEST_F(MemorySSATest, MoveAStoreAllAround) {
416 // We create a diamond where there is a in the entry, a store on one side, and
417 // a load at the end. After building MemorySSA, we test updating by moving
418 // the store from the side block to the entry block, then to the other side
419 // block, then to before the load. This does not destroy the old access.
420 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
421 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
422 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
423 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
424 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
425 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
426 B.SetInsertPoint(Entry);
427 Argument *PointerArg = &*F->arg_begin();
428 StoreInst *EntryStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
429 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
430 B.SetInsertPoint(Left);
431 auto *SideStore = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
432 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Left);
433 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Right);
434 B.SetInsertPoint(Merge);
435 auto *MergeLoad = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
436 setupAnalyses();
437 MemorySSA &MSSA = *Analyses->MSSA;
438 MemorySSAUpdater Updater(&MSSA);
439
440 // Move the store
441 auto *EntryStoreAccess = MSSA.getMemoryAccess(I: EntryStore);
442 auto *SideStoreAccess = MSSA.getMemoryAccess(I: SideStore);
443 // Before, the load will point to a phi of the EntryStore and SideStore.
444 auto *LoadAccess = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: MergeLoad));
445 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
446 MemoryPhi *MergePhi = cast<MemoryPhi>(Val: LoadAccess->getDefiningAccess());
447 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
448 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
449 // Move the store before the entry store
450 SideStore->moveBefore(BB&: *EntryStore->getParent(), I: EntryStore->getIterator());
451 Updater.moveBefore(What: SideStoreAccess, Where: EntryStoreAccess);
452 // After, it's a phi of the entry store.
453 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
454 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
455 MSSA.verifyMemorySSA();
456 // Now move the store to the right branch
457 SideStore->moveBefore(BB&: *Right, I: Right->begin());
458 Updater.moveToPlace(What: SideStoreAccess, BB: Right, Where: MemorySSA::Beginning);
459 MSSA.verifyMemorySSA();
460 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
461 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
462 // Now move it before the load
463 SideStore->moveBefore(MovePos: MergeLoad);
464 Updater.moveBefore(What: SideStoreAccess, Where: LoadAccess);
465 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
466 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
467 MSSA.verifyMemorySSA();
468}
469
470TEST_F(MemorySSATest, RemoveAPhi) {
471 // We create a diamond where there is a store on one side, and then a load
472 // after the merge point. This enables us to test a bunch of different
473 // removal cases.
474 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
475 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
476 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
477 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
478 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
479 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
480 B.SetInsertPoint(Entry);
481 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
482 B.SetInsertPoint(Left);
483 Argument *PointerArg = &*F->arg_begin();
484 StoreInst *StoreInst = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
485 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Left);
486 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Right);
487 B.SetInsertPoint(Merge);
488 LoadInst *LoadInst = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
489
490 setupAnalyses();
491 MemorySSA &MSSA = *Analyses->MSSA;
492 MemorySSAUpdater Updater(&MSSA);
493
494 // Before, the load will be a use of a phi<store, liveonentry>.
495 MemoryUse *LoadAccess = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: LoadInst));
496 MemoryDef *StoreAccess = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreInst));
497 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
498 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
499 // Kill the store
500 Updater.removeMemoryAccess(StoreAccess);
501 MemoryPhi *MP = cast<MemoryPhi>(Val: DefiningAccess);
502 // Verify the phi ended up as liveonentry, liveonentry
503 for (auto &Op : MP->incoming_values())
504 EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
505 // Replace the phi uses with the live on entry def
506 MP->replaceAllUsesWith(V: MSSA.getLiveOnEntryDef());
507 // Verify the load is now defined by liveOnEntryDef
508 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
509 // Remove the PHI
510 Updater.removeMemoryAccess(MP);
511 MSSA.verifyMemorySSA();
512}
513
514TEST_F(MemorySSATest, RemoveMemoryAccess) {
515 // We create a diamond where there is a store on one side, and then a load
516 // after the merge point. This enables us to test a bunch of different
517 // removal cases.
518 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
519 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
520 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
521 BasicBlock *Left(BasicBlock::Create(Context&: C, Name: "", Parent: F));
522 BasicBlock *Right(BasicBlock::Create(Context&: C, Name: "", Parent: F));
523 BasicBlock *Merge(BasicBlock::Create(Context&: C, Name: "", Parent: F));
524 B.SetInsertPoint(Entry);
525 B.CreateCondBr(Cond: B.getTrue(), True: Left, False: Right);
526 B.SetInsertPoint(Left);
527 Argument *PointerArg = &*F->arg_begin();
528 StoreInst *StoreInst = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
529 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Left);
530 BranchInst::Create(IfTrue: Merge, InsertAtEnd: Right);
531 B.SetInsertPoint(Merge);
532 LoadInst *LoadInst = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: PointerArg);
533
534 setupAnalyses();
535 MemorySSA &MSSA = *Analyses->MSSA;
536 MemorySSAWalker *Walker = Analyses->Walker;
537 MemorySSAUpdater Updater(&MSSA);
538
539 // Before, the load will be a use of a phi<store, liveonentry>. It should be
540 // the same after.
541 MemoryUse *LoadAccess = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: LoadInst));
542 MemoryDef *StoreAccess = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreInst));
543 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
544 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
545 // The load is currently clobbered by one of the phi arguments, so the walker
546 // should determine the clobbering access as the phi.
547 EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
548 Updater.removeMemoryAccess(StoreAccess);
549 MSSA.verifyMemorySSA();
550 // After the removeaccess, let's see if we got the right accesses
551 // The load should still point to the phi ...
552 EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
553 // but we should now get live on entry for the clobbering definition of the
554 // load, since it will walk past the phi node since every argument is the
555 // same.
556 // XXX: This currently requires either removing the phi or resetting optimized
557 // on the load
558
559 EXPECT_FALSE(
560 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
561 // If we reset optimized, we get live on entry.
562 LoadAccess->resetOptimized();
563 EXPECT_TRUE(
564 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
565 // The phi should now be a two entry phi with two live on entry defs.
566 for (const auto &Op : DefiningAccess->operands()) {
567 MemoryAccess *Operand = cast<MemoryAccess>(Val: &*Op);
568 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
569 }
570
571 // Now we try to remove the single valued phi
572 Updater.removeMemoryAccess(DefiningAccess);
573 MSSA.verifyMemorySSA();
574 // Now the load should be a load of live on entry.
575 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
576}
577
578// We had a bug with caching where the walker would report MemoryDef#3's clobber
579// (below) was MemoryDef#1.
580//
581// define void @F(i8*) {
582// %A = alloca i8, i8 1
583// ; 1 = MemoryDef(liveOnEntry)
584// store i8 0, i8* %A
585// ; 2 = MemoryDef(1)
586// store i8 1, i8* %A
587// ; 3 = MemoryDef(2)
588// store i8 2, i8* %A
589// }
590TEST_F(MemorySSATest, TestTripleStore) {
591 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
592 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
593 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
594 Type *Int8 = Type::getInt8Ty(C);
595 Value *Alloca = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
596 StoreInst *S1 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: Alloca);
597 StoreInst *S2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 1), Ptr: Alloca);
598 StoreInst *S3 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 2), Ptr: Alloca);
599
600 setupAnalyses();
601 MemorySSA &MSSA = *Analyses->MSSA;
602 MemorySSAWalker *Walker = Analyses->Walker;
603
604 unsigned I = 0;
605 for (StoreInst *V : {S1, S2, S3}) {
606 // Everything should be clobbered by its defining access
607 MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(I: V)->getDefiningAccess();
608 MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(I: V);
609 EXPECT_EQ(DefiningAccess, WalkerClobber)
610 << "Store " << I << " doesn't have the correct clobbering access";
611 // EXPECT_EQ expands such that if we increment I above, it won't get
612 // incremented except when we try to print the error message.
613 ++I;
614 }
615}
616
617// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
618// walker was caching the initial node it walked. This was fine (albeit
619// mostly redundant) unless the initial node being walked is a clobber for the
620// query. In that case, we'd cache that the node clobbered itself.
621TEST_F(MemorySSATest, TestStoreAndLoad) {
622 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
623 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
624 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
625 Type *Int8 = Type::getInt8Ty(C);
626 Value *Alloca = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
627 Instruction *SI = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: Alloca);
628 Instruction *LI = B.CreateLoad(Ty: Int8, Ptr: Alloca);
629
630 setupAnalyses();
631 MemorySSA &MSSA = *Analyses->MSSA;
632 MemorySSAWalker *Walker = Analyses->Walker;
633
634 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(I: LI);
635 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
636 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
637}
638
639// Another bug (related to the above two fixes): It was noted that, given the
640// following code:
641// ; 1 = MemoryDef(liveOnEntry)
642// store i8 0, i8* %1
643//
644// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
645// hand back the store (correctly). A later call to
646// getClobberingMemoryAccess(const Instruction*) would also hand back the store
647// (incorrectly; it should return liveOnEntry).
648//
649// This test checks that repeated calls to either function returns what they're
650// meant to.
651TEST_F(MemorySSATest, TestStoreDoubleQuery) {
652 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
653 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
654 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
655 Type *Int8 = Type::getInt8Ty(C);
656 Value *Alloca = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
657 StoreInst *SI = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: Alloca);
658
659 setupAnalyses();
660 MemorySSA &MSSA = *Analyses->MSSA;
661 MemorySSAWalker *Walker = Analyses->Walker;
662
663 MemoryAccess *StoreAccess = MSSA.getMemoryAccess(I: SI);
664 MemoryLocation StoreLoc = MemoryLocation::get(SI);
665 MemoryAccess *Clobber =
666 Walker->getClobberingMemoryAccess(MA: StoreAccess, Loc: StoreLoc);
667 MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(I: SI);
668
669 EXPECT_EQ(Clobber, StoreAccess);
670 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
671
672 // Try again (with entries in the cache already) for good measure...
673 Clobber = Walker->getClobberingMemoryAccess(MA: StoreAccess, Loc: StoreLoc);
674 LiveOnEntry = Walker->getClobberingMemoryAccess(I: SI);
675 EXPECT_EQ(Clobber, StoreAccess);
676 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
677}
678
679// Bug: During phi optimization, the walker wouldn't cache to the proper result
680// in the farthest-walked BB.
681//
682// Specifically, it would assume that whatever we walked to was a clobber.
683// "Whatever we walked to" isn't a clobber if we hit a cache entry.
684//
685// ...So, we need a test case that looks like:
686// A
687// / \
688// B |
689// \ /
690// C
691//
692// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
693// The walk must determine that the blocker exists by using cache entries *while
694// walking* 'B'.
695TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
696 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
697 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
698 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "A", Parent: F));
699 Type *Int8 = Type::getInt8Ty(C);
700 Constant *One = ConstantInt::get(Ty: Int8, V: 1);
701 Constant *Zero = ConstantInt::get(Ty: Int8, V: 0);
702 Value *AllocA = B.CreateAlloca(Ty: Int8, ArraySize: One, Name: "a");
703 Value *AllocB = B.CreateAlloca(Ty: Int8, ArraySize: One, Name: "b");
704 BasicBlock *IfThen = BasicBlock::Create(Context&: C, Name: "B", Parent: F);
705 BasicBlock *IfEnd = BasicBlock::Create(Context&: C, Name: "C", Parent: F);
706
707 B.CreateCondBr(Cond: UndefValue::get(T: Type::getInt1Ty(C)), True: IfThen, False: IfEnd);
708
709 B.SetInsertPoint(IfThen);
710 Instruction *FirstStore = B.CreateStore(Val: Zero, Ptr: AllocA);
711 B.CreateStore(Val: Zero, Ptr: AllocB);
712 Instruction *ALoad0 = B.CreateLoad(Ty: Int8, Ptr: AllocA, Name: "");
713 Instruction *BStore = B.CreateStore(Val: Zero, Ptr: AllocB);
714 // Due to use optimization/etc. we make a store to A, which is removed after
715 // we build MSSA. This helps keep the test case simple-ish.
716 Instruction *KillStore = B.CreateStore(Val: Zero, Ptr: AllocA);
717 Instruction *ALoad = B.CreateLoad(Ty: Int8, Ptr: AllocA, Name: "");
718 B.CreateBr(Dest: IfEnd);
719
720 B.SetInsertPoint(IfEnd);
721 Instruction *BelowPhi = B.CreateStore(Val: Zero, Ptr: AllocA);
722
723 setupAnalyses();
724 MemorySSA &MSSA = *Analyses->MSSA;
725 MemorySSAWalker *Walker = Analyses->Walker;
726 MemorySSAUpdater Updater(&MSSA);
727
728 // Kill `KillStore`; it exists solely so that the load after it won't be
729 // optimized to FirstStore.
730 Updater.removeMemoryAccess(MSSA.getMemoryAccess(I: KillStore));
731 KillStore->eraseFromParent();
732 auto *ALoadMA = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: ALoad));
733 EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
734
735 // Populate the cache for the store to AllocB directly after FirstStore. It
736 // should point to something in block B (so something in D can't be optimized
737 // to it).
738 MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(I: ALoad0);
739 EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
740
741 // If the bug exists, this will introduce a bad cache entry for %a on BStore.
742 // It will point to the store to %b after FirstStore. This only happens during
743 // phi optimization.
744 MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(I: BelowPhi);
745 MemoryAccess *Phi = MSSA.getMemoryAccess(BB: IfEnd);
746 EXPECT_EQ(BottomClobber, Phi);
747
748 // This query will first check the cache for {%a, BStore}. It should point to
749 // FirstStore, not to the store after FirstStore.
750 MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(I: ALoad);
751 EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
752}
753
754// Test that our walker properly handles loads with the invariant group
755// attribute. It's a bit hacky, since we add the invariant attribute *after*
756// building MSSA. Otherwise, the use optimizer will optimize it for us, which
757// isn't what we want.
758// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
759TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
760 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
761 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
762 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
763 Type *Int8 = Type::getInt8Ty(C);
764 Constant *One = ConstantInt::get(Ty: Int8, V: 1);
765 Value *AllocA = B.CreateAlloca(Ty: Int8, ArraySize: One, Name: "");
766
767 Instruction *Store = B.CreateStore(Val: One, Ptr: AllocA);
768 Instruction *Load = B.CreateLoad(Ty: Int8, Ptr: AllocA);
769
770 setupAnalyses();
771 MemorySSA &MSSA = *Analyses->MSSA;
772 MemorySSAWalker *Walker = Analyses->Walker;
773
774 auto *LoadMA = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: Load));
775 auto *StoreMA = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: Store));
776 EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
777
778 // ...At the time of writing, no cache should exist for LoadMA. Be a bit
779 // flexible to future changes.
780 Walker->invalidateInfo(LoadMA);
781 Load->setMetadata(KindID: LLVMContext::MD_invariant_load, Node: MDNode::get(Context&: C, MDs: {}));
782
783 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(MA: LoadMA);
784 EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
785}
786
787// Test loads get reoptimized properly by the walker.
788TEST_F(MemorySSATest, WalkerReopt) {
789 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
790 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
791 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
792 Type *Int8 = Type::getInt8Ty(C);
793 Value *AllocaA = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
794 Instruction *SIA = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: AllocaA);
795 Value *AllocaB = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "B");
796 Instruction *SIB = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: AllocaB);
797 Instruction *LIA = B.CreateLoad(Ty: Int8, Ptr: AllocaA);
798
799 setupAnalyses();
800 MemorySSA &MSSA = *Analyses->MSSA;
801 MemorySSAWalker *Walker = Analyses->Walker;
802 MemorySSAUpdater Updater(&MSSA);
803
804 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(I: LIA);
805 MemoryUse *LoadAccess = cast<MemoryUse>(Val: MSSA.getMemoryAccess(I: LIA));
806 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
807 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
808 Updater.removeMemoryAccess(LoadAccess);
809
810 // Create the load memory access pointing to an unoptimized place.
811 MemoryUse *NewLoadAccess = cast<MemoryUse>(Val: Updater.createMemoryAccessInBB(
812 I: LIA, Definition: MSSA.getMemoryAccess(I: SIB), BB: LIA->getParent(), Point: MemorySSA::End));
813 // This should it cause it to be optimized
814 EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
815 EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
816}
817
818// Test out MemorySSAUpdater::moveBefore
819TEST_F(MemorySSATest, MoveAboveMemoryDef) {
820 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
821 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
822 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
823
824 Type *Int8 = Type::getInt8Ty(C);
825 Value *A = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
826 Value *B_ = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "B");
827 Value *C = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "C");
828
829 StoreInst *StoreA0 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: A);
830 StoreInst *StoreB = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: B_);
831 LoadInst *LoadB = B.CreateLoad(Ty: Int8, Ptr: B_);
832 StoreInst *StoreA1 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 4), Ptr: A);
833 StoreInst *StoreC = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 4), Ptr: C);
834 StoreInst *StoreA2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 4), Ptr: A);
835 LoadInst *LoadC = B.CreateLoad(Ty: Int8, Ptr: C);
836
837 setupAnalyses();
838 MemorySSA &MSSA = *Analyses->MSSA;
839 MemorySSAWalker &Walker = *Analyses->Walker;
840
841 MemorySSAUpdater Updater(&MSSA);
842 StoreC->moveBefore(MovePos: StoreB);
843 Updater.moveBefore(What: cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreC)),
844 Where: cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreB)));
845
846 MSSA.verifyMemorySSA();
847
848 EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
849 MSSA.getMemoryAccess(StoreC));
850 EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
851 MSSA.getMemoryAccess(StoreA0));
852 EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
853 MSSA.getMemoryAccess(StoreA1));
854 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
855 MSSA.getMemoryAccess(StoreB));
856 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
857 MSSA.getMemoryAccess(StoreC));
858
859 // exercise block numbering
860 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
861 MSSA.getMemoryAccess(StoreB)));
862 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
863 MSSA.getMemoryAccess(StoreA2)));
864}
865
866TEST_F(MemorySSATest, Irreducible) {
867 // Create the equivalent of
868 // x = something
869 // if (...)
870 // goto second_loop_entry
871 // while (...) {
872 // second_loop_entry:
873 // }
874 // use(x)
875
876 SmallVector<PHINode *, 8> Inserted;
877 IRBuilder<> B(C);
878 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
879 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
880
881 // Make blocks
882 BasicBlock *IfBB = BasicBlock::Create(Context&: C, Name: "if", Parent: F);
883 BasicBlock *LoopStartBB = BasicBlock::Create(Context&: C, Name: "loopstart", Parent: F);
884 BasicBlock *LoopMainBB = BasicBlock::Create(Context&: C, Name: "loopmain", Parent: F);
885 BasicBlock *AfterLoopBB = BasicBlock::Create(Context&: C, Name: "afterloop", Parent: F);
886 B.SetInsertPoint(IfBB);
887 B.CreateCondBr(Cond: B.getTrue(), True: LoopMainBB, False: LoopStartBB);
888 B.SetInsertPoint(LoopStartBB);
889 B.CreateBr(Dest: LoopMainBB);
890 B.SetInsertPoint(LoopMainBB);
891 B.CreateCondBr(Cond: B.getTrue(), True: LoopStartBB, False: AfterLoopBB);
892 B.SetInsertPoint(AfterLoopBB);
893 Argument *FirstArg = &*F->arg_begin();
894 setupAnalyses();
895 MemorySSA &MSSA = *Analyses->MSSA;
896 MemorySSAUpdater Updater(&MSSA);
897 // Create the load memory acccess
898 LoadInst *LoadInst = B.CreateLoad(Ty: B.getInt8Ty(), Ptr: FirstArg);
899 MemoryUse *LoadAccess = cast<MemoryUse>(Val: Updater.createMemoryAccessInBB(
900 I: LoadInst, Definition: nullptr, BB: AfterLoopBB, Point: MemorySSA::Beginning));
901 Updater.insertUse(Use: LoadAccess);
902 MSSA.verifyMemorySSA();
903}
904
905TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
906 // Create:
907 // %1 = alloca i8
908 // ; 1 = MemoryDef(liveOnEntry)
909 // store i8 0, i8* %1
910 // ; 2 = MemoryDef(1)
911 // store i8 0, i8* %1
912 //
913 // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
914 // `2` after `1` is removed.
915 IRBuilder<> B(C);
916 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
917 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
918
919 BasicBlock *Entry = BasicBlock::Create(Context&: C, Name: "if", Parent: F);
920 B.SetInsertPoint(Entry);
921
922 Value *A = B.CreateAlloca(Ty: B.getInt8Ty());
923 StoreInst *StoreA = B.CreateStore(Val: B.getInt8(C: 0), Ptr: A);
924 StoreInst *StoreB = B.CreateStore(Val: B.getInt8(C: 0), Ptr: A);
925
926 setupAnalyses();
927
928 MemorySSA &MSSA = *Analyses->MSSA;
929
930 auto *DefA = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreA));
931 auto *DefB = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreB));
932
933 MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(MA: DefB);
934 ASSERT_EQ(DefA, BClobber);
935
936 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
937 StoreA->eraseFromParent();
938
939 EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());
940
941 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
942 MSSA.getLiveOnEntryDef())
943 << "(DefA = " << DefA << ")";
944}
945
946TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
947 // Create:
948 // %x = alloca i8
949 // %y = alloca i8
950 // ; 1 = MemoryDef(liveOnEntry)
951 // store i8 0, i8* %x
952 // ; 2 = MemoryDef(1)
953 // store i8 0, i8* %y
954 // ; 3 = MemoryDef(2)
955 // store i8 0, i8* %x
956 //
957 // And be sure that MSSA's caching handles the removal of def `1`
958 // appropriately.
959 IRBuilder<> B(C);
960 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
961 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
962
963 BasicBlock *Entry = BasicBlock::Create(Context&: C, Name: "if", Parent: F);
964 B.SetInsertPoint(Entry);
965
966 Value *X = B.CreateAlloca(Ty: B.getInt8Ty());
967 Value *Y = B.CreateAlloca(Ty: B.getInt8Ty());
968 StoreInst *StoreX1 = B.CreateStore(Val: B.getInt8(C: 0), Ptr: X);
969 StoreInst *StoreY = B.CreateStore(Val: B.getInt8(C: 0), Ptr: Y);
970 StoreInst *StoreX2 = B.CreateStore(Val: B.getInt8(C: 0), Ptr: X);
971
972 setupAnalyses();
973
974 MemorySSA &MSSA = *Analyses->MSSA;
975
976 auto *DefX1 = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreX1));
977 auto *DefY = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreY));
978 auto *DefX2 = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreX2));
979
980 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
981 MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA: DefX2);
982 ASSERT_EQ(DefX1, X2Clobber);
983
984 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
985 StoreX1->eraseFromParent();
986
987 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
988 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
989 MSSA.getLiveOnEntryDef())
990 << "(DefX1 = " << DefX1 << ")";
991}
992
993// Test Must alias for optimized defs.
994TEST_F(MemorySSATest, TestStoreMustAlias) {
995 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {}, isVarArg: false),
996 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
997 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
998 Type *Int8 = Type::getInt8Ty(C);
999 Value *AllocaA = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
1000 Value *AllocaB = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "B");
1001 StoreInst *SA1 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 1), Ptr: AllocaA);
1002 StoreInst *SB1 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 1), Ptr: AllocaB);
1003 StoreInst *SA2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 2), Ptr: AllocaA);
1004 StoreInst *SB2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 2), Ptr: AllocaB);
1005 StoreInst *SA3 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 3), Ptr: AllocaA);
1006 StoreInst *SB3 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 3), Ptr: AllocaB);
1007
1008 setupAnalyses();
1009 MemorySSA &MSSA = *Analyses->MSSA;
1010 MemorySSAWalker *Walker = Analyses->Walker;
1011
1012 unsigned I = 0;
1013 for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
1014 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(Val: MSSA.getMemoryAccess(I: V));
1015 EXPECT_EQ(MemDef->isOptimized(), false)
1016 << "Store " << I << " is optimized from the start?";
1017 if (V == SA1)
1018 Walker->getClobberingMemoryAccess(I: V);
1019 else {
1020 MemoryAccess *Def = MemDef->getDefiningAccess();
1021 MemoryAccess *Clob = Walker->getClobberingMemoryAccess(I: V);
1022 EXPECT_NE(Def, Clob) << "Store " << I
1023 << " has Defining Access equal to Clobbering Access";
1024 }
1025 EXPECT_EQ(MemDef->isOptimized(), true)
1026 << "Store " << I << " was not optimized";
1027 // EXPECT_EQ expands such that if we increment I above, it won't get
1028 // incremented except when we try to print the error message.
1029 ++I;
1030 }
1031}
1032
1033// Test May alias for optimized defs.
1034TEST_F(MemorySSATest, TestStoreMayAlias) {
1035 F = Function::Create(
1036 Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy(), B.getPtrTy()}, isVarArg: false),
1037 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1038 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
1039 Type *Int8 = Type::getInt8Ty(C);
1040 auto *ArgIt = F->arg_begin();
1041 Argument *PointerA = &*ArgIt;
1042 Argument *PointerB = &*(++ArgIt);
1043 Value *AllocaC = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "C");
1044 // Store into arg1, must alias because it's LOE => must
1045 StoreInst *SA1 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: PointerA);
1046 // Store into arg2, may alias store to arg1 => may
1047 StoreInst *SB1 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 1), Ptr: PointerB);
1048 // Store into aloca, no alias with args, so must alias LOE => must
1049 StoreInst *SC1 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 2), Ptr: AllocaC);
1050 // Store into arg1, may alias store to arg2 => may
1051 StoreInst *SA2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 3), Ptr: PointerA);
1052 // Store into arg2, may alias store to arg1 => may
1053 StoreInst *SB2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 4), Ptr: PointerB);
1054 // Store into aloca, no alias with args, so must alias SC1 => must
1055 StoreInst *SC2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 5), Ptr: AllocaC);
1056 // Store into arg2, must alias store to arg2 => must
1057 StoreInst *SB3 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 6), Ptr: PointerB);
1058 std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};
1059
1060 setupAnalyses();
1061 MemorySSA &MSSA = *Analyses->MSSA;
1062 MemorySSAWalker *Walker = Analyses->Walker;
1063
1064 unsigned I = 0;
1065 for (StoreInst *V : Sts) {
1066 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(Val: MSSA.getMemoryAccess(I: V));
1067 EXPECT_EQ(MemDef->isOptimized(), false)
1068 << "Store " << I << " is optimized from the start?";
1069 ++I;
1070 }
1071
1072 for (StoreInst *V : Sts)
1073 Walker->getClobberingMemoryAccess(I: V);
1074
1075 I = 0;
1076 for (StoreInst *V : Sts) {
1077 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(Val: MSSA.getMemoryAccess(I: V));
1078 EXPECT_EQ(MemDef->isOptimized(), true)
1079 << "Store " << I << " was not optimized";
1080 // EXPECT_EQ expands such that if we increment I above, it won't get
1081 // incremented except when we try to print the error message.
1082 ++I;
1083 }
1084}
1085
1086TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
1087 // Example code:
1088 // define void @a(i8* %foo) {
1089 // %bar = getelementptr i8, i8* %foo, i64 1
1090 // %baz = getelementptr i8, i8* %foo, i64 2
1091 // store i8 0, i8* %foo
1092 // store i8 0, i8* %bar
1093 // call void @llvm.lifetime.end.p0i8(i64 3, i8* %foo)
1094 // call void @llvm.lifetime.start.p0i8(i64 3, i8* %foo)
1095 // store i8 0, i8* %foo
1096 // store i8 0, i8* %bar
1097 // call void @llvm.memset.p0i8(i8* %baz, i8 0, i64 1)
1098 // ret void
1099 // }
1100 //
1101 // Patterns like this are possible after inlining; the stores to %foo and %bar
1102 // should both be clobbered by the lifetime.start call if they're dominated by
1103 // it.
1104
1105 IRBuilder<> B(C);
1106 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
1107 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1108
1109 // Make blocks
1110 BasicBlock *Entry = BasicBlock::Create(Context&: C, Name: "entry", Parent: F);
1111
1112 B.SetInsertPoint(Entry);
1113 Value *Foo = &*F->arg_begin();
1114
1115 Value *Bar = B.CreatePtrAdd(Ptr: Foo, Offset: B.getInt64(C: 1), Name: "bar");
1116 Value *Baz = B.CreatePtrAdd(Ptr: Foo, Offset: B.getInt64(C: 2), Name: "baz");
1117
1118 B.CreateStore(Val: B.getInt8(C: 0), Ptr: Foo);
1119 B.CreateStore(Val: B.getInt8(C: 0), Ptr: Bar);
1120
1121 auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
1122 return Intrinsic::getDeclaration(M: &M, id: ID, Tys: {Foo->getType()});
1123 };
1124
1125 B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
1126 {B.getInt64(C: 3), Foo});
1127 Instruction *LifetimeStart = B.CreateCall(
1128 GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(C: 3), Foo});
1129
1130 Instruction *FooStore = B.CreateStore(Val: B.getInt8(C: 0), Ptr: Foo);
1131 Instruction *BarStore = B.CreateStore(Val: B.getInt8(C: 0), Ptr: Bar);
1132 Instruction *BazMemSet = B.CreateMemSet(Ptr: Baz, Val: B.getInt8(C: 0), Size: 1, Align: Align(1));
1133
1134 setupAnalyses();
1135 MemorySSA &MSSA = *Analyses->MSSA;
1136
1137 MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(I: LifetimeStart);
1138 ASSERT_NE(LifetimeStartAccess, nullptr);
1139
1140 MemoryAccess *FooAccess = MSSA.getMemoryAccess(I: FooStore);
1141 ASSERT_NE(FooAccess, nullptr);
1142
1143 MemoryAccess *BarAccess = MSSA.getMemoryAccess(I: BarStore);
1144 ASSERT_NE(BarAccess, nullptr);
1145
1146 MemoryAccess *BazAccess = MSSA.getMemoryAccess(I: BazMemSet);
1147 ASSERT_NE(BazAccess, nullptr);
1148
1149 MemoryAccess *FooClobber =
1150 MSSA.getWalker()->getClobberingMemoryAccess(MA: FooAccess);
1151 EXPECT_EQ(FooClobber, LifetimeStartAccess);
1152
1153 MemoryAccess *BarClobber =
1154 MSSA.getWalker()->getClobberingMemoryAccess(MA: BarAccess);
1155 EXPECT_EQ(BarClobber, LifetimeStartAccess);
1156
1157 MemoryAccess *BazClobber =
1158 MSSA.getWalker()->getClobberingMemoryAccess(MA: BazAccess);
1159 EXPECT_EQ(BazClobber, LifetimeStartAccess);
1160
1161 MemoryAccess *LifetimeStartClobber =
1162 MSSA.getWalker()->getClobberingMemoryAccess(
1163 MA: LifetimeStartAccess, Loc: MemoryLocation::getAfter(Ptr: Foo));
1164 EXPECT_EQ(LifetimeStartClobber, LifetimeStartAccess);
1165}
1166
1167TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
1168 IRBuilder<> B(C);
1169 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getInt1Ty()}, isVarArg: false),
1170 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1171
1172 // Make a CFG like
1173 // entry
1174 // / \
1175 // a b
1176 // \ /
1177 // c
1178 //
1179 // Put a def in A and a def in B, move the def from A -> B, observe as the
1180 // optimization is invalidated.
1181 BasicBlock *Entry = BasicBlock::Create(Context&: C, Name: "entry", Parent: F);
1182 BasicBlock *BlockA = BasicBlock::Create(Context&: C, Name: "a", Parent: F);
1183 BasicBlock *BlockB = BasicBlock::Create(Context&: C, Name: "b", Parent: F);
1184 BasicBlock *BlockC = BasicBlock::Create(Context&: C, Name: "c", Parent: F);
1185
1186 B.SetInsertPoint(Entry);
1187 Type *Int8 = Type::getInt8Ty(C);
1188 Value *Alloca = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "alloc");
1189 StoreInst *StoreEntry = B.CreateStore(Val: B.getInt8(C: 0), Ptr: Alloca);
1190 B.CreateCondBr(Cond: B.getTrue(), True: BlockA, False: BlockB);
1191
1192 B.SetInsertPoint(BlockA);
1193 StoreInst *StoreA = B.CreateStore(Val: B.getInt8(C: 1), Ptr: Alloca);
1194 B.CreateBr(Dest: BlockC);
1195
1196 B.SetInsertPoint(BlockB);
1197 StoreInst *StoreB = B.CreateStore(Val: B.getInt8(C: 2), Ptr: Alloca);
1198 B.CreateBr(Dest: BlockC);
1199
1200 B.SetInsertPoint(BlockC);
1201 B.CreateUnreachable();
1202
1203 setupAnalyses();
1204 MemorySSA &MSSA = *Analyses->MSSA;
1205
1206 auto *AccessEntry = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreEntry));
1207 auto *StoreAEntry = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreA));
1208 auto *StoreBEntry = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreB));
1209
1210 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1211 AccessEntry);
1212 ASSERT_TRUE(StoreAEntry->isOptimized());
1213
1214 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
1215 AccessEntry);
1216 ASSERT_TRUE(StoreBEntry->isOptimized());
1217
1218 // Note that if we did InsertionPlace::Beginning, we don't go out of our way
1219 // to invalidate the cache for StoreBEntry. If the user wants to actually do
1220 // moves like these, it's up to them to ensure that nearby cache entries are
1221 // correctly invalidated (which, in general, requires walking all instructions
1222 // that the moved instruction dominates. So we probably shouldn't be doing
1223 // moves like this in general. Still, works as a test-case. ;) )
1224 MemorySSAUpdater(&MSSA).moveToPlace(What: StoreAEntry, BB: BlockB,
1225 Where: MemorySSA::InsertionPlace::End);
1226 ASSERT_FALSE(StoreAEntry->isOptimized());
1227 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1228 StoreBEntry);
1229}
1230
1231TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
1232 F = Function::Create(
1233 Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy(), B.getPtrTy()}, isVarArg: false),
1234 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1235 B.SetInsertPoint(BasicBlock::Create(Context&: C, Name: "", Parent: F));
1236 Type *Int8 = Type::getInt8Ty(C);
1237 Value *AllocA = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
1238 Value *AllocB = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "B");
1239
1240 StoreInst *StoreA = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: AllocA);
1241 StoreInst *StoreB = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 1), Ptr: AllocB);
1242 StoreInst *StoreA2 = B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 2), Ptr: AllocA);
1243
1244 setupAnalyses();
1245 MemorySSA &MSSA = *Analyses->MSSA;
1246 MemorySSAWalker *Walker = Analyses->Walker;
1247
1248 // If these don't hold, there's no chance of the test result being useful.
1249 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
1250 MSSA.getLiveOnEntryDef());
1251 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
1252 MSSA.getLiveOnEntryDef());
1253 auto *StoreAAccess = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreA));
1254 auto *StoreA2Access = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreA2));
1255 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
1256 ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
1257
1258 auto *StoreBAccess = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: StoreB));
1259 ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
1260 ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
1261
1262 auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
1263 llvm::sort(C&: Defs, Comp: [](const MemoryDef *LHS, const MemoryDef *RHS) {
1264 return LHS->getID() < RHS->getID();
1265 });
1266 };
1267
1268 auto SortedUserList = [&](const MemoryDef *MD) {
1269 std::vector<const MemoryDef *> Result;
1270 transform(Range: MD->users(), d_first: std::back_inserter(x&: Result),
1271 F: [](const User *U) { return cast<MemoryDef>(Val: U); });
1272 SortVecByID(Result);
1273 return Result;
1274 };
1275
1276 // Use std::vectors, since they have nice pretty-printing if the test fails.
1277 // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
1278 // our init lists...
1279 EXPECT_EQ(SortedUserList(StoreAAccess),
1280 (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));
1281
1282 EXPECT_EQ(SortedUserList(StoreBAccess),
1283 std::vector<const MemoryDef *>{StoreA2Access});
1284
1285 // StoreAAccess should be present twice, since it uses liveOnEntry for both
1286 // its defining and optimized accesses. This is a bit awkward, and is not
1287 // relied upon anywhere at the moment. If this is painful, we can fix it.
1288 EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
1289 (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
1290 StoreBAccess}));
1291}
1292
1293// entry
1294// |
1295// header
1296// / \
1297// body |
1298// \ /
1299// exit
1300// header:
1301// ; 1 = MemoryDef(liveOnEntry)
1302// body:
1303// ; 2 = MemoryDef(1)
1304// exit:
1305// ; 3 = MemoryPhi({body, 2}, {header, 1})
1306// ; 4 = MemoryDef(3); optimized to 3, cannot optimize thorugh phi.
1307// Insert edge: entry -> exit, check mssa Update is correct.
1308TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
1309 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
1310 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1311 Argument *PointerArg = &*F->arg_begin();
1312 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "entry", Parent: F));
1313 BasicBlock *Header(BasicBlock::Create(Context&: C, Name: "header", Parent: F));
1314 BasicBlock *Body(BasicBlock::Create(Context&: C, Name: "body", Parent: F));
1315 BasicBlock *Exit(BasicBlock::Create(Context&: C, Name: "exit", Parent: F));
1316 B.SetInsertPoint(Entry);
1317 BranchInst::Create(IfTrue: Header, InsertAtEnd: Entry);
1318 B.SetInsertPoint(Header);
1319 B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
1320 B.CreateCondBr(Cond: B.getTrue(), True: Exit, False: Body);
1321 B.SetInsertPoint(Body);
1322 B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
1323 BranchInst::Create(IfTrue: Exit, InsertAtEnd: Body);
1324 B.SetInsertPoint(Exit);
1325 StoreInst *S1 = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
1326
1327 setupAnalyses();
1328 MemorySSA &MSSA = *Analyses->MSSA;
1329 MemorySSAWalker *Walker = Analyses->Walker;
1330 std::unique_ptr<MemorySSAUpdater> MSSAU =
1331 std::make_unique<MemorySSAUpdater>(args: &MSSA);
1332
1333 MemoryPhi *Phi = MSSA.getMemoryAccess(BB: Exit);
1334 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1335
1336 // Alter CFG, add edge: entry -> exit
1337 Entry->getTerminator()->eraseFromParent();
1338 B.SetInsertPoint(Entry);
1339 B.CreateCondBr(Cond: B.getTrue(), True: Header, False: Exit);
1340 SmallVector<CFGUpdate, 1> Updates;
1341 Updates.push_back(Elt: {cfg::UpdateKind::Insert, Entry, Exit});
1342 Analyses->DT.applyUpdates(Updates);
1343 MSSAU->applyInsertUpdates(Updates, DT&: Analyses->DT);
1344 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1345}
1346
1347// entry
1348// |
1349// header
1350// / \
1351// body |
1352// \ /
1353// exit
1354// header:
1355// ; 1 = MemoryDef(liveOnEntry)
1356// body:
1357// ; 2 = MemoryDef(1)
1358// exit:
1359// ; 3 = MemoryPhi({body, 2}, {header, 1})
1360// ; 4 = MemoryDef(3); optimize this to 1 now, added edge should invalidate
1361// the optimized access.
1362// Insert edge: entry -> exit, check mssa Update is correct.
1363TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
1364 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
1365 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1366 Argument *PointerArg = &*F->arg_begin();
1367 Type *Int8 = Type::getInt8Ty(C);
1368 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "entry", Parent: F));
1369 BasicBlock *Header(BasicBlock::Create(Context&: C, Name: "header", Parent: F));
1370 BasicBlock *Body(BasicBlock::Create(Context&: C, Name: "body", Parent: F));
1371 BasicBlock *Exit(BasicBlock::Create(Context&: C, Name: "exit", Parent: F));
1372
1373 B.SetInsertPoint(Entry);
1374 Value *Alloca = B.CreateAlloca(Ty: Int8, ArraySize: ConstantInt::get(Ty: Int8, V: 1), Name: "A");
1375 BranchInst::Create(IfTrue: Header, InsertAtEnd: Entry);
1376
1377 B.SetInsertPoint(Header);
1378 StoreInst *S1 = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
1379 B.CreateCondBr(Cond: B.getTrue(), True: Exit, False: Body);
1380
1381 B.SetInsertPoint(Body);
1382 B.CreateStore(Val: ConstantInt::get(Ty: Int8, V: 0), Ptr: Alloca);
1383 BranchInst::Create(IfTrue: Exit, InsertAtEnd: Body);
1384
1385 B.SetInsertPoint(Exit);
1386 StoreInst *S2 = B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
1387
1388 setupAnalyses();
1389 MemorySSA &MSSA = *Analyses->MSSA;
1390 MemorySSAWalker *Walker = Analyses->Walker;
1391 std::unique_ptr<MemorySSAUpdater> MSSAU =
1392 std::make_unique<MemorySSAUpdater>(args: &MSSA);
1393
1394 MemoryDef *DefS1 = cast<MemoryDef>(Val: MSSA.getMemoryAccess(I: S1));
1395 EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));
1396
1397 // Alter CFG, add edge: entry -> exit
1398 Entry->getTerminator()->eraseFromParent();
1399 B.SetInsertPoint(Entry);
1400 B.CreateCondBr(Cond: B.getTrue(), True: Header, False: Exit);
1401 SmallVector<CFGUpdate, 1> Updates;
1402 Updates.push_back(Elt: {cfg::UpdateKind::Insert, Entry, Exit});
1403 Analyses->DT.applyUpdates(Updates);
1404 MSSAU->applyInsertUpdates(Updates, DT&: Analyses->DT);
1405
1406 MemoryPhi *Phi = MSSA.getMemoryAccess(BB: Exit);
1407 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
1408}
1409
1410// entry
1411// / |
1412// a |
1413// / \ |
1414// b c f
1415// \ / |
1416// d |
1417// \ /
1418// e
1419// f:
1420// ; 1 = MemoryDef(liveOnEntry)
1421// e:
1422// ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
1423//
1424// Insert edge: f -> c, check update is correct.
1425// After update:
1426// f:
1427// ; 1 = MemoryDef(liveOnEntry)
1428// c:
1429// ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
1430// d:
1431// ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
1432// e:
1433// ; 2 = MemoryPhi({d, 4}, {f, 1})
1434TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
1435 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
1436 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1437 Argument *PointerArg = &*F->arg_begin();
1438 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "entry", Parent: F));
1439 BasicBlock *ABlock(BasicBlock::Create(Context&: C, Name: "a", Parent: F));
1440 BasicBlock *BBlock(BasicBlock::Create(Context&: C, Name: "b", Parent: F));
1441 BasicBlock *CBlock(BasicBlock::Create(Context&: C, Name: "c", Parent: F));
1442 BasicBlock *DBlock(BasicBlock::Create(Context&: C, Name: "d", Parent: F));
1443 BasicBlock *EBlock(BasicBlock::Create(Context&: C, Name: "e", Parent: F));
1444 BasicBlock *FBlock(BasicBlock::Create(Context&: C, Name: "f", Parent: F));
1445
1446 B.SetInsertPoint(Entry);
1447 B.CreateCondBr(Cond: B.getTrue(), True: ABlock, False: FBlock);
1448 B.SetInsertPoint(ABlock);
1449 B.CreateCondBr(Cond: B.getTrue(), True: BBlock, False: CBlock);
1450 B.SetInsertPoint(BBlock);
1451 BranchInst::Create(IfTrue: DBlock, InsertAtEnd: BBlock);
1452 B.SetInsertPoint(CBlock);
1453 BranchInst::Create(IfTrue: DBlock, InsertAtEnd: CBlock);
1454 B.SetInsertPoint(DBlock);
1455 BranchInst::Create(IfTrue: EBlock, InsertAtEnd: DBlock);
1456 B.SetInsertPoint(FBlock);
1457 B.CreateStore(Val: B.getInt8(C: 16), Ptr: PointerArg);
1458 BranchInst::Create(IfTrue: EBlock, InsertAtEnd: FBlock);
1459
1460 setupAnalyses();
1461 MemorySSA &MSSA = *Analyses->MSSA;
1462 std::unique_ptr<MemorySSAUpdater> MSSAU =
1463 std::make_unique<MemorySSAUpdater>(args: &MSSA);
1464
1465 // Alter CFG, add edge: f -> c
1466 FBlock->getTerminator()->eraseFromParent();
1467 B.SetInsertPoint(FBlock);
1468 B.CreateCondBr(Cond: B.getTrue(), True: CBlock, False: EBlock);
1469 SmallVector<CFGUpdate, 1> Updates;
1470 Updates.push_back(Elt: {cfg::UpdateKind::Insert, FBlock, CBlock});
1471 Analyses->DT.applyUpdates(Updates);
1472 MSSAU->applyInsertUpdates(Updates, DT&: Analyses->DT);
1473
1474 MemoryPhi *MPC = MSSA.getMemoryAccess(BB: CBlock);
1475 EXPECT_NE(MPC, nullptr);
1476 MemoryPhi *MPD = MSSA.getMemoryAccess(BB: DBlock);
1477 EXPECT_NE(MPD, nullptr);
1478 MemoryPhi *MPE = MSSA.getMemoryAccess(BB: EBlock);
1479 EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));
1480}
1481
1482TEST_F(MemorySSATest, TestCallClobber) {
1483 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
1484 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1485
1486 Value *Pointer1 = &*F->arg_begin();
1487 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
1488 B.SetInsertPoint(Entry);
1489 Value *Pointer2 = B.CreatePtrAdd(Ptr: Pointer1, Offset: B.getInt64(C: 1));
1490 Instruction *StorePointer1 = B.CreateStore(Val: B.getInt8(C: 0), Ptr: Pointer1);
1491 Instruction *StorePointer2 = B.CreateStore(Val: B.getInt8(C: 0), Ptr: Pointer2);
1492 Instruction *MemSet = B.CreateMemSet(Ptr: Pointer2, Val: B.getInt8(C: 0), Size: 1, Align: Align(1));
1493
1494 setupAnalyses();
1495 MemorySSA &MSSA = *Analyses->MSSA;
1496 MemorySSAWalker *Walker = Analyses->Walker;
1497
1498 MemoryUseOrDef *Store1Access = MSSA.getMemoryAccess(I: StorePointer1);
1499 MemoryUseOrDef *Store2Access = MSSA.getMemoryAccess(I: StorePointer2);
1500 MemoryUseOrDef *MemSetAccess = MSSA.getMemoryAccess(I: MemSet);
1501
1502 MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
1503 MA: MemSetAccess, Loc: MemoryLocation(Pointer1, LocationSize::precise(Value: 1)));
1504 EXPECT_EQ(Pointer1Clobber, Store1Access);
1505
1506 MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
1507 MA: MemSetAccess, Loc: MemoryLocation(Pointer2, LocationSize::precise(Value: 1)));
1508 EXPECT_EQ(Pointer2Clobber, MemSetAccess);
1509
1510 MemoryAccess *MemSetClobber = Walker->getClobberingMemoryAccess(MA: MemSetAccess);
1511 EXPECT_EQ(MemSetClobber, Store2Access);
1512}
1513
1514TEST_F(MemorySSATest, TestLoadClobber) {
1515 F = Function::Create(Ty: FunctionType::get(Result: B.getVoidTy(), Params: {B.getPtrTy()}, isVarArg: false),
1516 Linkage: GlobalValue::ExternalLinkage, N: "F", M: &M);
1517
1518 Value *Pointer1 = &*F->arg_begin();
1519 BasicBlock *Entry(BasicBlock::Create(Context&: C, Name: "", Parent: F));
1520 B.SetInsertPoint(Entry);
1521 Value *Pointer2 = B.CreatePtrAdd(Ptr: Pointer1, Offset: B.getInt64(C: 1));
1522 Instruction *LoadPointer1 =
1523 B.CreateLoad(Ty: B.getInt8Ty(), Ptr: Pointer1, /* Volatile */ isVolatile: true);
1524 Instruction *LoadPointer2 =
1525 B.CreateLoad(Ty: B.getInt8Ty(), Ptr: Pointer2, /* Volatile */ isVolatile: true);
1526
1527 setupAnalyses();
1528 MemorySSA &MSSA = *Analyses->MSSA;
1529 MemorySSAWalker *Walker = Analyses->Walker;
1530
1531 MemoryUseOrDef *Load1Access = MSSA.getMemoryAccess(I: LoadPointer1);
1532 MemoryUseOrDef *Load2Access = MSSA.getMemoryAccess(I: LoadPointer2);
1533
1534 // When providing a memory location, we should never return a load as the
1535 // clobber.
1536 MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
1537 MA: Load2Access, Loc: MemoryLocation(Pointer1, LocationSize::precise(Value: 1)));
1538 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer1Clobber));
1539
1540 MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
1541 MA: Load2Access, Loc: MemoryLocation(Pointer2, LocationSize::precise(Value: 1)));
1542 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer2Clobber));
1543
1544 MemoryAccess *Load2Clobber = Walker->getClobberingMemoryAccess(MA: Load2Access);
1545 EXPECT_EQ(Load2Clobber, Load1Access);
1546}
1547
1548// We want to test if the location information are retained
1549// when the IsGuaranteedLoopInvariant function handles a
1550// memory access referring to a pointer defined in the entry
1551// block, hence automatically guaranteed to be loop invariant.
1552TEST_F(MemorySSATest, TestLoopInvariantEntryBlockPointer) {
1553 SMDiagnostic E;
1554 auto LocalM =
1555 parseAssemblyString(AsmString: "define void @test(i64 %a0, i8* %a1, i1* %a2) {\n"
1556 "entry:\n"
1557 "%v0 = getelementptr i8, i8* %a1, i64 %a0\n"
1558 "%v1 = bitcast i8* %v0 to i64*\n"
1559 "%v2 = bitcast i8* %v0 to i32*\n"
1560 "%v3 = load i1, i1* %a2\n"
1561 "br i1 %v3, label %body, label %exit\n"
1562 "body:\n"
1563 "store i32 1, i32* %v2\n"
1564 "br label %exit\n"
1565 "exit:\n"
1566 "store i64 0, i64* %v1\n"
1567 "ret void\n"
1568 "}",
1569 Err&: E, Context&: C);
1570 ASSERT_TRUE(LocalM);
1571 F = LocalM->getFunction(Name: "test");
1572 ASSERT_TRUE(F);
1573 // Setup the analysis
1574 setupAnalyses();
1575 MemorySSA &MSSA = *Analyses->MSSA;
1576 // Find the exit block
1577 for (auto &BB : *F) {
1578 if (BB.getName() == "exit") {
1579 // Get the store instruction
1580 auto *SI = BB.getFirstNonPHI();
1581 // Get the memory access and location
1582 MemoryAccess *MA = MSSA.getMemoryAccess(I: SI);
1583 MemoryLocation ML = MemoryLocation::get(Inst: SI);
1584 // Use the 'upward_defs_iterator' which internally calls
1585 // IsGuaranteedLoopInvariant
1586 auto ItA = upward_defs_begin(Pair: {MA, ML}, DT&: MSSA.getDomTree());
1587 auto ItB =
1588 upward_defs_begin(Pair: {ItA->first, ItA->second}, DT&: MSSA.getDomTree());
1589 // Check if the location information have been retained
1590 EXPECT_TRUE(ItB->second.Size.isPrecise());
1591 EXPECT_TRUE(ItB->second.Size.hasValue());
1592 EXPECT_TRUE(ItB->second.Size.getValue() == 8);
1593 }
1594 }
1595}
1596
1597TEST_F(MemorySSATest, TestInvariantGroup) {
1598 SMDiagnostic E;
1599 auto M = parseAssemblyString(AsmString: "declare void @f(i8*)\n"
1600 "define i8 @test(i8* %p) {\n"
1601 "entry:\n"
1602 " store i8 42, i8* %p, !invariant.group !0\n"
1603 " call void @f(i8* %p)\n"
1604 " %v = load i8, i8* %p, !invariant.group !0\n"
1605 " ret i8 %v\n"
1606 "}\n"
1607 "!0 = !{}",
1608 Err&: E, Context&: C);
1609 ASSERT_TRUE(M);
1610 F = M->getFunction(Name: "test");
1611 ASSERT_TRUE(F);
1612 setupAnalyses();
1613 MemorySSA &MSSA = *Analyses->MSSA;
1614 MemorySSAWalker *Walker = Analyses->Walker;
1615
1616 auto &BB = F->getEntryBlock();
1617 auto &SI = cast<StoreInst>(Val&: *BB.begin());
1618 auto &Call = cast<CallBase>(Val&: *std::next(x: BB.begin()));
1619 auto &LI = cast<LoadInst>(Val&: *std::next(x: std::next(x: BB.begin())));
1620
1621 {
1622 MemoryAccess *SAccess = MSSA.getMemoryAccess(I: &SI);
1623 MemoryAccess *LAccess = MSSA.getMemoryAccess(I: &LI);
1624 MemoryAccess *SClobber = Walker->getClobberingMemoryAccess(MA: SAccess);
1625 EXPECT_TRUE(MSSA.isLiveOnEntryDef(SClobber));
1626 MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(MA: LAccess);
1627 EXPECT_EQ(SAccess, LClobber);
1628 }
1629
1630 // remove store and verify that the memory accesses still make sense
1631 MemorySSAUpdater Updater(&MSSA);
1632 Updater.removeMemoryAccess(I: &SI);
1633 SI.eraseFromParent();
1634
1635 {
1636 MemoryAccess *CallAccess = MSSA.getMemoryAccess(I: &Call);
1637 MemoryAccess *LAccess = MSSA.getMemoryAccess(I: &LI);
1638 MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(MA: LAccess);
1639 EXPECT_EQ(CallAccess, LClobber);
1640 }
1641}
1642
1643static BasicBlock *getBasicBlockByName(Function &F, StringRef Name) {
1644 for (BasicBlock &BB : F)
1645 if (BB.getName() == Name)
1646 return &BB;
1647 llvm_unreachable("Expected to find basic block!");
1648}
1649
1650static Instruction *getInstructionByName(Function &F, StringRef Name) {
1651 for (BasicBlock &BB : F)
1652 for (Instruction &I : BB)
1653 if (I.getName() == Name)
1654 return &I;
1655 llvm_unreachable("Expected to find instruction!");
1656}
1657
1658TEST_F(MemorySSATest, TestVisitedBlocks) {
1659 SMDiagnostic E;
1660 auto M = parseAssemblyString(
1661 AsmString: "define void @test(i64* noalias %P, i64 %N) {\n"
1662 "preheader.n:\n"
1663 " br label %header.n\n"
1664 "header.n:\n"
1665 " %n = phi i64 [ 0, %preheader.n ], [ %inc.n, %latch.n ]\n"
1666 " %guard.cond.i = icmp slt i64 0, %N\n"
1667 " br i1 %guard.cond.i, label %header.i.check, label %other.i\n"
1668 "header.i.check:\n"
1669 " br label %preheader.i\n"
1670 "preheader.i:\n"
1671 " br label %header.i\n"
1672 "header.i:\n"
1673 " %i = phi i64 [ 0, %preheader.i ], [ %inc.i, %header.i ]\n"
1674 " %v1 = load i64, i64* %P, align 8\n"
1675 " %v2 = load i64, i64* %P, align 8\n"
1676 " %inc.i = add nsw i64 %i, 1\n"
1677 " %cmp.i = icmp slt i64 %inc.i, %N\n"
1678 " br i1 %cmp.i, label %header.i, label %exit.i\n"
1679 "exit.i:\n"
1680 " br label %commonexit\n"
1681 "other.i:\n"
1682 " br label %commonexit\n"
1683 "commonexit:\n"
1684 " br label %latch.n\n"
1685 "latch.n:\n"
1686 " %inc.n = add nsw i64 %n, 1\n"
1687 " %cmp.n = icmp slt i64 %inc.n, %N\n"
1688 " br i1 %cmp.n, label %header.n, label %exit.n\n"
1689 "exit.n:\n"
1690 " ret void\n"
1691 "}\n",
1692 Err&: E, Context&: C);
1693 ASSERT_TRUE(M);
1694 F = M->getFunction(Name: "test");
1695 ASSERT_TRUE(F);
1696 setupAnalyses();
1697 MemorySSA &MSSA = *Analyses->MSSA;
1698 MemorySSAUpdater Updater(&MSSA);
1699
1700 {
1701 // Move %v1 before the terminator of %header.i.check
1702 BasicBlock *BB = getBasicBlockByName(F&: *F, Name: "header.i.check");
1703 Instruction *LI = getInstructionByName(F&: *F, Name: "v1");
1704 LI->moveBefore(MovePos: BB->getTerminator());
1705 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(I: LI))
1706 Updater.moveToPlace(What: MUD, BB, Where: MemorySSA::BeforeTerminator);
1707
1708 // Change the termiantor of %header.i.check to `br label true, label
1709 // %preheader.i, label %other.i`
1710 BB->getTerminator()->eraseFromParent();
1711 ConstantInt *BoolTrue = ConstantInt::getTrue(Context&: F->getContext());
1712 BranchInst::Create(IfTrue: getBasicBlockByName(F&: *F, Name: "preheader.i"),
1713 IfFalse: getBasicBlockByName(F&: *F, Name: "other.i"), Cond: BoolTrue, InsertAtEnd: BB);
1714 SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
1715 DTUpdates.push_back(Elt: DominatorTree::UpdateType(
1716 DominatorTree::Insert, BB, getBasicBlockByName(F&: *F, Name: "other.i")));
1717 Updater.applyUpdates(Updates: DTUpdates, DT&: Analyses->DT, UpdateDTFirst: true);
1718 }
1719
1720 // After the first moveToPlace(), %other.i is in VisitedBlocks, even after
1721 // there is a new edge to %other.i, which makes the second moveToPlace()
1722 // traverse incorrectly.
1723 {
1724 // Move %v2 before the terminator of %preheader.i
1725 BasicBlock *BB = getBasicBlockByName(F&: *F, Name: "preheader.i");
1726 Instruction *LI = getInstructionByName(F&: *F, Name: "v2");
1727 LI->moveBefore(MovePos: BB->getTerminator());
1728 // Check that there is no assertion of "Incomplete phi during partial
1729 // rename"
1730 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(I: LI))
1731 Updater.moveToPlace(What: MUD, BB, Where: MemorySSA::BeforeTerminator);
1732 }
1733}
1734
1735TEST_F(MemorySSATest, TestNoDbgInsts) {
1736 SMDiagnostic E;
1737 auto M = parseAssemblyString(AsmString: R"(
1738 define void @test() presplitcoroutine {
1739 entry:
1740 %i = alloca i32
1741 call void @llvm.dbg.declare(metadata ptr %i, metadata !6, metadata !DIExpression()), !dbg !10
1742 call void @llvm.dbg.value(metadata ptr %i, metadata !6, metadata !DIExpression()), !dbg !10
1743 ret void
1744 }
1745
1746 declare void @llvm.dbg.declare(metadata, metadata, metadata) nocallback nofree nosync nounwind readnone speculatable willreturn
1747 declare void @llvm.dbg.value(metadata, metadata, metadata) nocallback nofree nosync nounwind readnone speculatable willreturn
1748
1749 !llvm.dbg.cu = !{!0}
1750
1751 !0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 15.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, splitDebugInlining: false, nameTableKind: None)
1752 !1 = !DIFile(filename: "repro.cpp", directory: ".")
1753 !2 = !{}
1754 !3 = !{i32 7, !"Dwarf Version", i32 4}
1755 !4 = !{i32 2, !"Debug Info Version", i32 3}
1756 !5 = !{!"clang version 15.0.0"}
1757 !6 = !DILocalVariable(name: "i", scope: !7, file: !1, line: 24, type: !10)
1758 !7 = distinct !DILexicalBlock(scope: !8, file: !1, line: 23, column: 12)
1759 !8 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !1, file: !1, line: 23, type: !9, scopeLine: 23, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
1760 !9 = !DISubroutineType(types: !2)
1761 !10 = !DILocation(line: 24, column: 7, scope: !7)
1762 )",
1763 Err&: E, Context&: C);
1764 ASSERT_TRUE(M);
1765 F = M->getFunction(Name: "test");
1766 ASSERT_TRUE(F);
1767 setupAnalyses();
1768 MemorySSA &MSSA = *Analyses->MSSA;
1769 MemorySSAUpdater Updater(&MSSA);
1770
1771 BasicBlock &Entry = F->front();
1772 auto I = Entry.begin();
1773 Instruction *DbgDeclare = cast<Instruction>(Val: I++);
1774 Instruction *DbgValue = cast<Instruction>(Val: I++);
1775 ASSERT_EQ(MSSA.getMemoryAccess(DbgDeclare), nullptr);
1776 ASSERT_EQ(MSSA.getMemoryAccess(DbgValue), nullptr);
1777}
1778

source code of llvm/unittests/Analysis/MemorySSATest.cpp