1 | //===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the BlockGenerator and VectorBlockGenerator classes, |
10 | // which generate sequential code and vectorized code for a polyhedral |
11 | // statement, respectively. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "polly/CodeGen/BlockGenerators.h" |
16 | #include "polly/CodeGen/IslExprBuilder.h" |
17 | #include "polly/CodeGen/RuntimeDebugBuilder.h" |
18 | #include "polly/Options.h" |
19 | #include "polly/ScopInfo.h" |
20 | #include "polly/Support/ISLTools.h" |
21 | #include "polly/Support/ScopHelper.h" |
22 | #include "polly/Support/VirtualInstruction.h" |
23 | #include "llvm/Analysis/DomTreeUpdater.h" |
24 | #include "llvm/Analysis/LoopInfo.h" |
25 | #include "llvm/Analysis/RegionInfo.h" |
26 | #include "llvm/Analysis/ScalarEvolution.h" |
27 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
28 | #include "llvm/Transforms/Utils/Local.h" |
29 | #include "isl/ast.h" |
30 | #include <deque> |
31 | |
32 | using namespace llvm; |
33 | using namespace polly; |
34 | |
35 | static cl::opt<bool> Aligned("enable-polly-aligned" , |
36 | cl::desc("Assumed aligned memory accesses." ), |
37 | cl::Hidden, cl::cat(PollyCategory)); |
38 | |
39 | bool PollyDebugPrinting; |
40 | static cl::opt<bool, true> DebugPrintingX( |
41 | "polly-codegen-add-debug-printing" , |
42 | cl::desc("Add printf calls that show the values loaded/stored." ), |
43 | cl::location(L&: PollyDebugPrinting), cl::Hidden, cl::cat(PollyCategory)); |
44 | |
45 | static cl::opt<bool> TraceStmts( |
46 | "polly-codegen-trace-stmts" , |
47 | cl::desc("Add printf calls that print the statement being executed" ), |
48 | cl::Hidden, cl::cat(PollyCategory)); |
49 | |
50 | static cl::opt<bool> TraceScalars( |
51 | "polly-codegen-trace-scalars" , |
52 | cl::desc("Add printf calls that print the values of all scalar values " |
53 | "used in a statement. Requires -polly-codegen-trace-stmts." ), |
54 | cl::Hidden, cl::cat(PollyCategory)); |
55 | |
56 | BlockGenerator::BlockGenerator( |
57 | PollyIRBuilder &B, LoopInfo &LI, ScalarEvolution &SE, DominatorTree &DT, |
58 | AllocaMapTy &ScalarMap, EscapeUsersAllocaMapTy &EscapeMap, |
59 | ValueMapT &GlobalMap, IslExprBuilder *ExprBuilder, BasicBlock *StartBlock) |
60 | : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT), |
61 | EntryBB(nullptr), ScalarMap(ScalarMap), EscapeMap(EscapeMap), |
62 | GlobalMap(GlobalMap), StartBlock(StartBlock) {} |
63 | |
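// Try to re-synthesize 'Old' for this statement instead of copying it: if
// 'Old' has a computable SCEV at scope 'L', the add-recurrences are rewritten
// according to LTS (mapping original loops to the new induction variables) and
// the result is expanded at the current insertion point. The expanded value is
// cached in BBMap; nullptr is returned if the value cannot be synthesized.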
64 | Value *BlockGenerator::trySynthesizeNewValue(ScopStmt &Stmt, Value *Old, |
65 | ValueMapT &BBMap, |
66 | LoopToScevMapT &LTS,
67 | Loop *L) const { |
68 | if (!SE.isSCEVable(Ty: Old->getType())) |
69 | return nullptr; |
70 | |
71 | const SCEV *Scev = SE.getSCEVAtScope(V: Old, L); |
72 | if (!Scev) |
73 | return nullptr; |
74 | |
75 | if (isa<SCEVCouldNotCompute>(Val: Scev)) |
76 | return nullptr; |
77 | |
78 | const SCEV *NewScev = SCEVLoopAddRecRewriter::rewrite(Scev, Map&: LTS, SE); |
79 | ValueMapT VTV; |
80 | VTV.insert(I: BBMap.begin(), E: BBMap.end()); |
81 | VTV.insert(I: GlobalMap.begin(), E: GlobalMap.end()); |
82 | |
83 | Scop &S = *Stmt.getParent(); |
84 | const DataLayout &DL = S.getFunction().getParent()->getDataLayout(); |
85 | auto IP = Builder.GetInsertPoint(); |
86 | |
87 | assert(IP != Builder.GetInsertBlock()->end() && |
88 | "Only instructions can be insert points for SCEVExpander" ); |
89 | Value *Expanded = |
90 | expandCodeFor(S, SE, DL, Name: "polly" , E: NewScev, Ty: Old->getType(), IP: &*IP, VMap: &VTV, |
91 | RTCBB: StartBlock->getSinglePredecessor()); |
92 | |
93 | BBMap[Old] = Expanded; |
94 | return Expanded; |
95 | } |
96 | |
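// Look up the generated counterpart of 'Old', depending on how the statement
// uses it (see VirtualUse): constants and read-only values are reused as-is
// (unless a preloaded/reloaded copy exists), hoisted loads come from
// GlobalMap, intra- and inter-statement values come from BBMap, and
// synthesizable values are recomputed via trySynthesizeNewValue.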
97 | Value *BlockGenerator::getNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap, |
98 | LoopToScevMapT &LTS, Loop *L) const {
99 | |
100 | auto lookupGlobally = [this](Value *Old) -> Value * { |
101 | Value *New = GlobalMap.lookup(Val: Old); |
102 | if (!New) |
103 | return nullptr; |
104 | |
105 | // Required by: |
106 | // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll |
107 | // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll |
108 | // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll |
109 | // * Isl/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll |
110 | // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll |
111 | // * Isl/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll |
112 | // GlobalMap should be a mapping from (value in original SCoP) to (copied |
113 | // value in generated SCoP), without intermediate mappings, which might |
114 | // easily require transitiveness as well. |
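// Illustrative (hypothetical) example: a preloaded base pointer %base may be
// mapped to %base.preload, which in turn is temporarily mapped to its copy in
// an OpenMP subfunction; the extra lookup below resolves exactly one such
// additional level of indirection.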
115 | if (Value *NewRemapped = GlobalMap.lookup(Val: New)) |
116 | New = NewRemapped; |
117 | |
118 | // No test case for this code. |
119 | if (Old->getType()->getScalarSizeInBits() < |
120 | New->getType()->getScalarSizeInBits()) |
121 | New = Builder.CreateTruncOrBitCast(V: New, DestTy: Old->getType()); |
122 | |
123 | return New; |
124 | }; |
125 | |
126 | Value *New = nullptr; |
127 | auto VUse = VirtualUse::create(UserStmt: &Stmt, UserScope: L, Val: Old, Virtual: true); |
128 | switch (VUse.getKind()) { |
129 | case VirtualUse::Block: |
130 | // BasicBlocks are constants, but the BlockGenerator copies them.
131 | New = BBMap.lookup(Val: Old); |
132 | break; |
133 | |
134 | case VirtualUse::Constant: |
135 | // Used by: |
136 | // * Isl/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll |
137 | // Constants should not be redefined. In this case, the GlobalMap just |
138 | // contains a mapping to the same constant, which is unnecessary, but |
139 | // harmless. |
140 | if ((New = lookupGlobally(Old))) |
141 | break; |
142 | |
143 | assert(!BBMap.count(Old)); |
144 | New = Old; |
145 | break; |
146 | |
147 | case VirtualUse::ReadOnly: |
148 | assert(!GlobalMap.count(Old)); |
149 | |
150 | // Required for: |
151 | // * Isl/CodeGen/MemAccess/create_arrays.ll |
152 | // * Isl/CodeGen/read-only-scalars.ll |
153 | // * ScheduleOptimizer/pattern-matching-based-opts_10.ll |
154 | // For some reason these reload a read-only value. The reloaded value ends |
155 | // up in BBMap, but its value should be identical.
156 | // |
157 | // Required for: |
158 | // * Isl/CodeGen/OpenMP/single_loop_with_param.ll |
159 | // The parallel subfunctions need to reference the read-only value from the |
160 | // parent function, this is done by reloading them locally. |
161 | if ((New = BBMap.lookup(Val: Old))) |
162 | break; |
163 | |
164 | New = Old; |
165 | break; |
166 | |
167 | case VirtualUse::Synthesizable: |
168 | // Used by: |
169 | // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll |
170 | // * Isl/CodeGen/OpenMP/recomputed-srem.ll |
171 | // * Isl/CodeGen/OpenMP/reference-other-bb.ll |
172 | // * Isl/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll |
173 | // For some reason synthesizable values end up in GlobalMap. Their values |
174 | // are the same as trySynthesizeNewValue would return. The legacy |
175 | // implementation prioritized GlobalMap, so this is what we do here as well. |
176 | // Ideally, synthesizable values should not end up in GlobalMap. |
177 | if ((New = lookupGlobally(Old))) |
178 | break; |
179 | |
180 | // Required for: |
181 | // * Isl/CodeGen/RuntimeDebugBuilder/combine_different_values.ll |
182 | // * Isl/CodeGen/getNumberOfIterations.ll |
183 | // * Isl/CodeGen/non_affine_float_compare.ll |
184 | // * ScheduleOptimizer/pattern-matching-based-opts_10.ll |
185 | // Ideally, synthesizable values are synthesized by trySynthesizeNewValue, |
186 | // not precomputed (SCEVExpander has its own caching mechanism). |
187 | // These tests fail without this, but I think trySynthesizeNewValue would |
188 | // just re-synthesize the same instructions. |
189 | if ((New = BBMap.lookup(Val: Old))) |
190 | break; |
191 | |
192 | New = trySynthesizeNewValue(Stmt, Old, BBMap, LTS, L); |
193 | break; |
194 | |
195 | case VirtualUse::Hoisted: |
196 | // TODO: Hoisted invariant loads should be found in GlobalMap only, but not |
197 | // redefined locally (which will be ignored anyway). That is, the following |
198 | // assertion should apply: assert(!BBMap.count(Old)) |
199 | |
200 | New = lookupGlobally(Old); |
201 | break; |
202 | |
203 | case VirtualUse::Intra: |
204 | case VirtualUse::Inter: |
205 | assert(!GlobalMap.count(Old) && |
206 | "Intra and inter-stmt values are never global" ); |
207 | New = BBMap.lookup(Val: Old); |
208 | break; |
209 | } |
210 | assert(New && "Unexpected scalar dependence in region!" ); |
211 | return New; |
212 | } |
213 | |
214 | void BlockGenerator::copyInstScalar(ScopStmt &Stmt, Instruction *Inst, |
215 | ValueMapT &BBMap, LoopToScevMapT &LTS) {
216 | // We do not generate debug intrinsics as we did not investigate how to |
217 | // copy them correctly. In the current state, they just crash the code
218 | // generation as the metadata operands are not correctly copied.
219 | if (isa<DbgInfoIntrinsic>(Val: Inst)) |
220 | return; |
221 | |
222 | Instruction *NewInst = Inst->clone(); |
223 | |
224 | // Replace old operands with the new ones. |
225 | for (Value *OldOperand : Inst->operands()) { |
226 | Value *NewOperand = |
227 | getNewValue(Stmt, Old: OldOperand, BBMap, LTS, L: getLoopForStmt(Stmt)); |
228 | |
229 | if (!NewOperand) { |
230 | assert(!isa<StoreInst>(NewInst) && |
231 | "Store instructions are always needed!" ); |
232 | NewInst->deleteValue(); |
233 | return; |
234 | } |
235 | |
236 | NewInst->replaceUsesOfWith(From: OldOperand, To: NewOperand); |
237 | } |
238 | |
239 | Builder.Insert(I: NewInst); |
240 | BBMap[Inst] = NewInst; |
241 | |
242 | assert(NewInst->getModule() == Inst->getModule() && |
243 | "Expecting instructions to be in the same module" ); |
244 | |
245 | if (!NewInst->getType()->isVoidTy()) |
246 | NewInst->setName("p_" + Inst->getName()); |
247 | } |
248 | |
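// Compute the address a memory access should use in the generated code. If
// NewAccesses contains an isl AST expression for this access (i.e. an explicit
// access relation was specified), the address is built from that expression;
// otherwise the original pointer operand is simply remapped via getNewValue.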
249 | Value * |
250 | BlockGenerator::generateLocationAccessed(ScopStmt &Stmt, MemAccInst Inst, |
251 | ValueMapT &BBMap, LoopToScevMapT &LTS,
252 | isl_id_to_ast_expr *NewAccesses) { |
253 | const MemoryAccess &MA = Stmt.getArrayAccessFor(Inst); |
254 | return generateLocationAccessed( |
255 | Stmt, L: getLoopForStmt(Stmt), |
256 | Pointer: Inst.isNull() ? nullptr : Inst.getPointerOperand(), BBMap, LTS, |
257 | NewAccesses, Id: MA.getId().release(), ExpectedType: MA.getAccessValue()->getType()); |
258 | } |
259 | |
260 | Value *BlockGenerator::generateLocationAccessed( |
261 | ScopStmt &Stmt, Loop *L, Value *Pointer, ValueMapT &BBMap, |
262 | LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses, __isl_take isl_id *Id,
263 | Type *ExpectedType) { |
264 | isl_ast_expr *AccessExpr = isl_id_to_ast_expr_get(hmap: NewAccesses, key: Id); |
265 | |
266 | if (AccessExpr) { |
267 | AccessExpr = isl_ast_expr_address_of(expr: AccessExpr); |
268 | return ExprBuilder->create(Expr: AccessExpr); |
269 | } |
270 | assert( |
271 | Pointer && |
272 | "If expression was not generated, must use the original pointer value" ); |
273 | return getNewValue(Stmt, Old: Pointer, BBMap, LTS, L); |
274 | } |
275 | |
276 | Value * |
277 | BlockGenerator::getImplicitAddress(MemoryAccess &Access, Loop *L, |
278 | LoopToScevMapT &LTS, ValueMapT &BBMap,
279 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
280 | if (Access.isLatestArrayKind()) |
281 | return generateLocationAccessed(Stmt&: *Access.getStatement(), L, Pointer: nullptr, BBMap, |
282 | LTS, NewAccesses, Id: Access.getId().release(), |
283 | ExpectedType: Access.getAccessValue()->getType()); |
284 | |
285 | return getOrCreateAlloca(Access); |
286 | } |
287 | |
288 | Loop *BlockGenerator::getLoopForStmt(const ScopStmt &Stmt) const { |
289 | auto *StmtBB = Stmt.getEntryBlock(); |
290 | return LI.getLoopFor(BB: StmtBB); |
291 | } |
292 | |
293 | Value *BlockGenerator::generateArrayLoad(ScopStmt &Stmt, LoadInst *Load, |
294 | ValueMapT &BBMap, LoopToScevMapT &LTS,
295 | isl_id_to_ast_expr *NewAccesses) { |
296 | if (Value *PreloadLoad = GlobalMap.lookup(Val: Load)) |
297 | return PreloadLoad; |
298 | |
299 | Value *NewPointer = |
300 | generateLocationAccessed(Stmt, Inst: Load, BBMap, LTS, NewAccesses); |
301 | Value *ScalarLoad = |
302 | Builder.CreateAlignedLoad(Ty: Load->getType(), Ptr: NewPointer, Align: Load->getAlign(), |
303 | Name: Load->getName() + "_p_scalar_" ); |
304 | |
305 | if (PollyDebugPrinting) |
306 | RuntimeDebugBuilder::createCPUPrinter(Builder, args: "Load from " , args: NewPointer, |
307 | args: ": " , args: ScalarLoad, args: "\n" ); |
308 | |
309 | return ScalarLoad; |
310 | } |
311 | |
312 | void BlockGenerator::generateArrayStore(ScopStmt &Stmt, StoreInst *Store, |
313 | ValueMapT &BBMap, LoopToScevMapT &LTS,
314 | isl_id_to_ast_expr *NewAccesses) { |
315 | MemoryAccess &MA = Stmt.getArrayAccessFor(Inst: Store); |
316 | isl::set AccDom = MA.getAccessRelation().domain(); |
317 | std::string Subject = MA.getId().get_name(); |
318 | |
319 | generateConditionalExecution(Stmt, Subdomain: AccDom, Subject: Subject.c_str(), GenThenFunc: [&, this]() { |
320 | Value *NewPointer = |
321 | generateLocationAccessed(Stmt, Inst: Store, BBMap, LTS, NewAccesses); |
322 | Value *ValueOperand = getNewValue(Stmt, Old: Store->getValueOperand(), BBMap, |
323 | LTS, L: getLoopForStmt(Stmt)); |
324 | |
325 | if (PollyDebugPrinting) |
326 | RuntimeDebugBuilder::createCPUPrinter(Builder, args: "Store to " , args: NewPointer, |
327 | args: ": " , args: ValueOperand, args: "\n" ); |
328 | |
329 | Builder.CreateAlignedStore(Val: ValueOperand, Ptr: NewPointer, Align: Store->getAlign()); |
330 | }); |
331 | } |
332 | |
333 | bool BlockGenerator::canSyntheziseInStmt(ScopStmt &Stmt, Instruction *Inst) { |
334 | Loop *L = getLoopForStmt(Stmt); |
335 | return (Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) && |
336 | canSynthesize(V: Inst, S: *Stmt.getParent(), SE: &SE, Scope: L); |
337 | } |
338 | |
339 | void BlockGenerator::copyInstruction(ScopStmt &Stmt, Instruction *Inst, |
340 | ValueMapT &BBMap, LoopToScevMapT &LTS,
341 | isl_id_to_ast_expr *NewAccesses) { |
342 | // Terminator instructions define the control flow. They are explicitly
343 | // expressed in the isl AST and do not need to be copied.
344 | if (Inst->isTerminator()) |
345 | return; |
346 | |
347 | // Synthesizable statements will be generated on-demand. |
348 | if (canSyntheziseInStmt(Stmt, Inst)) |
349 | return; |
350 | |
351 | if (auto *Load = dyn_cast<LoadInst>(Val: Inst)) { |
352 | Value *NewLoad = generateArrayLoad(Stmt, Load, BBMap, LTS, NewAccesses); |
353 | // Compute NewLoad before its insertion in BBMap to make the insertion |
354 | // deterministic. |
355 | BBMap[Load] = NewLoad; |
356 | return; |
357 | } |
358 | |
359 | if (auto *Store = dyn_cast<StoreInst>(Val: Inst)) { |
360 | // Identified as redundant by -polly-simplify. |
361 | if (!Stmt.getArrayAccessOrNULLFor(Inst: Store)) |
362 | return; |
363 | |
364 | generateArrayStore(Stmt, Store, BBMap, LTS, NewAccesses); |
365 | return; |
366 | } |
367 | |
368 | if (auto *PHI = dyn_cast<PHINode>(Val: Inst)) { |
369 | copyPHIInstruction(Stmt, PHI, BBMap, LTS); |
370 | return; |
371 | } |
372 | |
373 | // Skip some special intrinsics for which we do not adjust the semantics to |
374 | // the new schedule. All others are handled like every other instruction. |
375 | if (isIgnoredIntrinsic(V: Inst)) |
376 | return; |
377 | |
378 | copyInstScalar(Stmt, Inst, BBMap, LTS); |
379 | } |
380 | |
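// Erase copied instructions that turned out to be trivially dead, scanning the
// generated block in reverse. BBMap entries referring to an erased instruction
// are dropped as well, and the reverse scan restarts after each erasure
// because the iterator is invalidated.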
381 | void BlockGenerator::removeDeadInstructions(BasicBlock *BB, ValueMapT &BBMap) { |
382 | auto NewBB = Builder.GetInsertBlock(); |
383 | for (auto I = NewBB->rbegin(); I != NewBB->rend(); I++) { |
384 | Instruction *NewInst = &*I; |
385 | |
386 | if (!isInstructionTriviallyDead(I: NewInst)) |
387 | continue; |
388 | |
389 | for (auto Pair : BBMap) |
390 | if (Pair.second == NewInst) { |
391 | BBMap.erase(Val: Pair.first); |
392 | } |
393 | |
394 | NewInst->eraseFromParent(); |
395 | I = NewBB->rbegin(); |
396 | } |
397 | } |
398 | |
399 | void BlockGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
400 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
401 | assert(Stmt.isBlockStmt() && |
402 | "Only block statements can be copied by the block generator" ); |
403 | |
404 | ValueMapT BBMap; |
405 | |
406 | BasicBlock *BB = Stmt.getBasicBlock(); |
407 | copyBB(Stmt, BB, BBMap, LTS, NewAccesses); |
408 | removeDeadInstructions(BB, BBMap); |
409 | } |
410 | |
411 | BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) { |
412 | BasicBlock *CopyBB = SplitBlock(Old: Builder.GetInsertBlock(), |
413 | SplitPt: &*Builder.GetInsertPoint(), DT: &DT, LI: &LI); |
414 | CopyBB->setName("polly.stmt." + BB->getName()); |
415 | return CopyBB; |
416 | } |
417 | |
418 | BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, |
419 | ValueMapT &BBMap, LoopToScevMapT &LTS,
420 | isl_id_to_ast_expr *NewAccesses) { |
421 | BasicBlock *CopyBB = splitBB(BB); |
422 | Builder.SetInsertPoint(&CopyBB->front()); |
423 | generateScalarLoads(Stmt, LTS, BBMap, NewAccesses); |
424 | generateBeginStmtTrace(Stmt, LTS, BBMap); |
425 | |
426 | copyBB(Stmt, BB, BBCopy: CopyBB, BBMap, LTS, NewAccesses); |
427 | |
428 | // After a basic block was copied store all scalars that escape this block in |
429 | // their alloca. |
430 | generateScalarStores(Stmt, LTS, BBMap, NewAccesses); |
431 | return CopyBB; |
432 | } |
433 | |
434 | void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB, |
435 | ValueMapT &BBMap, LoopToScevMapT &LTS,
436 | isl_id_to_ast_expr *NewAccesses) { |
437 | EntryBB = &CopyBB->getParent()->getEntryBlock(); |
438 | |
439 | // Block statements and the entry blocks of region statements are code
440 | // generated from instruction lists. This allows us to optimize the
441 | // instructions that belong to a certain scop statement. As the code
442 | // structure of region statements might be arbitrarily complex, optimizing
443 | // the instruction list is not yet supported.
444 | if (Stmt.isBlockStmt() || (Stmt.isRegionStmt() && Stmt.getEntryBlock() == BB)) |
445 | for (Instruction *Inst : Stmt.getInstructions()) |
446 | copyInstruction(Stmt, Inst, BBMap, LTS, NewAccesses); |
447 | else |
448 | for (Instruction &Inst : *BB) |
449 | copyInstruction(Stmt, Inst: &Inst, BBMap, LTS, NewAccesses); |
450 | } |
451 | |
452 | Value *BlockGenerator::getOrCreateAlloca(const MemoryAccess &Access) { |
453 | assert(!Access.isLatestArrayKind() && "Trying to get alloca for array kind" ); |
454 | |
455 | return getOrCreateAlloca(Array: Access.getLatestScopArrayInfo()); |
456 | } |
457 | |
458 | Value *BlockGenerator::getOrCreateAlloca(const ScopArrayInfo *Array) { |
459 | assert(!Array->isArrayKind() && "Trying to get alloca for array kind" ); |
460 | |
461 | auto &Addr = ScalarMap[Array]; |
462 | |
463 | if (Addr) { |
464 | // Allow allocas to be (temporarily) redirected once by adding a new |
465 | // old-alloca-addr to new-addr mapping to GlobalMap. This functionality |
466 | // is used for example by the OpenMP code generation where a first use |
467 | // of a scalar while still in the host code allocates a normal alloca with |
468 | // getOrCreateAlloca. When the values of this scalar are accessed during |
469 | // the generation of the parallel subfunction, these values are copied over |
470 | // to the parallel subfunction and each request for a scalar alloca slot |
471 | // must be forwarded to the temporary in-subfunction slot. This mapping is |
472 | // removed when the subfunction has been generated and again normal host |
473 | // code is generated. Due to the following reasons it is not possible to |
474 | // perform the GlobalMap lookup right after creating the alloca below, but |
475 | // instead we need to check GlobalMap at each call to getOrCreateAlloca: |
476 | // |
477 | // 1) GlobalMap may be changed multiple times (for each parallel loop), |
478 | // 2) The temporary mapping is commonly only known after the initial |
479 | // alloca has already been generated, and |
480 | // 3) The original alloca value must be restored after leaving the |
481 | // sub-function. |
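// Sketch of such a redirection (names are hypothetical):
//   host code:    ScalarMap[Array] = %x.s2a           (created below)
//   subfunction:  GlobalMap[%x.s2a] = %x.s2a.subfn    (lookup below hits)
//   host again:   GlobalMap entry removed, %x.s2a is returned once more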
482 | if (Value *NewAddr = GlobalMap.lookup(Val: &*Addr)) |
483 | return NewAddr; |
484 | return Addr; |
485 | } |
486 | |
487 | Type *Ty = Array->getElementType(); |
488 | Value *ScalarBase = Array->getBasePtr(); |
489 | std::string NameExt; |
490 | if (Array->isPHIKind()) |
491 | NameExt = ".phiops" ; |
492 | else |
493 | NameExt = ".s2a" ; |
494 | |
495 | const DataLayout &DL = Builder.GetInsertBlock()->getModule()->getDataLayout(); |
496 | |
497 | Addr = |
498 | new AllocaInst(Ty, DL.getAllocaAddrSpace(), nullptr, |
499 | DL.getPrefTypeAlign(Ty), ScalarBase->getName() + NameExt); |
500 | EntryBB = &Builder.GetInsertBlock()->getParent()->getEntryBlock(); |
501 | Addr->insertBefore(InsertPos: &*EntryBB->getFirstInsertionPt()); |
502 | |
503 | return Addr; |
504 | } |
505 | |
506 | void BlockGenerator::handleOutsideUsers(const Scop &S, ScopArrayInfo *Array) { |
507 | Instruction *Inst = cast<Instruction>(Val: Array->getBasePtr()); |
508 | |
509 | // If the instruction was already handled (i.e. it escapes and was copied
510 | // before), exit early. Otherwise collect its escaping users and, if there are
511 | // any, get its alloca and record both in the EscapeMap for later finalization.
512 | if (EscapeMap.count(Key: Inst)) |
513 | return; |
514 | |
515 | EscapeUserVectorTy EscapeUsers; |
516 | for (User *U : Inst->users()) { |
517 | |
518 | // Non-instruction users will never escape.
519 | Instruction *UI = dyn_cast<Instruction>(Val: U); |
520 | if (!UI) |
521 | continue; |
522 | |
523 | if (S.contains(I: UI)) |
524 | continue; |
525 | |
526 | EscapeUsers.push_back(Elt: UI); |
527 | } |
528 | |
529 | // Exit if no escape uses were found. |
530 | if (EscapeUsers.empty()) |
531 | return; |
532 | |
533 | // Get or create an escape alloca for this instruction. |
534 | auto *ScalarAddr = getOrCreateAlloca(Array); |
535 | |
536 | // Remember that this instruction has escape uses and the escape alloca. |
537 | EscapeMap[Inst] = std::make_pair(x&: ScalarAddr, y: std::move(EscapeUsers)); |
538 | } |
539 | |
540 | void BlockGenerator::generateScalarLoads( |
541 | ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
542 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
543 | for (MemoryAccess *MA : Stmt) { |
544 | if (MA->isOriginalArrayKind() || MA->isWrite()) |
545 | continue; |
546 | |
547 | #ifndef NDEBUG |
548 | auto StmtDom = |
549 | Stmt.getDomain().intersect_params(params: Stmt.getParent()->getContext()); |
550 | auto AccDom = MA->getAccessRelation().domain(); |
551 | assert(!StmtDom.is_subset(AccDom).is_false() && |
552 | "Scalar must be loaded in all statement instances" ); |
553 | #endif |
554 | |
555 | auto *Address = |
556 | getImplicitAddress(Access&: *MA, L: getLoopForStmt(Stmt), LTS, BBMap, NewAccesses); |
557 | assert((!isa<Instruction>(Address) || |
558 | DT.dominates(cast<Instruction>(Address)->getParent(), |
559 | Builder.GetInsertBlock())) && |
560 | "Domination violation" ); |
561 | BBMap[MA->getAccessValue()] = Builder.CreateLoad( |
562 | Ty: MA->getElementType(), Ptr: Address, Name: Address->getName() + ".reload" ); |
563 | } |
564 | } |
565 | |
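// Build an i1 value that is true iff the current statement instance lies in
// 'Subdomain'. Both the statement domain and the subdomain are mapped into the
// schedule space; the AST build, restricted to the scheduled domain, then
// emits a membership expression for the scheduled subdomain, which is compared
// against zero.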
566 | Value *BlockGenerator::buildContainsCondition(ScopStmt &Stmt, |
567 | const isl::set &Subdomain) { |
568 | isl::ast_build AstBuild = Stmt.getAstBuild(); |
569 | isl::set Domain = Stmt.getDomain(); |
570 | |
571 | isl::union_map USchedule = AstBuild.get_schedule(); |
572 | USchedule = USchedule.intersect_domain(uset: Domain); |
573 | |
574 | assert(!USchedule.is_empty()); |
575 | isl::map Schedule = isl::map::from_union_map(umap: USchedule); |
576 | |
577 | isl::set ScheduledDomain = Schedule.range(); |
578 | isl::set ScheduledSet = Subdomain.apply(map: Schedule); |
579 | |
580 | isl::ast_build RestrictedBuild = AstBuild.restrict(set: ScheduledDomain); |
581 | |
582 | isl::ast_expr IsInSet = RestrictedBuild.expr_from(set: ScheduledSet); |
583 | Value *IsInSetExpr = ExprBuilder->create(Expr: IsInSet.copy()); |
584 | IsInSetExpr = Builder.CreateICmpNE( |
585 | LHS: IsInSetExpr, RHS: ConstantInt::get(Ty: IsInSetExpr->getType(), V: 0)); |
586 | |
587 | return IsInSetExpr; |
588 | } |
589 | |
590 | void BlockGenerator::generateConditionalExecution( |
591 | ScopStmt &Stmt, const isl::set &Subdomain, StringRef Subject, |
592 | const std::function<void()> &GenThenFunc) { |
593 | isl::set StmtDom = Stmt.getDomain(); |
594 | |
595 | // If the condition is a tautology, don't generate a condition around the |
596 | // code. |
597 | bool IsPartialWrite = |
598 | !StmtDom.intersect_params(params: Stmt.getParent()->getContext()) |
599 | .is_subset(set2: Subdomain); |
600 | if (!IsPartialWrite) { |
601 | GenThenFunc(); |
602 | return; |
603 | } |
604 | |
605 | // Generate the condition. |
606 | Value *Cond = buildContainsCondition(Stmt, Subdomain); |
607 | |
608 | // Don't call GenThenFunc if it is never executed. An ast index expression |
609 | // might not be defined in this case. |
610 | if (auto *Const = dyn_cast<ConstantInt>(Val: Cond)) |
611 | if (Const->isZero()) |
612 | return; |
613 | |
614 | BasicBlock *HeadBlock = Builder.GetInsertBlock(); |
615 | StringRef BlockName = HeadBlock->getName(); |
616 | |
617 | // Generate the conditional block. |
618 | DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager); |
619 | SplitBlockAndInsertIfThen(Cond, SplitBefore: &*Builder.GetInsertPoint(), Unreachable: false, BranchWeights: nullptr, |
620 | DTU: &DTU, LI: &LI); |
621 | BranchInst *Branch = cast<BranchInst>(Val: HeadBlock->getTerminator()); |
622 | BasicBlock *ThenBlock = Branch->getSuccessor(i: 0); |
623 | BasicBlock *TailBlock = Branch->getSuccessor(i: 1); |
624 | |
625 | // Assign descriptive names. |
626 | if (auto *CondInst = dyn_cast<Instruction>(Val: Cond)) |
627 | CondInst->setName("polly." + Subject + ".cond" ); |
628 | ThenBlock->setName(BlockName + "." + Subject + ".partial" ); |
629 | TailBlock->setName(BlockName + ".cont" ); |
630 | |
631 | // Put the client code into the conditional block and continue in the merge |
632 | // block afterwards. |
633 | Builder.SetInsertPoint(TheBB: ThenBlock, IP: ThenBlock->getFirstInsertionPt()); |
634 | GenThenFunc(); |
635 | Builder.SetInsertPoint(TheBB: TailBlock, IP: TailBlock->getFirstInsertionPt()); |
636 | } |
637 | |
638 | static std::string getInstName(Value *Val) { |
639 | std::string Result; |
640 | raw_string_ostream OS(Result); |
641 | Val->printAsOperand(O&: OS, PrintType: false); |
642 | return OS.str(); |
643 | } |
644 | |
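// Emit a runtime printf call at the beginning of the statement (only when
// -polly-codegen-trace-stmts is set). The printed line has the form
// "<stmt name>(<iv0>,<iv1>,...)", followed by "name=value" pairs for the
// scalars used in the statement if -polly-codegen-trace-scalars is also set,
// e.g. (hypothetical output) "Stmt_for_body(0,3) %sum=42".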
645 | void BlockGenerator::generateBeginStmtTrace(ScopStmt &Stmt, LoopToScevMapT &LTS,
646 | ValueMapT &BBMap) { |
647 | if (!TraceStmts) |
648 | return; |
649 | |
650 | Scop *S = Stmt.getParent(); |
651 | const char *BaseName = Stmt.getBaseName(); |
652 | |
653 | isl::ast_build AstBuild = Stmt.getAstBuild(); |
654 | isl::set Domain = Stmt.getDomain(); |
655 | |
656 | isl::union_map USchedule = AstBuild.get_schedule().intersect_domain(uset: Domain); |
657 | isl::map Schedule = isl::map::from_union_map(umap: USchedule); |
658 | assert(Schedule.is_empty().is_false() && |
659 | "The stmt must have a valid instance" ); |
660 | |
661 | isl::multi_pw_aff ScheduleMultiPwAff = |
662 | isl::pw_multi_aff::from_map(map: Schedule.reverse()); |
663 | isl::ast_build RestrictedBuild = AstBuild.restrict(set: Schedule.range()); |
664 | |
665 | // Sequence of strings to print. |
666 | SmallVector<llvm::Value *, 8> Values; |
667 | |
668 | // Print the name of the statement. |
669 | // TODO: Indent by the depth of the statement instance in the schedule tree. |
670 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: BaseName)); |
671 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: "(" )); |
672 | |
673 | // Add the coordinate of the statement instance. |
674 | for (unsigned i : rangeIslSize(Begin: 0, End: ScheduleMultiPwAff.dim(type: isl::dim::out))) { |
675 | if (i > 0) |
676 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: "," )); |
677 | |
678 | isl::ast_expr IsInSet = RestrictedBuild.expr_from(pa: ScheduleMultiPwAff.at(pos: i)); |
679 | Values.push_back(Elt: ExprBuilder->create(Expr: IsInSet.copy())); |
680 | } |
681 | |
682 | if (TraceScalars) { |
683 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: ")" )); |
684 | DenseSet<Instruction *> Encountered; |
685 | |
686 | // Add the value of each scalar (and the result of PHIs) used in the |
687 | // statement. |
688 | // TODO: Values used in region-statements. |
689 | for (Instruction *Inst : Stmt.insts()) { |
690 | if (!RuntimeDebugBuilder::isPrintable(Ty: Inst->getType())) |
691 | continue; |
692 | |
693 | if (isa<PHINode>(Val: Inst)) { |
694 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: " " )); |
695 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString( |
696 | Builder, Str: getInstName(Val: Inst))); |
697 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: "=" )); |
698 | Values.push_back(Elt: getNewValue(Stmt, Old: Inst, BBMap, LTS, |
699 | L: LI.getLoopFor(BB: Inst->getParent()))); |
700 | } else { |
701 | for (Value *Op : Inst->operand_values()) { |
702 | // Do not print values that cannot change during the execution of the |
703 | // SCoP. |
704 | auto *OpInst = dyn_cast<Instruction>(Val: Op); |
705 | if (!OpInst) |
706 | continue; |
707 | if (!S->contains(I: OpInst)) |
708 | continue; |
709 | |
710 | // Print each scalar at most once, and exclude values defined in the |
711 | // statement itself. |
712 | if (Encountered.count(V: OpInst)) |
713 | continue; |
714 | |
715 | Values.push_back( |
716 | Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: " " )); |
717 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString( |
718 | Builder, Str: getInstName(Val: OpInst))); |
719 | Values.push_back( |
720 | Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: "=" )); |
721 | Values.push_back(Elt: getNewValue(Stmt, Old: OpInst, BBMap, LTS, |
722 | L: LI.getLoopFor(BB: Inst->getParent()))); |
723 | Encountered.insert(V: OpInst); |
724 | } |
725 | } |
726 | |
727 | Encountered.insert(V: Inst); |
728 | } |
729 | |
730 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: "\n" )); |
731 | } else { |
732 | Values.push_back(Elt: RuntimeDebugBuilder::getPrintableString(Builder, Str: ")\n" )); |
733 | } |
734 | |
735 | RuntimeDebugBuilder::createCPUPrinter(Builder, args: ArrayRef<Value *>(Values)); |
736 | } |
737 | |
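// Write back all scalar (MemoryKind::Value and MemoryKind::PHI) outputs of a
// block statement to their demoted alloca slots. Each store is wrapped in
// generateConditionalExecution so that partial writes only happen for the
// statement instances contained in the access domain.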
738 | void BlockGenerator::generateScalarStores( |
739 | ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
740 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
741 | Loop *L = LI.getLoopFor(BB: Stmt.getBasicBlock()); |
742 | |
743 | assert(Stmt.isBlockStmt() && |
744 | "Region statements need to use the generateScalarStores() function in " |
745 | "the RegionGenerator" ); |
746 | |
747 | for (MemoryAccess *MA : Stmt) { |
748 | if (MA->isOriginalArrayKind() || MA->isRead()) |
749 | continue; |
750 | |
751 | isl::set AccDom = MA->getAccessRelation().domain(); |
752 | std::string Subject = MA->getId().get_name(); |
753 | |
754 | generateConditionalExecution( |
755 | Stmt, Subdomain: AccDom, Subject: Subject.c_str(), GenThenFunc: [&, this, MA]() { |
756 | Value *Val = MA->getAccessValue(); |
757 | if (MA->isAnyPHIKind()) { |
758 | assert(MA->getIncoming().size() >= 1 && |
759 | "Block statements have exactly one exiting block, or " |
760 | "multiple but " |
761 | "with same incoming block and value" ); |
762 | assert(std::all_of(MA->getIncoming().begin(), |
763 | MA->getIncoming().end(), |
764 | [&](std::pair<BasicBlock *, Value *> p) -> bool { |
765 | return p.first == Stmt.getBasicBlock(); |
766 | }) && |
767 | "Incoming block must be statement's block" ); |
768 | Val = MA->getIncoming()[0].second; |
769 | } |
770 | auto Address = getImplicitAddress(Access&: *MA, L: getLoopForStmt(Stmt), LTS, |
771 | BBMap, NewAccesses); |
772 | |
773 | Val = getNewValue(Stmt, Old: Val, BBMap, LTS, L); |
774 | assert((!isa<Instruction>(Val) || |
775 | DT.dominates(cast<Instruction>(Val)->getParent(), |
776 | Builder.GetInsertBlock())) && |
777 | "Domination violation" ); |
778 | assert((!isa<Instruction>(Address) || |
779 | DT.dominates(cast<Instruction>(Address)->getParent(), |
780 | Builder.GetInsertBlock())) && |
781 | "Domination violation" ); |
782 | |
783 | // The new Val might have a different type than the old Val due to |
784 | // ScalarEvolution looking through bitcasts. |
785 | Address = Builder.CreateBitOrPointerCast( |
786 | V: Address, DestTy: Val->getType()->getPointerTo( |
787 | AddrSpace: Address->getType()->getPointerAddressSpace())); |
788 | |
789 | Builder.CreateStore(Val, Ptr: Address); |
790 | }); |
791 | } |
792 | } |
793 | |
794 | void BlockGenerator::createScalarInitialization(Scop &S) { |
795 | BasicBlock *ExitBB = S.getExit(); |
796 | BasicBlock *PreEntryBB = S.getEnteringBlock(); |
797 | |
798 | Builder.SetInsertPoint(&*StartBlock->begin()); |
799 | |
800 | for (auto &Array : S.arrays()) { |
801 | if (Array->getNumberOfDimensions() != 0) |
802 | continue; |
803 | if (Array->isPHIKind()) { |
804 | // For PHI nodes, the only values we need to store are the ones that |
805 | // reach the PHI node from outside the region. In general there should |
806 | // only be one such incoming edge and this edge should enter through |
807 | // 'PreEntryBB'. |
808 | auto PHI = cast<PHINode>(Val: Array->getBasePtr()); |
809 | |
810 | for (auto BI = PHI->block_begin(), BE = PHI->block_end(); BI != BE; BI++) |
811 | if (!S.contains(BB: *BI) && *BI != PreEntryBB) |
812 | llvm_unreachable("Incoming edges from outside the scop should always " |
813 | "come from PreEntryBB" ); |
814 | |
815 | int Idx = PHI->getBasicBlockIndex(BB: PreEntryBB); |
816 | if (Idx < 0) |
817 | continue; |
818 | |
819 | Value *ScalarValue = PHI->getIncomingValue(i: Idx); |
820 | |
821 | Builder.CreateStore(Val: ScalarValue, Ptr: getOrCreateAlloca(Array)); |
822 | continue; |
823 | } |
824 | |
825 | auto *Inst = dyn_cast<Instruction>(Val: Array->getBasePtr()); |
826 | |
827 | if (Inst && S.contains(I: Inst)) |
828 | continue; |
829 | |
830 | // PHI nodes that are not marked as such in their SAI object are either exit |
831 | // PHI nodes we model as common scalars but without initialization, or |
832 | // incoming phi nodes that need to be initialized. Check if the first is the |
833 | // case for Inst and do not create and initialize memory if so. |
834 | if (auto *PHI = dyn_cast_or_null<PHINode>(Val: Inst)) |
835 | if (!S.hasSingleExitEdge() && PHI->getBasicBlockIndex(BB: ExitBB) >= 0) |
836 | continue; |
837 | |
838 | Builder.CreateStore(Val: Array->getBasePtr(), Ptr: getOrCreateAlloca(Array)); |
839 | } |
840 | } |
841 | |
842 | void BlockGenerator::createScalarFinalization(Scop &S) { |
843 | // The exit block of the __unoptimized__ region. |
844 | BasicBlock *ExitBB = S.getExitingBlock(); |
845 | // The merge block __just after__ the region and the optimized region. |
846 | BasicBlock *MergeBB = S.getExit(); |
847 | |
848 | // The exit block of the __optimized__ region. |
849 | BasicBlock *OptExitBB = *(pred_begin(BB: MergeBB)); |
850 | if (OptExitBB == ExitBB) |
851 | OptExitBB = *(++pred_begin(BB: MergeBB)); |
852 | |
853 | Builder.SetInsertPoint(OptExitBB->getTerminator()); |
854 | for (const auto &EscapeMapping : EscapeMap) { |
855 | // Extract the escaping instruction and the escaping users as well as the |
856 | // alloca the instruction was demoted to. |
857 | Instruction *EscapeInst = EscapeMapping.first; |
858 | const auto &EscapeMappingValue = EscapeMapping.second; |
859 | const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second; |
860 | auto *ScalarAddr = cast<AllocaInst>(Val: &*EscapeMappingValue.first); |
861 | |
862 | // Reload the demoted instruction in the optimized version of the SCoP. |
863 | Value *EscapeInstReload = |
864 | Builder.CreateLoad(Ty: ScalarAddr->getAllocatedType(), Ptr: ScalarAddr, |
865 | Name: EscapeInst->getName() + ".final_reload" ); |
866 | EscapeInstReload = |
867 | Builder.CreateBitOrPointerCast(V: EscapeInstReload, DestTy: EscapeInst->getType()); |
868 | |
869 | // Create the merge PHI that merges the optimized and unoptimized version. |
870 | PHINode *MergePHI = PHINode::Create(Ty: EscapeInst->getType(), NumReservedValues: 2, |
871 | NameStr: EscapeInst->getName() + ".merge" ); |
872 | MergePHI->insertBefore(InsertPos: &*MergeBB->getFirstInsertionPt()); |
873 | |
874 | // Add the respective values to the merge PHI. |
875 | MergePHI->addIncoming(V: EscapeInstReload, BB: OptExitBB); |
876 | MergePHI->addIncoming(V: EscapeInst, BB: ExitBB); |
877 | |
878 | // Scalar evolution's information about the escaping instruction needs to be
879 | // revoked so that the new merged instruction will be used.
880 | if (SE.isSCEVable(Ty: EscapeInst->getType())) |
881 | SE.forgetValue(V: EscapeInst); |
882 | |
883 | // Replace all uses of the demoted instruction with the merge PHI. |
884 | for (Instruction *EUser : EscapeUsers) |
885 | EUser->replaceUsesOfWith(From: EscapeInst, To: MergePHI); |
886 | } |
887 | } |
888 | |
889 | void BlockGenerator::findOutsideUsers(Scop &S) { |
890 | for (auto &Array : S.arrays()) { |
891 | |
892 | if (Array->getNumberOfDimensions() != 0) |
893 | continue; |
894 | |
895 | if (Array->isPHIKind()) |
896 | continue; |
897 | |
898 | auto *Inst = dyn_cast<Instruction>(Val: Array->getBasePtr()); |
899 | |
900 | if (!Inst) |
901 | continue; |
902 | |
903 | // Scop invariant hoisting moves some of the base pointers out of the scop. |
904 | // We can ignore these, as the invariant load hoisting already registers the |
905 | // relevant outside users. |
906 | if (!S.contains(I: Inst)) |
907 | continue; |
908 | |
909 | handleOutsideUsers(S, Array); |
910 | } |
911 | } |
912 | |
913 | void BlockGenerator::createExitPHINodeMerges(Scop &S) { |
914 | if (S.hasSingleExitEdge()) |
915 | return; |
916 | |
917 | auto *ExitBB = S.getExitingBlock(); |
918 | auto *MergeBB = S.getExit(); |
919 | auto *AfterMergeBB = MergeBB->getSingleSuccessor(); |
920 | BasicBlock *OptExitBB = *(pred_begin(BB: MergeBB)); |
921 | if (OptExitBB == ExitBB) |
922 | OptExitBB = *(++pred_begin(BB: MergeBB)); |
923 | |
924 | Builder.SetInsertPoint(OptExitBB->getTerminator()); |
925 | |
926 | for (auto &SAI : S.arrays()) { |
927 | auto *Val = SAI->getBasePtr(); |
928 | |
929 | // Only Value-like scalars need a merge PHI. Exit block PHIs receive either |
930 | // the original PHI's value or the reloaded incoming values from the |
931 | // generated code. An llvm::Value is merged between the original code's |
932 | // value and the generated one.
933 | if (!SAI->isExitPHIKind()) |
934 | continue; |
935 | |
936 | PHINode *PHI = dyn_cast<PHINode>(Val); |
937 | if (!PHI) |
938 | continue; |
939 | |
940 | if (PHI->getParent() != AfterMergeBB) |
941 | continue; |
942 | |
943 | std::string Name = PHI->getName().str(); |
944 | Value *ScalarAddr = getOrCreateAlloca(Array: SAI); |
945 | Value *Reload = Builder.CreateLoad(Ty: SAI->getElementType(), Ptr: ScalarAddr, |
946 | Name: Name + ".ph.final_reload" ); |
947 | Reload = Builder.CreateBitOrPointerCast(V: Reload, DestTy: PHI->getType()); |
948 | Value *OriginalValue = PHI->getIncomingValueForBlock(BB: MergeBB); |
949 | assert((!isa<Instruction>(OriginalValue) || |
950 | cast<Instruction>(OriginalValue)->getParent() != MergeBB) && |
951 | "Original value must no be one we just generated." ); |
952 | auto *MergePHI = PHINode::Create(Ty: PHI->getType(), NumReservedValues: 2, NameStr: Name + ".ph.merge" ); |
953 | MergePHI->insertBefore(InsertPos: &*MergeBB->getFirstInsertionPt()); |
954 | MergePHI->addIncoming(V: Reload, BB: OptExitBB); |
955 | MergePHI->addIncoming(V: OriginalValue, BB: ExitBB); |
956 | int Idx = PHI->getBasicBlockIndex(BB: MergeBB); |
957 | PHI->setIncomingValue(i: Idx, V: MergePHI); |
958 | } |
959 | } |
960 | |
961 | void BlockGenerator::invalidateScalarEvolution(Scop &S) { |
962 | for (auto &Stmt : S) |
963 | if (Stmt.isCopyStmt()) |
964 | continue; |
965 | else if (Stmt.isBlockStmt()) |
966 | for (auto &Inst : *Stmt.getBasicBlock()) |
967 | SE.forgetValue(V: &Inst); |
968 | else if (Stmt.isRegionStmt()) |
969 | for (auto *BB : Stmt.getRegion()->blocks()) |
970 | for (auto &Inst : *BB) |
971 | SE.forgetValue(V: &Inst); |
972 | else |
973 | llvm_unreachable("Unexpected statement type found" ); |
974 | |
975 | // Invalidate SCEV of loops surrounding the EscapeUsers. |
976 | for (const auto &EscapeMapping : EscapeMap) { |
977 | const EscapeUserVectorTy &EscapeUsers = EscapeMapping.second.second; |
978 | for (Instruction *EUser : EscapeUsers) { |
979 | if (Loop *L = LI.getLoopFor(BB: EUser->getParent())) |
980 | while (L) { |
981 | SE.forgetLoop(L); |
982 | L = L->getParentLoop(); |
983 | } |
984 | } |
985 | } |
986 | } |
987 | |
988 | void BlockGenerator::finalizeSCoP(Scop &S) { |
989 | findOutsideUsers(S); |
990 | createScalarInitialization(S); |
991 | createExitPHINodeMerges(S); |
992 | createScalarFinalization(S); |
993 | invalidateScalarEvolution(S); |
994 | } |
995 | |
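// Repair the dominator tree for a newly copied block: if the copy of BB's
// immediate dominator already exists, it becomes the immediate dominator of
// BBCopy. The start-block copy of the original immediate dominator is
// returned; the caller uses it to seed BBCopy's value map.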
996 | BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB, |
997 | BasicBlock *BBCopy) { |
998 | |
999 | BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock(); |
1000 | BasicBlock *BBCopyIDom = EndBlockMap.lookup(Val: BBIDom); |
1001 | |
1002 | if (BBCopyIDom) |
1003 | DT.changeImmediateDominator(BB: BBCopy, NewBB: BBCopyIDom); |
1004 | |
1005 | return StartBlockMap.lookup(Val: BBIDom); |
1006 | } |
1007 | |
1008 | // This is to determine whether an llvm::Value (defined in @p BB) is usable when |
1009 | // leaving a subregion. The straightforward DT.dominates(BB, R->getExitBlock())
1010 | // does not work in cases where the exit block has edges from outside the
1011 | // region. In that case the llvm::Value would never be usable in the exit
1012 | // block. The RegionGenerator however creates a new exit block ('ExitBBCopy')
1013 | // for the subregion's exiting edges only. We need to determine whether an |
1014 | // llvm::Value is usable in there. We do this by checking whether it dominates |
1015 | // all exiting blocks individually. |
1016 | static bool isDominatingSubregionExit(const DominatorTree &DT, Region *R, |
1017 | BasicBlock *BB) { |
1018 | for (auto ExitingBB : predecessors(BB: R->getExit())) { |
1019 | // Check for non-subregion incoming edges. |
1020 | if (!R->contains(BB: ExitingBB)) |
1021 | continue; |
1022 | |
1023 | if (!DT.dominates(A: BB, B: ExitingBB)) |
1024 | return false; |
1025 | } |
1026 | |
1027 | return true; |
1028 | } |
1029 | |
1030 | // Find the direct dominator of the subregion's exit block if the subregion was |
1031 | // simplified. |
1032 | static BasicBlock *findExitDominator(DominatorTree &DT, Region *R) { |
1033 | BasicBlock *Common = nullptr; |
1034 | for (auto ExitingBB : predecessors(BB: R->getExit())) { |
1035 | // Check for non-subregion incoming edges. |
1036 | if (!R->contains(BB: ExitingBB)) |
1037 | continue; |
1038 | |
1039 | // First exiting edge. |
1040 | if (!Common) { |
1041 | Common = ExitingBB; |
1042 | continue; |
1043 | } |
1044 | |
1045 | Common = DT.findNearestCommonDominator(A: Common, B: ExitingBB); |
1046 | } |
1047 | |
1048 | assert(Common && R->contains(Common)); |
1049 | return Common; |
1050 | } |
1051 | |
1052 | void RegionGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
1053 | __isl_keep isl_id_to_ast_expr *IdToAstExp) { |
1054 | assert(Stmt.isRegionStmt() && |
1055 | "Only region statements can be copied by the region generator" ); |
1056 | |
1057 | // Forget all old mappings. |
1058 | StartBlockMap.clear(); |
1059 | EndBlockMap.clear(); |
1060 | RegionMaps.clear(); |
1061 | IncompletePHINodeMap.clear(); |
1062 | |
1063 | // Collection of all values related to this subregion. |
1064 | ValueMapT ValueMap; |
1065 | |
1066 | // The region represented by the statement. |
1067 | Region *R = Stmt.getRegion(); |
1068 | |
1069 | // Create a dedicated entry for the region where we can reload all demoted |
1070 | // inputs. |
1071 | BasicBlock *EntryBB = R->getEntry(); |
1072 | BasicBlock *EntryBBCopy = SplitBlock(Old: Builder.GetInsertBlock(), |
1073 | SplitPt: &*Builder.GetInsertPoint(), DT: &DT, LI: &LI); |
1074 | EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry" ); |
1075 | Builder.SetInsertPoint(&EntryBBCopy->front()); |
1076 | |
1077 | ValueMapT &EntryBBMap = RegionMaps[EntryBBCopy]; |
1078 | generateScalarLoads(Stmt, LTS, BBMap&: EntryBBMap, NewAccesses: IdToAstExp); |
1079 | generateBeginStmtTrace(Stmt, LTS, BBMap&: EntryBBMap); |
1080 | |
1081 | for (auto PI = pred_begin(BB: EntryBB), PE = pred_end(BB: EntryBB); PI != PE; ++PI) |
1082 | if (!R->contains(BB: *PI)) { |
1083 | StartBlockMap[*PI] = EntryBBCopy; |
1084 | EndBlockMap[*PI] = EntryBBCopy; |
1085 | } |
1086 | |
1087 | // Iterate over all blocks in the region in a breadth-first search. |
1088 | std::deque<BasicBlock *> Blocks; |
1089 | SmallSetVector<BasicBlock *, 8> SeenBlocks; |
1090 | Blocks.push_back(x: EntryBB); |
1091 | SeenBlocks.insert(X: EntryBB); |
1092 | |
1093 | while (!Blocks.empty()) { |
1094 | BasicBlock *BB = Blocks.front(); |
1095 | Blocks.pop_front(); |
1096 | |
1097 | // First split the block and update dominance information. |
1098 | BasicBlock *BBCopy = splitBB(BB); |
1099 | BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy); |
1100 | |
1101 | // Get the mapping for this block and initialize it with either the scalar |
1102 | // loads from the generated entering block (which dominates all blocks of |
1103 | // this subregion) or the maps of the immediate dominator, if part of the |
1104 | // subregion. The latter necessarily includes the former. |
1105 | ValueMapT *InitBBMap; |
1106 | if (BBCopyIDom) { |
1107 | assert(RegionMaps.count(BBCopyIDom)); |
1108 | InitBBMap = &RegionMaps[BBCopyIDom]; |
1109 | } else |
1110 | InitBBMap = &EntryBBMap; |
1111 | auto Inserted = RegionMaps.insert(KV: std::make_pair(x&: BBCopy, y&: *InitBBMap)); |
1112 | ValueMapT &RegionMap = Inserted.first->second; |
1113 | |
1114 | // Copy the block with the BlockGenerator. |
1115 | Builder.SetInsertPoint(&BBCopy->front()); |
1116 | copyBB(Stmt, BB, CopyBB: BBCopy, BBMap&: RegionMap, LTS, NewAccesses: IdToAstExp); |
1117 | |
1118 | // In order to remap PHI nodes we store also basic block mappings. |
1119 | StartBlockMap[BB] = BBCopy; |
1120 | EndBlockMap[BB] = Builder.GetInsertBlock(); |
1121 | |
1122 | // Add values to incomplete PHI nodes waiting for this block to be copied. |
1123 | for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB]) |
1124 | addOperandToPHI(Stmt, PHI: PHINodePair.first, PHICopy: PHINodePair.second, IncomingBB: BB, LTS); |
1125 | IncompletePHINodeMap[BB].clear(); |
1126 | |
1127 | // And continue with new successors inside the region. |
1128 | for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++) |
1129 | if (R->contains(BB: *SI) && SeenBlocks.insert(X: *SI)) |
1130 | Blocks.push_back(x: *SI); |
1131 | |
1132 | // Remember value in case it is visible after this subregion. |
1133 | if (isDominatingSubregionExit(DT, R, BB)) |
1134 | ValueMap.insert(I: RegionMap.begin(), E: RegionMap.end()); |
1135 | } |
1136 | |
1137 | // Now create a new dedicated region exit block and add it to the region map. |
1138 | BasicBlock *ExitBBCopy = SplitBlock(Old: Builder.GetInsertBlock(), |
1139 | SplitPt: &*Builder.GetInsertPoint(), DT: &DT, LI: &LI); |
1140 | ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit" ); |
1141 | StartBlockMap[R->getExit()] = ExitBBCopy; |
1142 | EndBlockMap[R->getExit()] = ExitBBCopy; |
1143 | |
1144 | BasicBlock *ExitDomBBCopy = EndBlockMap.lookup(Val: findExitDominator(DT, R)); |
1145 | assert(ExitDomBBCopy && |
1146 | "Common exit dominator must be within region; at least the entry node " |
1147 | "must match" ); |
1148 | DT.changeImmediateDominator(BB: ExitBBCopy, NewBB: ExitDomBBCopy); |
1149 | |
1150 | // As the block generator doesn't handle control flow we need to add the |
1151 | // region control flow by hand after all blocks have been copied. |
1152 | for (BasicBlock *BB : SeenBlocks) { |
1153 | |
1154 | BasicBlock *BBCopyStart = StartBlockMap[BB]; |
1155 | BasicBlock *BBCopyEnd = EndBlockMap[BB]; |
1156 | Instruction *TI = BB->getTerminator(); |
1157 | if (isa<UnreachableInst>(Val: TI)) { |
1158 | while (!BBCopyEnd->empty()) |
1159 | BBCopyEnd->begin()->eraseFromParent(); |
1160 | new UnreachableInst(BBCopyEnd->getContext(), BBCopyEnd); |
1161 | continue; |
1162 | } |
1163 | |
1164 | Instruction *BICopy = BBCopyEnd->getTerminator(); |
1165 | |
1166 | ValueMapT &RegionMap = RegionMaps[BBCopyStart]; |
1167 | RegionMap.insert(I: StartBlockMap.begin(), E: StartBlockMap.end()); |
1168 | |
1169 | Builder.SetInsertPoint(BICopy); |
1170 | copyInstScalar(Stmt, Inst: TI, BBMap&: RegionMap, LTS); |
1171 | BICopy->eraseFromParent(); |
1172 | } |
1173 | |
1174 | // Add counting PHI nodes to all loops in the region that can be used as |
1175 | // replacement for SCEVs referring to the old loop. |
1176 | for (BasicBlock *BB : SeenBlocks) { |
1177 | Loop *L = LI.getLoopFor(BB); |
1178 | if (L == nullptr || L->getHeader() != BB || !R->contains(L)) |
1179 | continue; |
1180 | |
1181 | BasicBlock *BBCopy = StartBlockMap[BB]; |
1182 | Value *NullVal = Builder.getInt32(C: 0); |
1183 | PHINode *LoopPHI = |
1184 | PHINode::Create(Ty: Builder.getInt32Ty(), NumReservedValues: 2, NameStr: "polly.subregion.iv" ); |
1185 | Instruction *LoopPHIInc = BinaryOperator::CreateAdd( |
1186 | V1: LoopPHI, V2: Builder.getInt32(C: 1), Name: "polly.subregion.iv.inc" ); |
1187 | LoopPHI->insertBefore(InsertPos: &BBCopy->front()); |
1188 | LoopPHIInc->insertBefore(InsertPos: BBCopy->getTerminator()); |
1189 | |
1190 | for (auto *PredBB : predecessors(BB)) { |
1191 | if (!R->contains(BB: PredBB)) |
1192 | continue; |
1193 | if (L->contains(BB: PredBB)) |
1194 | LoopPHI->addIncoming(V: LoopPHIInc, BB: EndBlockMap[PredBB]); |
1195 | else |
1196 | LoopPHI->addIncoming(V: NullVal, BB: EndBlockMap[PredBB]); |
1197 | } |
1198 | |
1199 | for (auto *PredBBCopy : predecessors(BB: BBCopy)) |
1200 | if (LoopPHI->getBasicBlockIndex(BB: PredBBCopy) < 0) |
1201 | LoopPHI->addIncoming(V: NullVal, BB: PredBBCopy); |
1202 | |
1203 | LTS[L] = SE.getUnknown(V: LoopPHI); |
1204 | } |
1205 | |
1206 | // Continue generating code in the exit block. |
1207 | Builder.SetInsertPoint(&*ExitBBCopy->getFirstInsertionPt()); |
1208 | |
1209 | // Write values visible to other statements. |
1210 | generateScalarStores(Stmt, LTS, BBMap&: ValueMap, NewAccesses: IdToAstExp);
1211 | StartBlockMap.clear(); |
1212 | EndBlockMap.clear(); |
1213 | RegionMaps.clear(); |
1214 | IncompletePHINodeMap.clear(); |
1215 | } |
1216 | |
1217 | PHINode *RegionGenerator::buildExitPHI(MemoryAccess *MA, LoopToScevMapT &LTS,
1218 | ValueMapT &BBMap, Loop *L) { |
1219 | ScopStmt *Stmt = MA->getStatement(); |
1220 | Region *SubR = Stmt->getRegion(); |
1221 | auto Incoming = MA->getIncoming(); |
1222 | |
1223 | PollyIRBuilder::InsertPointGuard IPGuard(Builder); |
1224 | PHINode *OrigPHI = cast<PHINode>(Val: MA->getAccessInstruction()); |
1225 | BasicBlock *NewSubregionExit = Builder.GetInsertBlock(); |
1226 | |
1227 | // This can happen if the subregion is simplified after the ScopStmts |
1228 | // have been created; simplification happens as part of CodeGeneration. |
1229 | if (OrigPHI->getParent() != SubR->getExit()) { |
1230 | BasicBlock *FormerExit = SubR->getExitingBlock(); |
1231 | if (FormerExit) |
1232 | NewSubregionExit = StartBlockMap.lookup(Val: FormerExit); |
1233 | } |
1234 | |
1235 | PHINode *NewPHI = PHINode::Create(Ty: OrigPHI->getType(), NumReservedValues: Incoming.size(), |
1236 | NameStr: "polly." + OrigPHI->getName(), |
1237 | InsertBefore: NewSubregionExit->getFirstNonPHIIt()); |
1238 | |
1239 | // Add the incoming values to the PHI. |
1240 | for (auto &Pair : Incoming) { |
1241 | BasicBlock *OrigIncomingBlock = Pair.first; |
1242 | BasicBlock *NewIncomingBlockStart = StartBlockMap.lookup(Val: OrigIncomingBlock); |
1243 | BasicBlock *NewIncomingBlockEnd = EndBlockMap.lookup(Val: OrigIncomingBlock); |
1244 | Builder.SetInsertPoint(NewIncomingBlockEnd->getTerminator()); |
1245 | assert(RegionMaps.count(NewIncomingBlockStart)); |
1246 | assert(RegionMaps.count(NewIncomingBlockEnd)); |
1247 | ValueMapT *LocalBBMap = &RegionMaps[NewIncomingBlockStart]; |
1248 | |
1249 | Value *OrigIncomingValue = Pair.second; |
1250 | Value *NewIncomingValue = |
1251 | getNewValue(Stmt&: *Stmt, Old: OrigIncomingValue, BBMap&: *LocalBBMap, LTS, L); |
1252 | NewPHI->addIncoming(V: NewIncomingValue, BB: NewIncomingBlockEnd); |
1253 | } |
1254 | |
1255 | return NewPHI; |
1256 | } |
1257 | |
1258 | Value *RegionGenerator::getExitScalar(MemoryAccess *MA, LoopToScevMapT &LTS,
1259 | ValueMapT &BBMap) { |
1260 | ScopStmt *Stmt = MA->getStatement(); |
1261 | |
1262 | // TODO: Add some test cases that ensure this is really the right choice. |
1263 | Loop *L = LI.getLoopFor(BB: Stmt->getRegion()->getExit()); |
1264 | |
1265 | if (MA->isAnyPHIKind()) { |
1266 | auto Incoming = MA->getIncoming(); |
1267 | assert(!Incoming.empty() && |
1268 | "PHI WRITEs must have originate from at least one incoming block" ); |
1269 | |
1270 | // If there is only one incoming value, we do not need to create a PHI. |
1271 | if (Incoming.size() == 1) { |
1272 | Value *OldVal = Incoming[0].second; |
1273 | return getNewValue(Stmt&: *Stmt, Old: OldVal, BBMap, LTS, L); |
1274 | } |
1275 | |
1276 | return buildExitPHI(MA, LTS, BBMap, L); |
1277 | } |
1278 | |
1279 | // MemoryKind::Value accesses leaving the subregion must dominate the exit |
1280 | // block; just pass the copied value. |
1281 | Value *OldVal = MA->getAccessValue(); |
1282 | return getNewValue(Stmt&: *Stmt, Old: OldVal, BBMap, LTS, L); |
1283 | } |
1284 | |
1285 | void RegionGenerator::generateScalarStores( |
1286 | ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
1287 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1288 | assert(Stmt.getRegion() && |
1289 | "Block statements need to use the generateScalarStores() " |
1290 | "function in the BlockGenerator" ); |
1291 | |
1292 | // Get the exit scalar values before generating the writes. |
1293 | // This is necessary because RegionGenerator::getExitScalar may insert |
1294 | // PHINodes that depend on the region's exiting blocks. But |
1295 | // BlockGenerator::generateConditionalExecution may insert a new basic block |
1296 | // such that the current basic block is not a direct successor of the exiting |
1297 | // blocks anymore. Hence, build the PHINodes while the current block is still |
1298 | // the direct successor. |
1299 | SmallDenseMap<MemoryAccess *, Value *> NewExitScalars; |
1300 | for (MemoryAccess *MA : Stmt) { |
1301 | if (MA->isOriginalArrayKind() || MA->isRead()) |
1302 | continue; |
1303 | |
1304 | Value *NewVal = getExitScalar(MA, LTS, BBMap); |
1305 | NewExitScalars[MA] = NewVal; |
1306 | } |
1307 | |
1308 | for (MemoryAccess *MA : Stmt) { |
1309 | if (MA->isOriginalArrayKind() || MA->isRead()) |
1310 | continue; |
1311 | |
1312 | isl::set AccDom = MA->getAccessRelation().domain(); |
1313 | std::string Subject = MA->getId().get_name(); |
1314 | generateConditionalExecution( |
1315 | Stmt, Subdomain: AccDom, Subject: Subject.c_str(), GenThenFunc: [&, this, MA]() { |
1316 | Value *NewVal = NewExitScalars.lookup(Val: MA); |
1317 | assert(NewVal && "The exit scalar must be determined before" ); |
1318 | Value *Address = getImplicitAddress(Access&: *MA, L: getLoopForStmt(Stmt), LTS, |
1319 | BBMap, NewAccesses); |
1320 | assert((!isa<Instruction>(NewVal) || |
1321 | DT.dominates(cast<Instruction>(NewVal)->getParent(), |
1322 | Builder.GetInsertBlock())) && |
1323 | "Domination violation" ); |
1324 | assert((!isa<Instruction>(Address) || |
1325 | DT.dominates(cast<Instruction>(Address)->getParent(), |
1326 | Builder.GetInsertBlock())) && |
1327 | "Domination violation" ); |
1328 | Builder.CreateStore(Val: NewVal, Ptr: Address); |
1329 | }); |
1330 | } |
1331 | } |
1332 | |
1333 | void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, PHINode *PHI, |
1334 | PHINode *PHICopy, BasicBlock *IncomingBB, |
1335 | LoopToScevMapT &LTS) {
1336 | // If the incoming block was not yet copied, mark this PHI as incomplete.
1337 | // Once the block has been copied, the incoming value will be added.
1338 | BasicBlock *BBCopyStart = StartBlockMap[IncomingBB]; |
1339 | BasicBlock *BBCopyEnd = EndBlockMap[IncomingBB]; |
1340 | if (!BBCopyStart) { |
1341 | assert(!BBCopyEnd); |
1342 | assert(Stmt.represents(IncomingBB) && |
1343 | "Bad incoming block for PHI in non-affine region" ); |
1344 | IncompletePHINodeMap[IncomingBB].push_back(Elt: std::make_pair(x&: PHI, y&: PHICopy)); |
1345 | return; |
1346 | } |
1347 | |
1348 | assert(RegionMaps.count(BBCopyStart) && |
1349 | "Incoming PHI block did not have a BBMap" ); |
1350 | ValueMapT &BBCopyMap = RegionMaps[BBCopyStart]; |
1351 | |
1352 | Value *OpCopy = nullptr; |
1353 | |
1354 | if (Stmt.represents(BB: IncomingBB)) { |
1355 | Value *Op = PHI->getIncomingValueForBlock(BB: IncomingBB); |
1356 | |
1357 | // If the current insert block is different from the PHI's incoming block,
1358 | // change it; otherwise do not.
1359 | auto IP = Builder.GetInsertPoint(); |
1360 | if (IP->getParent() != BBCopyEnd) |
1361 | Builder.SetInsertPoint(BBCopyEnd->getTerminator()); |
1362 | OpCopy = getNewValue(Stmt, Old: Op, BBMap&: BBCopyMap, LTS, L: getLoopForStmt(Stmt)); |
1363 | if (IP->getParent() != BBCopyEnd) |
1364 | Builder.SetInsertPoint(&*IP); |
1365 | } else { |
1366 | // All edges from outside the non-affine region become a single edge |
1367 | // in the new copy of the non-affine region. Make sure to only add the |
1368 | // corresponding edge the first time we encounter a basic block from |
1369 | // outside the non-affine region. |
1370 | if (PHICopy->getBasicBlockIndex(BB: BBCopyEnd) >= 0) |
1371 | return; |
1372 | |
1373 | // Get the reloaded value. |
1374 | OpCopy = getNewValue(Stmt, Old: PHI, BBMap&: BBCopyMap, LTS, L: getLoopForStmt(Stmt)); |
1375 | } |
1376 | |
1377 | assert(OpCopy && "Incoming PHI value was not copied properly" ); |
1378 | PHICopy->addIncoming(V: OpCopy, BB: BBCopyEnd); |
1379 | } |
1380 | |
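// Create the copy of a PHI node in the currently generated block. The copy is
// moved in front of the first non-PHI instruction and registered in BBMap; its
// incoming values are added via addOperandToPHI, which defers operands whose
// incoming blocks have not been copied yet.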
1381 | void RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI, |
1382 | ValueMapT &BBMap, |
1383 | LoopToScevMapT &LTS) {
1384 | unsigned NumIncoming = PHI->getNumIncomingValues(); |
1385 | PHINode *PHICopy = |
1386 | Builder.CreatePHI(Ty: PHI->getType(), NumReservedValues: NumIncoming, Name: "polly." + PHI->getName()); |
1387 | PHICopy->moveBefore(MovePos: PHICopy->getParent()->getFirstNonPHI()); |
1388 | BBMap[PHI] = PHICopy; |
1389 | |
1390 | for (BasicBlock *IncomingBB : PHI->blocks()) |
1391 | addOperandToPHI(Stmt, PHI, PHICopy, IncomingBB, LTS); |
1392 | } |
1393 | |