//===- AffineLoopInvariantCodeMotion.cpp - Loop-invariant code motion -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements loop invariant code motion.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/Passes.h"

#include "mlir/Dialect/Affine/Analysis/Utils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

namespace mlir {
namespace affine {
#define GEN_PASS_DEF_AFFINELOOPINVARIANTCODEMOTION
#include "mlir/Dialect/Affine/Passes.h.inc"
} // namespace affine
} // namespace mlir

#define DEBUG_TYPE "affine-licm"

using namespace mlir;
using namespace mlir::affine;

namespace {

/// Affine loop invariant code motion (LICM) pass.
/// TODO: The pass is missing zero-trip-count tests.
/// TODO: When compared to the other standard LICM pass, this pass has some
/// special handling for affine read/write ops, but such handling requires
/// aliasing to be sound, and as such this pass is unsound. In addition, this
/// handling is nothing particular to affine memory ops but would apply to any
/// op with memory read/write effects. Either aliasing should be handled or
/// this pass can be removed and the standard LICM can be used.
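///
/// As an illustrative sketch of the intended behavior (not taken from the
/// tests), given a memref %m defined above the nest:
///
///   affine.for %i = 0 to 10 {
///     %cf = arith.constant 7.0 : f32
///     affine.for %j = 0 to 10 {
///       affine.store %cf, %m[%i, %j] : memref<10x10xf32>
///     }
///   }
///
/// the `arith.constant`, which is memory-effect free and has no
/// loop-dependent operands, is hoisted above the outer loop, while the
/// `affine.store`, which uses both induction variables, stays in place. The
/// pass is normally exercised through the `affine-loop-invariant-code-motion`
/// pass argument (assuming the standard pass registration).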
struct LoopInvariantCodeMotion
    : public affine::impl::AffineLoopInvariantCodeMotionBase<
          LoopInvariantCodeMotion> {
  void runOnOperation() override;
  void runOnAffineForOp(AffineForOp forOp);
};
} // namespace

static bool
checkInvarianceOfNestedIfOps(AffineIfOp ifOp, AffineForOp loop,
                             SmallPtrSetImpl<Operation *> &opsWithUsers,
                             SmallPtrSetImpl<Operation *> &opsToHoist);
static bool isOpLoopInvariant(Operation &op, AffineForOp loop,
                              SmallPtrSetImpl<Operation *> &opsWithUsers,
                              SmallPtrSetImpl<Operation *> &opsToHoist);

static bool
areAllOpsInTheBlockListInvariant(Region &blockList, AffineForOp loop,
                                 SmallPtrSetImpl<Operation *> &opsWithUsers,
                                 SmallPtrSetImpl<Operation *> &opsToHoist);

/// Returns true if `op` is invariant with respect to `loop`.
static bool isOpLoopInvariant(Operation &op, AffineForOp loop,
                              SmallPtrSetImpl<Operation *> &opsWithUsers,
                              SmallPtrSetImpl<Operation *> &opsToHoist) {
  Value iv = loop.getInductionVar();

  if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
    if (!checkInvarianceOfNestedIfOps(ifOp, loop, opsWithUsers, opsToHoist))
      return false;
  } else if (auto forOp = dyn_cast<AffineForOp>(op)) {
    if (!areAllOpsInTheBlockListInvariant(forOp.getRegion(), loop, opsWithUsers,
                                          opsToHoist))
      return false;
  } else if (auto parOp = dyn_cast<AffineParallelOp>(op)) {
    if (!areAllOpsInTheBlockListInvariant(parOp.getRegion(), loop, opsWithUsers,
                                          opsToHoist))
      return false;
  } else if (!isMemoryEffectFree(&op) &&
             !isa<AffineReadOpInterface, AffineWriteOpInterface>(&op)) {
    // Check for side-effecting ops. Affine read/write ops are handled
    // separately below.
    return false;
  } else if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
    // Register op in the set of ops that have users.
    opsWithUsers.insert(&op);
    SmallVector<AffineForOp, 8> userIVs;
    auto read = dyn_cast<AffineReadOpInterface>(op);
    Value memref =
        read ? read.getMemRef() : cast<AffineWriteOpInterface>(op).getMemRef();
    for (auto *user : memref.getUsers()) {
      // If the memref accessed by the load/store is written to by a store
      // elsewhere in the loop nest, we do not hoist. Similarly, if `op` is a
      // store and the memref is read elsewhere in the loop nest, we do not
      // hoist the store.
      // FIXME: This is missing alias checking.
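      // For instance (illustrative, not from the tests), a load of %m[0] in
      // this loop is not hoisted if the same loop nest also stores to %m,
      // since the hoisted load could otherwise observe a stale value.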
      if (&op == user)
        continue;
      if (hasEffect<MemoryEffects::Write>(user, memref) ||
          (hasEffect<MemoryEffects::Read>(user, memref) &&
           isa<AffineWriteOpInterface>(op))) {
        userIVs.clear();
        getAffineForIVs(*user, &userIVs);
        // Check that userIVs don't contain the for loop around the op.
        if (llvm::is_contained(userIVs, loop))
          return false;
      }
    }
  }

  // Check operands.
  ValueRange iterArgs = loop.getRegionIterArgs();
  for (unsigned int i = 0; i < op.getNumOperands(); ++i) {
    auto *operandSrc = op.getOperand(i).getDefiningOp();

    // If the loop IV is the operand, this op isn't loop invariant.
    if (iv == op.getOperand(i))
      return false;

    // If one of the iter_args is the operand, this op isn't loop invariant.
    if (llvm::is_contained(iterArgs, op.getOperand(i)))
      return false;

    if (operandSrc) {
      // If the value was defined in the loop (outside of the if/else region),
      // and that operation itself wasn't meant to be hoisted, then mark this
      // operation loop dependent.
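      // For example, the result of a load that stays in the loop keeps all of
      // its users in the loop as well.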
      if (opsWithUsers.count(operandSrc) && opsToHoist.count(operandSrc) == 0)
        return false;
    }
  }

  // If no operand was loop variant, mark this op for motion.
  opsToHoist.insert(&op);
  return true;
}

// Checks if all ops in a region (i.e. list of blocks) are loop invariant.
static bool
areAllOpsInTheBlockListInvariant(Region &blockList, AffineForOp loop,
                                 SmallPtrSetImpl<Operation *> &opsWithUsers,
                                 SmallPtrSetImpl<Operation *> &opsToHoist) {

  for (auto &b : blockList) {
    for (auto &op : b) {
      if (!isOpLoopInvariant(op, loop, opsWithUsers, opsToHoist))
        return false;
    }
  }

  return true;
}

// Returns true if the affine.if op can be hoisted.
static bool
checkInvarianceOfNestedIfOps(AffineIfOp ifOp, AffineForOp loop,
                             SmallPtrSetImpl<Operation *> &opsWithUsers,
                             SmallPtrSetImpl<Operation *> &opsToHoist) {
  if (!areAllOpsInTheBlockListInvariant(ifOp.getThenRegion(), loop,
                                        opsWithUsers, opsToHoist))
    return false;

  if (!areAllOpsInTheBlockListInvariant(ifOp.getElseRegion(), loop,
                                        opsWithUsers, opsToHoist))
    return false;

  return true;
}

void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
  // This is the place where hoisted instructions would reside.
  OpBuilder b(forOp.getOperation());

  SmallPtrSet<Operation *, 8> opsToHoist;
  SmallVector<Operation *, 8> opsToMove;
  SmallPtrSet<Operation *, 8> opsWithUsers;

  for (Operation &op : *forOp.getBody()) {
    // Register op in the set of ops that have users. This set is used to
    // prevent hoisting ops that depend on ops which are themselves not
    // hoisted.
    if (!op.use_empty())
      opsWithUsers.insert(&op);
    if (!isa<AffineYieldOp>(op)) {
      if (isOpLoopInvariant(op, forOp, opsWithUsers, opsToHoist)) {
        opsToMove.push_back(&op);
      }
    }
  }

  // For all instructions that we found to be invariant, place them
  // sequentially right before the for loop.
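  // Moving them in body order keeps producers ahead of their consumers among
  // the hoisted ops, so dominance is preserved.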
  for (auto *op : opsToMove) {
    op->moveBefore(forOp);
  }
}

void LoopInvariantCodeMotion::runOnOperation() {
  // Walk through all loops in a function in innermost-loop-first order. This
  // way, we first LICM from the inner loop, and place the ops in the outer
  // loop, which in turn can be further LICM'ed.
  getOperation().walk([&](AffineForOp op) { runOnAffineForOp(op); });
}

std::unique_ptr<OperationPass<func::FuncOp>>
mlir::affine::createAffineLoopInvariantCodeMotionPass() {
  return std::make_unique<LoopInvariantCodeMotion>();
}