//===- Fusion.cpp - Implementation of linalg Fusion -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Fusion pass.
//
//===----------------------------------------------------------------------===//

13#include "mlir/Dialect/Affine/IR/AffineOps.h"
14#include "mlir/Dialect/Arith/IR/Arith.h"
15#include "mlir/Dialect/Linalg/IR/Linalg.h"
16#include "mlir/Dialect/Linalg/Passes.h"
17#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
18#include "mlir/Dialect/Linalg/Utils/Utils.h"
19#include "mlir/Dialect/MemRef/IR/MemRef.h"
20#include "mlir/Dialect/Tensor/IR/Tensor.h"
21#include "mlir/Dialect/Tensor/Utils/Utils.h"
22#include "mlir/IR/AffineExpr.h"
23#include "mlir/IR/AffineMap.h"
24#include "mlir/IR/Dominance.h"
25#include "mlir/Support/LLVM.h"
26#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
27#include "mlir/Transforms/RegionUtils.h"
28#include "llvm/ADT/MapVector.h"
29#include "llvm/ADT/ScopeExit.h"
30#include "llvm/ADT/SmallBitVector.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33
34#include <optional>
35#include <set>
36
37#define DEBUG_TYPE "linalg-fusion"

using namespace mlir;
using namespace mlir::linalg;

/// Implements a simple high-level fusion pass on linalg structured operations.
///
/// In each block, linalg ops are processed in reverse textual order.
/// Given a linalg op `O`, fusion occurs by:
/// 1. inspecting the linalg ops that write into the views read by `O`. There
///    are 2 cases:
///    a) buffer case: use the SSA value of the views and a simple alias
///       analysis on subview ops to determine producer-consumer dependences;
///    b) tensor case: use SSA use-def chains on extract_slice ops;
/// 2. greedily fusing the linalg ops that produce the subview/extract_slice;
/// 3. inspecting the fused ops and determining whether they have any other
///    remaining LinalgOp uses; if not, erasing the original producing op.
///
/// More advanced use cases, analyses, and profitability heuristics are
/// left for future work.
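///
/// As an illustrative sketch (hypothetical IR, not taken from a test), a
/// producer whose result is sliced by a tiled consumer:
///
///   %p = linalg.matmul ins(%a, %b : ...) outs(%c : tensor<64x64xf32>)
///   %s = tensor.extract_slice %p[%i, 0] [4, 64] [1, 1]
///       : tensor<64x64xf32> to tensor<4x64xf32>
///   %r = linalg.generic ... ins(%s : tensor<4x64xf32>) ...
///
/// is fused by cloning a version of %p that computes only the 4x64 tile and
/// feeding that clone directly to %r.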

struct ShapeDimension {
  Value shape;
  unsigned dimension;
};

// Given an `op`, returns the first (`shape`, `dimension`) pair that identifies
// the loop range at `loopDepth`. The semantics of the loopToOperandRangesMaps
// guarantees at least one such dimension is found. If multiple candidates
// exist, they must agree by construction (i.e. have the same size) and we just
// return the first one.
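//
// For example (illustrative, assuming the standard linalg.matmul indexing
// maps (d0, d2), (d2, d1), (d0, d1)): for loopDepth = 2, the first operand
// whose map mentions d2 is the LHS, whose map has d2 in result position 1,
// so the function returns (lhs, 1).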
static ShapeDimension
getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
                          bool fromSubViewOpOnly = false) {
  // Iterate over the inputs and outputs in order.
  // Extract the subranges from the linearized ranges.
  for (OpOperand &opOperand : op->getOpOperands()) {
    // The method `getRangeFromOperandShape` requires the value to be defined
    // by a SubViewOp or an ExtractSliceOp. If it isn't, continue.
    // TODO: The method should be adapted to get the values from
    // `ViewInterface`. The interface needs a `getOrCreateRanges` method which
    // currently returns a `linalg.range`. The fix here is to move this op to
    // `std` dialect and add the method to `ViewInterface`.
    if (fromSubViewOpOnly &&
        !isa_and_nonnull<memref::SubViewOp, tensor::ExtractSliceOp>(
            opOperand.get().getDefiningOp()))
      continue;

    AffineMap map = op.getMatchingIndexingMap(&opOperand);
    LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange I/O idx: "
                            << opOperand.getOperandNumber() << "\n");
    LLVM_DEBUG(llvm::dbgs()
               << "getShapeDefiningLoopRange map: " << map << "\n");
    for (const auto &en : llvm::enumerate(map.getResults())) {
      auto dimExpr = dyn_cast<AffineDimExpr>(en.value());
      if (!dimExpr)
        continue;
      if (loopDepth == dimExpr.getPosition()) {
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange loopDepth: "
                                << loopDepth << "\n");
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange shape: "
                                << opOperand.get() << "\n");
        return ShapeDimension{opOperand.get(),
                              static_cast<unsigned>(en.index())};
      }
    }
  }
  llvm_unreachable("Expect to be able to extract a shape defining loop range");
}

static SmallVector<Value> getTiledOperands(LinalgOp producer) {
  return producer->getOperands();
}

/// Fuses the producer by cloning the `producer`. The `fusedLoopsAndRanges`
/// provides the loop range information for the fused loops. The rest are
/// obtained from the producer itself, since they are not tiled + fused.
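///
/// For instance (illustrative), if `fusedLoopsAndRanges` maps loop 0 to
/// Range{%iv, %ts, 1}, loop 0 of the clone is restricted to that tile while
/// every other loop keeps its full extent, taken from the producer's shapes.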
static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
                     const DenseMap<unsigned, Range> &fusedLoopsAndRanges) {
  SmallVector<OpFoldResult> ivs, tileSizes, sizeBounds;
  SmallVector<Range> loopRanges;
  Location loc = producer.getLoc();

  for (unsigned i = 0, e = producer.getNumLoops(); i < e; ++i) {
    auto shapeDim = getShapeDefiningLoopRange(producer, i);
    OpFoldResult dim =
        createFoldedDimOp(b, loc, shapeDim.shape, shapeDim.dimension);
    sizeBounds.push_back(dim);
    auto it = fusedLoopsAndRanges.find(i);
    if (it != fusedLoopsAndRanges.end()) {
      ivs.push_back(it->second.offset);
      tileSizes.push_back(it->second.size);
      loopRanges.push_back(it->second);
      LLVM_DEBUG(llvm::dbgs() << "tiled loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    } else {
      tileSizes.push_back(b.getIndexAttr(0));
      loopRanges.push_back(Range{b.getIndexAttr(0), dim, b.getIndexAttr(1)});
      LLVM_DEBUG(llvm::dbgs() << "full loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    }
  }

  SmallVector<Value, 8> clonedShapes;
  clonedShapes.reserve(producer->getNumOperands());

  // Compute subranges for all tensor input/output operands.
  clonedShapes.append(makeTiledShapes(
      b, loc, producer, getTiledOperands(producer), ivs, tileSizes, sizeBounds,
      /*omitPartialTileCheck=*/false));

  // Take result types from the tiled init operands.
  MutableOperandRange producerDpsInits = producer.getDpsInitsMutable();
  SmallVector<Type, 4> resultTypes;
  resultTypes.reserve(producer->getNumResults());
  int64_t firstInitOperandIdx =
      producerDpsInits.getAsOperandRange().getBeginOperandIndex();
  for (int64_t i = 0, e = producer->getNumResults(); i < e; ++i) {
    resultTypes.push_back(clonedShapes[firstInitOperandIdx + i].getType());
  }

  // Clone the producer with new operands and result types.
  LinalgOp clonedOp = clone(b, producer, resultTypes, clonedShapes);

  // Shift all IndexOp results by the tile offset.
  SmallVector<OpFoldResult> allIvs = llvm::to_vector(
      llvm::map_range(loopRanges, [&](Range range) { return range.offset; }));
  offsetIndices(b, clonedOp, allIvs);

  return clonedOp;
}

/// Get the loop range for a dimension `dim` based on the `shapedOperand`. It
/// is expected to be defined by a subview op or an extract_slice op.
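///
/// For example (illustrative): if `shapedOperand` is defined by
///   %s = tensor.extract_slice %t[%o0, %o1] [%sz0, %sz1] [1, 1] ...
/// then dim = 1 yields Range{%o1, %sz1, 1}.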
static Range getRangeFromOperandShape(OpBuilder &b, Location loc,
                                      Value shapedOperand, unsigned dim) {
  Operation *shapeProducingOp = shapedOperand.getDefiningOp();
  if (auto subViewOp = dyn_cast<memref::SubViewOp>(shapeProducingOp))
    return subViewOp.getOrCreateRanges(b, loc)[dim];
  if (auto sliceOp = dyn_cast<tensor::ExtractSliceOp>(shapeProducingOp))
    return sliceOp.getOrCreateRanges(b, loc)[dim];
  llvm_unreachable("SubviewOp or ExtractSliceOp expected");
}

/// Fuses the producer into the loop immediately enclosing the consumer.
/// This is achieved by "recomputing" the producer at the time it
/// is needed just before the consumer.
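///
/// For example (illustrative), with producerMap = (d0, d1) -> (d1, d0) and a
/// consumer operand defined by an extract_slice with ranges r0 and r1,
/// producer loop d1 is fused with range r0 and loop d0 with range r1.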
static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap,
                     OpOperand &consumerOpOperand) {
  LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n");
  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  Value shapedOperand = consumerOpOperand.get();
  for (const auto &en : llvm::enumerate(producerMap.getResults())) {
    unsigned posInProducerLoop = cast<AffineDimExpr>(en.value()).getPosition();
    fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape(
        b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index());
  }
  return fuse(b, producerOp, fusedLoopsAndRanges);
}

/// Walk back the use-def chain through tensor.extract_slice ops and scf::For
/// iteration arguments. Sets `opResult` to the producer's result if it finds
/// a producing LinalgOp.
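///
/// Illustrative chain (hypothetical IR): starting from %arg in
///   %p = linalg.generic ...
///   %s = tensor.extract_slice %p ...
///   scf.for ... iter_args(%arg = %s) ...
/// the walk reaches %p and reports its result.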

// TODO(ravishankarm, ntv): This can be moved into the dependence graph's
// dependence tracking, since the tracking is similar to what is done w.r.t.
// buffers.
static void getProducerOfTensor(Value tensor, OpResult &opResult) {
  if (!isa<RankedTensorType>(tensor.getType()))
    return;

  while (true) {
    LLVM_DEBUG(llvm::dbgs() << "\ngetProducerOfTensor: " << tensor);
    if (auto linalgOp = tensor.getDefiningOp<LinalgOp>()) {
      opResult = cast<OpResult>(tensor);
      return;
    }
    if (auto sliceOp = tensor.getDefiningOp<tensor::ExtractSliceOp>()) {
      tensor = sliceOp.getSource();
      continue;
    }
    if (auto blockArg = dyn_cast<BlockArgument>(tensor)) {
      // Block arguments have no defining op; look through the enclosing
      // scf.for instead. Iteration arguments follow the induction variable,
      // hence the shift by one when indexing the init operands.
      auto forOp =
          dyn_cast_if_present<scf::ForOp>(blockArg.getOwner()->getParentOp());
      if (forOp && blockArg.getArgNumber() > 0) {
        tensor = forOp.getInitArgs()[blockArg.getArgNumber() - 1];
        continue;
      }
    }
    return;
  }
}

FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpOperand &consumerOpOperand) {
  Value inputTensor = consumerOpOperand.get();
  OpResult producerOpResult;
  getProducerOfTensor(inputTensor, producerOpResult);
  if (!producerOpResult) {
    LLVM_DEBUG(llvm::dbgs() << "\nUnable to find producer");
    return failure();
  }
  return fuseProducerOfTensor(b, producerOpResult, consumerOpOperand);
}

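// Minimal usage sketch (hypothetical caller, for illustration only):
//
//   OpBuilder b(consumer);
//   FailureOr<FusionInfo> info =
//       fuseProducerOfTensor(b, consumer->getOpOperand(0));
//   if (succeeded(info))
//     ...  // info->fusedProducer is the clone computing just the tile.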
FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
                                   OpOperand &consumerOpOperand) {
  auto producerOp = dyn_cast<LinalgOp>(producerOpResult.getOwner());
  if (!producerOp)
    return failure();

  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return failure();

  Value inputTensor = consumerOpOperand.get();

  // Must be an extract_slice op to guarantee there are loops we can fuse into.
  auto sliceOp = inputTensor.getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp) {
    LLVM_DEBUG(llvm::dbgs()
               << "\nNot fusable, not an extract_slice op: " << inputTensor);
    return failure();
  }

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      producerOpResult.getParentBlock())
    return failure();

  // Insert fused `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOp);
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: " << *consumerOp << "\n");
  OpOperand *opOperand =
      producerOp.getDpsInitOperand(producerOpResult.getResultNumber());
  LinalgOp fusedProducer =
      fuse(b, producerOp, producerOp.getMatchingIndexingMap(opOperand),
           consumerOpOperand);

  // Replace use.
  Value def = fusedProducer->getResult(producerOpResult.getResultNumber());
  Type consumerType = consumerOpOperand.get().getType();
  // Check if rank-reduction occurred as part of the extract_slice. If yes,
  // collapse the dropped dimensions.
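  // E.g. (illustrative): a rank-reducing extract_slice from tensor<4x1x8xf32>
  // to tensor<4x8xf32> has droppedDims = {1}; the corresponding unit
  // dimension must be collapsed away from `def` to match the consumer's type.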
  if (cast<ShapedType>(consumerType).getRank() !=
      cast<ShapedType>(def.getType()).getRank()) {
    llvm::SmallBitVector droppedDims = sliceOp.getDroppedDims();
    def =
        tensor::dropGivenUnitDims(b, fusedProducer.getLoc(), def, droppedDims);
  }
  // Canonicalizations are not guaranteed to have happened before constructing
  // `fusedProducer`. In the tensor case this can result in temporary type
  // mismatches. Insert a `tensor.cast` op to propagate the transformation
  // invariant that types are compatible.
  if (consumerType != def.getType())
    def = b.create<tensor::CastOp>(fusedProducer.getLoc(), consumerType, def);
  consumerOpOperand.set(def);
  return FusionInfo{cast<LinalgOp>(producerOpResult.getOwner()), fusedProducer};
}
