//===- SparseReinterpretMap.cpp - reinterpret sparse tensor maps ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Utils/CodegenUtils.h"
#include "Utils/IterationGraphSorter.h"

#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorType.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

//===----------------------------------------------------------------------===//
// File Local Helper classes.
//===----------------------------------------------------------------------===//

// CRTP helper to implement a rewriter that demaps all its inputs.
template <typename SubClass, typename SourceOp>
struct DemapInsRewriter : public OpRewritePattern<SourceOp> {
  using OpRewritePattern<SourceOp>::OpRewritePattern;
  using OpAdaptor = typename SourceOp::Adaptor;

  LogicalResult matchAndRewrite(SourceOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();

    // Demaps non-trivial inputs.
    bool changed = false;
    SmallVector<Value> deMappedIns(op->getOperands());
    for (Value &in : deMappedIns) {
      if (auto stt = tryGetSparseTensorType(in); stt && !stt->isIdentity()) {
        in = rewriter.create<ReinterpretMapOp>(loc, stt->getDemappedType(), in);
        changed = true;
      }
    }

    // CRTP call.
    OpAdaptor adaptor(deMappedIns, op);
    LogicalResult status =
        static_cast<const SubClass *>(this)->rewriteOp(op, adaptor, rewriter);
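    // If any input was demapped above, the IR has already been modified, so
    // report success even when the subclass rewrite itself bailed out.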
    return changed ? success() : status;
  }
};

// Flattens an affine expression into a list of AffineDimExprs.
struct AffineDimCollector : public AffineExprVisitor<AffineDimCollector> {
  explicit AffineDimCollector(unsigned dimNum) : dims(dimNum) {}
  void visitDimExpr(AffineDimExpr expr) { dims.set(expr.getPosition()); }
  BitVector dims;
};

// Determines whether an affine expression is admissible on an input or
// output operand: only AffineDimExprs are allowed on outputs, and mod,
// floordiv and ceildiv are disallowed on inputs.
struct AffineExprAdmissibleVisitor
    : public AffineExprVisitor<AffineExprAdmissibleVisitor> {
  explicit AffineExprAdmissibleVisitor(bool isOutput)
      : admissible(true), isOutput(isOutput) {}

  // We only allow AffineDimExpr on output.
  void visitAddExpr(AffineBinaryOpExpr expr) {
    if (isOutput)
      admissible = false;
  }
  void visitMulExpr(AffineBinaryOpExpr expr) {
    if (isOutput)
      admissible = false;
  }

  // We disallow mod, floor div and ceil div on inputs.
  void visitModExpr(AffineBinaryOpExpr expr) { admissible = false; }
  void visitFloorDivExpr(AffineBinaryOpExpr expr) { admissible = false; }
  void visitCeilDivExpr(AffineBinaryOpExpr expr) { admissible = false; }
  operator bool() { return admissible; }

private:
  bool admissible;
  bool isOutput;
};

// The first BitVector stores the levels where inadmissible exprs are used.
// The second BitVector stores the AffineDimExprs that are used by the
// inadmissible expressions.
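// For example (illustrative), for an input operand with index map
//   (d0, d1) -> (d0, d1 floordiv 4, d1 mod 4)
// levels {1, 2} are inadmissible, and the only dimension used by the
// inadmissible expressions is d1.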
using InadmissInfo = std::pair<BitVector, BitVector>;

} // namespace

//===----------------------------------------------------------------------===//
// File Local Helper methods.
//===----------------------------------------------------------------------===//

// Collects the inadmissible affine expressions imposed on levels.
static InadmissInfo collectInadmissInfo(AffineMap map, bool isOutput) {
  auto ret = std::make_pair(BitVector(map.getNumResults()),
                            BitVector(map.getNumDims()));
  AffineDimCollector collector(map.getNumDims());
  for (unsigned lvl = 0, e = map.getNumResults(); lvl < e; lvl++) {
    AffineExprAdmissibleVisitor admissible(isOutput);
    admissible.walkPostOrder(map.getResult(lvl));
    if (!admissible) {
      // Record the inadmissible level.
      ret.first.set(lvl);
      // Record the AffineDimExprs that are used in the inadmissible expr.
      collector.walkPostOrder(map.getResult(lvl));
    }
  }
  ret.second = collector.dims;
  return ret;
}

// Builds the AffineMap to replace the index variables in idxMap with level
// variables such that all the inadmissible affine expressions can be
// eliminated.
// For example, we can rewrite
//   idxMap = (d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)
// to
//   idxMap = (l0, l1, l2, l3) -> (l0, l1, l2, l3)
// by composing inverse(idxMap), that is
//   inverse(idxMap) . idxMap = (l0, l1, l2, l3) -> (l0 * 2 + l2, l1 * 3 + l3)
//                           -> ((l0 * 2 + l2) floordiv 2,
//                               (l1 * 3 + l3) floordiv 3,
//                               (l0 * 2 + l2) mod 2,
//                               (l1 * 3 + l3) mod 3) = (l0, l1, l2, l3)
//
// This function builds the inverse(idxMap) that replaces every dimension used
// in `info` with levels, and updates the iterator type array `itTps` for the
// new index variables introduced.
//
// Note that the returned affine map does not retain the order of the input
// affine map. Instead, it always uses the first `info.inAdLvls.count()` dims
// for the replaced levels, and the remaining ones for the unused dimensions.
// For example, to handle
//   idxMap = (d0, d1) -> (d0, d1 floordiv 4, d1 mod 4)
// which is a typical map for block_2to4, the function returns:
//   inverse(idxMap) = (l0, l1, d0) -> (d0, l0 * 4 + l1)
// in which (l0, l1) together replace `d1`, yet they appear
// before `d0` in the resulting affine map.
// The index (loop) order can later be canonicalized by a topological sort.
static AffineMap
genReplaceDimToLvlMap(const InadmissInfo &info, AffineMap idxMap,
                      SmallVector<utils::IteratorType> &itTps) {
  MLIRContext *ctx = idxMap.getContext();
  auto [inAdLvls, usedDims] = info;
  // Note that idxMap does not equal the dim2Lvl map; it is computed by
  // composing dim2Lvl(idx2Dim). They are only equal when idx2Dim is an
  // identity map.
  // TODO: we might fail here; in those cases we should return failure
  // instead of asserting.
  auto lvl2Idx = inferLvlToDim(idxMap, ctx);

  assert(lvl2Idx.getNumResults() <= idxMap.getNumDims());
  if (lvl2Idx.getNumResults() != idxMap.getNumDims()) {
    // This could happen when some dimensions are projected.
    // E.g., idx2Lvl = (*i*, j, k) -> (j, k)
    //   ==> lvl2Idx = (j, k) -> (j, k)
    // In this case, we append the unused dimensions at the end.
    //   ==> lvl2Idx = (j, k, *i*) -> (*i*, j, k)
    SmallVector<AffineExpr> results;
    AffineDimCollector usedInLvl(idxMap.getNumDims());
    for (auto e : idxMap.getResults())
      usedInLvl.walkPostOrder(e);

    unsigned curUsedDimID = 0;
    unsigned curUnusedDimID = lvl2Idx.getNumDims();

    BitVector unused = usedInLvl.dims.flip();
    for (unsigned i = 0; i < idxMap.getNumDims(); i++) {
      if (unused.test(i))
        results.push_back(getAffineDimExpr(curUnusedDimID++, ctx));
      else
        results.push_back(lvl2Idx.getResult(curUsedDimID++));
    }
    lvl2Idx =
        AffineMap::get(lvl2Idx.getNumDims() + unused.count(), 0, results, ctx);
  }
  assert(lvl2Idx.getNumResults() == idxMap.getNumDims());

  // We do not need to replace the DimExprs that are not used in inadmissible
  // level expressions. We use the first inAdLvls.count() dims to represent the
  // replaced levels; the remaining ones are reserved for unchanged ones.
  // Note that the results from the inverse map computed previously do not
  // follow this convention, and we need to fix the mismatch below.
  unsigned curRepID = 0;
  unsigned curOriID = inAdLvls.count();
  SmallVector<AffineExpr> results;
  SmallVector<AffineExpr> dimRep(idxMap.getNumResults(), AffineExpr());
  SmallVector<utils::IteratorType> transItTps;

  for (unsigned l : inAdLvls.set_bits()) {
    // By our convention, the inadmissible level `l` always appears in the
    // leading part (accumulated by curRepID) of the affine map's parameter
    // list. Record the mapping so that we can replace all the uses of `l` with
    // the correct position after the translation.
    dimRep[l] = getAffineDimExpr(curRepID++, ctx);
    // A new index variable is introduced for the inadmissible level; it
    // inherits the iterator type. E.g., if l0 = d0 floordiv 2, the
    // iterator type of l0 equals the iterator type of d0.
    AffineExpr lvlExp = idxMap.getResult(l);
    AffineDimCollector collector(idxMap.getNumDims());
    collector.walkPostOrder(lvlExp);
    // We assume a level can only be derived from one dimension.
    assert(collector.dims.count() == 1);
    transItTps.push_back(itTps[collector.dims.find_first()]);
  }

  for (unsigned d = 0, e = idxMap.getNumDims(); d < e; d++) {
    if (usedDims.test(d)) {
      // The dimension is used in some of the inadmissible levels, and it needs
      // to be inverted. Get the inversion from the inverse map, and fix the
      // mismatch captured by the above loop.
      results.push_back(lvl2Idx.getResult(d).replaceDims(dimRep));
    } else {
      // The dimension is not used in any of the inadmissible levels, and it
      // does not need to be inverted. Fix the mismatch by mapping it to the
      // trailing part of the affine map (accumulated by curOriID).
      results.push_back(getAffineDimExpr(curOriID++, ctx));
      transItTps.push_back(itTps[d]);
    }
  }
  unsigned numDim = idxMap.getNumDims() - usedDims.count() + inAdLvls.count();
  // Update the iterator types.
  itTps.assign(transItTps.begin(), transItTps.end());
  return AffineMap::get(numDim, 0, results, ctx);
}

// Translates the index map in the linalg::GenericOp from an idx->dim map to
// an idx->lvl map. Returns failure if the index map cannot be translated to
// an admissible form.
// Returns the translated index map array and the iterator type array.
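// For example (illustrative): for an operand with a 2x4 block dim2lvl map
//   (d0, d1) -> (d0 floordiv 2, d1 floordiv 4, d0 mod 2, d1 mod 4),
// an identity indexing map (d0, d1) -> (d0, d1) is translated into the
// four-loop map (l0, l1, l2, l3) -> (l0, l1, l2, l3) over the blocked levels.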
static std::optional<std::pair<ArrayAttr, ArrayAttr>>
translateMap(linalg::GenericOp op, PatternRewriter &rewriter) {
  // idxMap is an idx2dim map before reinterpretation.
  MLIRContext *ctx = op.getContext();
  SmallVector<AffineMap> idxMapArray = op.getIndexingMapsArray();
  SmallVector<utils::IteratorType> itTps = op.getIteratorTypesArray();
  for (unsigned i = 0, e = idxMapArray.size(); i < e; i++) {
    Value tensor = op->getOpOperand(i).get();
    auto stt = tryGetSparseTensorType(tensor);
    if (stt && !stt->isIdentity()) {
      AffineMap dim2Lvl = stt->getDimToLvl();
      // By composing dim2Lvl with idx2dim, we get an idx2lvl map.
      idxMapArray[i] = dim2Lvl.compose(idxMapArray[i]);
    }
  }

  // A naive way to handle common constant expressions that arise during
  // dim2lvl translation.
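  // The replacement is sound because each level variable introduced for an
  // inadmissible expression ranges over [0, lvlSz); hence lvl floordiv lvlSz
  // is always 0 and lvl mod lvlSz is the variable itself.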
  auto populateCstMapping = [ctx](DenseMap<AffineExpr, AffineExpr> &cstMapping,
                                  unsigned pos, int64_t lvlSz) {
    if (ShapedType::isStatic(lvlSz)) {
      auto c0 = getAffineConstantExpr(0, ctx);
      auto lvlExp = getAffineDimExpr(pos, ctx);
      auto szExp = getAffineConstantExpr(lvlSz, ctx);

      // lvl floordiv lvlSz = 0
      auto divExp =
          getAffineBinaryOpExpr(AffineExprKind::FloorDiv, lvlExp, szExp);
      cstMapping.try_emplace(divExp, c0);

      // lvl mod lvlSz = lvl
      auto modExp = getAffineBinaryOpExpr(AffineExprKind::Mod, lvlExp, szExp);
      cstMapping.try_emplace(modExp, lvlExp);
    }
  };

  unsigned boundedNum = 0;
  // A fixed-point algorithm.
  bool changed = true;
  while (changed) {
    changed = false;
    for (OpOperand &operand : op->getOpOperands()) {
      auto stt = tryGetSparseTensorType(operand.get());
      // Skip dense operands.
      if (!stt || !stt->getEncoding())
        continue;

      unsigned tid = operand.getOperandNumber();
      bool isOutput = &operand == op.getDpsInitOperand(0);
      AffineMap idxMap = idxMapArray[tid];
      InadmissInfo inAdInfo = collectInadmissInfo(idxMap, isOutput);
      auto [inAdLvls, dimExprs] = inAdInfo;
      for (unsigned d : dimExprs.set_bits()) {
        // The first `boundedNum` dims used in the AffineMap were introduced to
        // resolve previous inadmissible expressions. We cannot replace them,
        // as that might bring back the inadmissible expressions.
        if (d < boundedNum)
          return std::nullopt;
      }

      if (inAdLvls.count() != 0) {
        // Naive constant propagation, should be sufficient to handle block
        // sparsity in our cases.
        SmallVector<int64_t> lvlShape = stt->getLvlShape();
        DenseMap<AffineExpr, AffineExpr> cstMapping;
        unsigned position = 0;
        for (unsigned lvl : inAdLvls.set_bits()) {
          int64_t lvlSz = lvlShape[lvl];
          populateCstMapping(cstMapping, position, lvlSz);
          position++;
        }

        AffineMap lvl2Idx = genReplaceDimToLvlMap(inAdInfo, idxMap, itTps);
        // Compose the lvl2Idx map with all the index maps to eliminate the
        // inadmissible expressions.
        for (unsigned tid = 0, e = idxMapArray.size(); tid < e; tid++) {
          AffineMap transMap = idxMapArray[tid].compose(lvl2Idx);
          idxMapArray[tid] = transMap.replace(
              cstMapping, /*numResultDims=*/transMap.getNumDims(),
              /*numResultSyms=*/0);
        }
        changed = true;
        boundedNum += inAdLvls.count();
      }
    }
  }

  SmallVector<Attribute> iterAttr =
      llvm::map_to_vector(itTps, [ctx](auto itTp) -> Attribute {
        return linalg::IteratorTypeAttr::get(ctx, itTp);
      });

  return std::make_pair(rewriter.getAffineMapArrayAttr(idxMapArray),
                        rewriter.getArrayAttr(iterAttr));
}

// Generates a "de"mapping reinterpretation of the map.
static Value genDemap(OpBuilder &builder, SparseTensorEncodingAttr enc,
                      Value val) {
  return builder.create<ReinterpretMapOp>(val.getLoc(), enc.withoutDimToLvl(),
                                          val);
}

// Generates a "re"mapping reinterpretation of the map.
static Value genRemap(OpBuilder &builder, SparseTensorEncodingAttr enc,
                      Value val) {
  return builder.create<ReinterpretMapOp>(val.getLoc(), enc, val);
}

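// Casts each value in `outs` to the corresponding target type in `types` by
// inserting a reinterpret_map operation whenever the types differ.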
static SmallVector<Value> remapValueRange(OpBuilder &rewriter, TypeRange types,
                                          ValueRange outs) {
  SmallVector<Value> ret(outs);
  assert(outs.size() == types.size());
  for (auto [r, t] : llvm::zip(ret, types))
    if (r.getType() != t)
      r = rewriter.create<ReinterpretMapOp>(r.getLoc(), t, r);
  return ret;
}

namespace {

//===----------------------------------------------------------------------===//
// Rewriting rules for linalg generic ops.
//===----------------------------------------------------------------------===//

/// Sparse rewriting rule for the generic `linalg` operation.
struct GenericOpReinterpretMap
    : public DemapInsRewriter<GenericOpReinterpretMap, linalg::GenericOp> {
public:
  using DemapInsRewriter::DemapInsRewriter;
  LogicalResult rewriteOp(linalg::GenericOp linalgOp, OpAdaptor adaptor,
                          PatternRewriter &rewriter) const {
    // Only rewrite single output operations with pure (sparse) tensor
    // semantics.
    if (linalgOp.getNumDpsInits() != 1 || !linalgOp.hasPureTensorSemantics() ||
        !hasAnySparseOperandOrResult(linalgOp) ||
        !hasAnyNonIdentityOperandsOrResults(linalgOp))
      return failure();

    // Try translating the index map.
    auto transMap = translateMap(linalgOp, rewriter);
    if (!transMap)
      return rewriter.notifyMatchFailure(
          linalgOp, "the sparse kernel cannot be sparsified.");

    // On success, update the linalg operands and maps in place.
    Value res = linalgOp.getResult(0);
    auto stt = tryGetSparseTensorType(res);
    auto [idxMap, itTp] = *transMap;

    rewriter.startOpModification(linalgOp);
    linalgOp.setIndexingMapsAttr(idxMap);
    linalgOp.setIteratorTypesAttr(itTp);
    // Use the demapped arguments.
    linalgOp.getInputsMutable().assign(adaptor.getInputs());
    linalgOp.getDpsInitsMutable().assign(adaptor.getOutputs());
    res.setType(adaptor.getOutputs()[0].getType());
    rewriter.finalizeOpModification(linalgOp);

    rewriter.setInsertionPointAfter(linalgOp);
    if (stt && stt->hasEncoding()) {
      Value t = genRemap(rewriter, stt->getEncoding(), res);
      rewriter.replaceAllUsesExcept(res, t, t.getDefiningOp());
    }
    return success();
  }
};

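/// Sparse rewriting rule that schedules (sorts) the loops of a generic
/// `linalg` operation into an order admissible for sparsification,
/// resolving cycles with a sparse tensor conversion as a last resort.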
struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(linalg::GenericOp linalgOp,
                                PatternRewriter &rewriter) const override {
    if (linalgOp.getNumDpsInits() != 1 || !linalgOp.hasPureTensorSemantics() ||
        hasAnyNonIdentityOperandsOrResults(linalgOp) || // need demap first
        !hasAnySparseOperandOrResult(linalgOp)) {
      return failure();
    }

    const StringRef sorted = "sorted";
    if (linalgOp->hasAttr(sorted))
      return failure();

    auto scheduler = IterationGraphSorter::fromGenericOp(linalgOp);
    bool isAdmissible = false;
    AffineMap order;
    // A const list of all masks that we use for iteration graph
    // computation. Must be ordered from most strict to least strict.
    // Ideally (though it might not be guaranteed), the earlier a constraint
    // mask can be satisfied, the faster the generated kernel will be.
    const auto allMasks = {SortMask::kIncludeAll, SortMask::kIncludeDense,
                           SortMask::kIncludeDenseInput,
                           SortMask::kIncludeDenseOutput,
                           SortMask::kSparseOnly};
    for (const SortMask mask : allMasks) {
      order = scheduler.sort(mask);
      if (order) {
        if (isAdmissibleOrder(linalgOp, order)) {
          isAdmissible = true;
          break;
        }
        // else try a set of less strict constraints.
      }
    }

    if (!order) {
      // Cycles detected.
      if (failed(resolveCycle(scheduler, linalgOp, rewriter))) {
        return rewriter.notifyMatchFailure(
            linalgOp, "the sparse kernel cannot be scheduled: loop detected.");
      }
      return success();
    }

    if (!isAdmissible) {
      return rewriter.notifyMatchFailure(
          linalgOp, "the sparse kernel cannot be scheduled.");
    }

    // Marks the GenericOp to avoid recursive matching.
    rewriter.modifyOpInPlace(linalgOp, [&]() {
      linalgOp->setAttr(sorted, rewriter.getBoolAttr(true));
    });

    // Already sorted.
    if (order.isIdentity())
      return success();

    assert(order.isPermutation());
    // `order` is the original loop -> sorted loop map.
    ArrayAttr preItTypes = linalgOp.getIteratorTypesAttr();
    SmallVector<Attribute> curItTypes;
    curItTypes.reserve(preItTypes.size());
    for (AffineExpr expr : order.getResults()) {
      unsigned loopID = llvm::cast<AffineDimExpr>(expr).getPosition();
      curItTypes.push_back(preItTypes[loopID]);
    }

    // Invert `order` to get the sorted loop -> original loop map.
    order = inversePermutation(order);
    SmallVector<AffineMap> idxMaps = linalgOp.getIndexingMapsArray();
    for (AffineMap &idxMap : idxMaps)
      idxMap = idxMap.compose(order); // sorted loop -> lvl map

    rewriter.startOpModification(linalgOp);
    linalgOp.setIndexingMapsAttr(rewriter.getAffineMapArrayAttr(idxMaps));
    linalgOp.setIteratorTypesAttr(rewriter.getArrayAttr(curItTypes));
    rewriter.finalizeOpModification(linalgOp);

    return success();
  }

private:
  /// Whether the loop order is admissible for sparsification.
  static bool isAdmissibleOrder(linalg::GenericOp linalgOp, AffineMap order) {
    if (!hasAnySparseResult(linalgOp))
      return true;

    OpOperand *lhs = linalgOp.getDpsInitOperand(0);
    unsigned nest = 0;
    const auto iteratorTypes = linalgOp.getIteratorTypesArray();
    for (const AffineExpr l : order.getResults()) {
      unsigned loopId = llvm::cast<AffineDimExpr>(l).getPosition();
      auto itTp =
          cast<linalg::IteratorTypeAttr>(linalgOp.getIteratorTypes()[loopId]);
      if (linalg::isReductionIterator(itTp.getValue()))
        break; // terminate at first reduction
      nest++;
    }
    // Determine admissible dynamic insertion situations:
    // (1) fully injective, since there are no reductions,
    // (2) admissible 1-d expansion in innermost dimension.
    return static_cast<int64_t>(nest) >= linalgOp.getRank(lhs) - 1;
  }

  // Last resort cycle resolution.
  static LogicalResult resolveCycle(IterationGraphSorter &scheduler,
                                    linalg::LinalgOp linalgOp,
                                    PatternRewriter &rewriter) {
    // Compute a topological sort while leaving out every sparse input tensor
    // in succession until an acyclic iteration graph results.
    for (OpOperand *t : linalgOp.getDpsInputOperands()) {
      Value tval = t->get();
      auto srcEnc = getSparseTensorEncoding(tval.getType());
      // The constraints introduced by compound index expressions are
      // complicated. Skip them.
      AffineMap idxMap = linalgOp.getMatchingIndexingMap(t);
      bool hasCompExpr = llvm::any_of(idxMap.getResults(), [](AffineExpr exp) {
        return !llvm::isa<AffineDimExpr>(exp);
      });
      if (!srcEnc || hasCompExpr)
        continue;

      // Try scheduling the loops without the constraints from `tval`.
      AffineMap order = scheduler.sort(SortMask::kSparseOnly, tval);
      if (!order) // still cyclic
        continue;

      // Found an input tensor that resolves the cycle by inserting a
      // conversion into a sparse tensor that adheres to the iteration
      // graph order.
      auto stt = getSparseTensorType(tval);
      assert(stt.isIdentity());
      order = inversePermutation(order);
      // sorted loop -> lvl map.
      idxMap = idxMap.compose(order);

      // Found a permutation such that the results in `idxMap` are sorted.
      // For example,
      //   (d0, d1, d2, d3) -> (d2, d1, d0)
      // loops are scheduled in the order d0->d1->d2->d3. To resolve the cycle,
      // we find a permutation, perm(d2, d1, d0) -> (d0, d1, d2), such that the
      // transposed tensor's levels are visited in the same order as the loop
      // scheduling order.
      SmallVector<std::pair<unsigned, unsigned>> lvlSeq;
      for (AffineExpr expr : idxMap.getResults()) {
        unsigned lvl = llvm::cast<AffineDimExpr>(expr).getPosition();
        lvlSeq.push_back(std::make_pair(lvl, lvlSeq.size()));
      }
      llvm::sort(lvlSeq, llvm::less_first());
      SmallVector<unsigned> perm =
          llvm::to_vector(llvm::make_second_range(lvlSeq));
      auto dimToLvl = AffineMap::getPermutationMap(perm, linalgOp.getContext());
      // The results of the idxMap must be unsorted.
      assert(!dimToLvl.isIdentity());

      // Insert the transpose.
      rewriter.setInsertionPoint(linalgOp);
      RankedTensorType dstTp = stt.withDimToLvl(dimToLvl).getRankedTensorType();
      Value dst = rewriter.create<ConvertOp>(tval.getLoc(), dstTp, tval);
      rewriter.modifyOpInPlace(linalgOp, [&]() {
        linalgOp->setOperand(t->getOperandNumber(), dst);
      });

      // Release the transposed form afterwards.
      // TODO: CSE when used in more than one following op?
      rewriter.setInsertionPointAfter(linalgOp);
      rewriter.create<bufferization::DeallocTensorOp>(dst.getLoc(), dst);

      return success();
    }
    // Cannot be resolved with a single conversion.
    // TODO: convert more than one?
    return failure();
  }
};

//===----------------------------------------------------------------------===//
// Reinterpret Map Rewriters for operations other than linalg.generics
//===----------------------------------------------------------------------===//

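// Demaps a sparse tensor allocation op with a non-identity dim2lvl map by
// translating its dynamic dimension sizes into dynamic level sizes and
// allocating the demapped type instead, remapping the result for other users.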
template <typename AllocOp>
struct TensorAllocDemapper : public OpRewritePattern<AllocOp> {
  using OpRewritePattern<AllocOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(AllocOp op,
                                PatternRewriter &rewriter) const override {
    if (!hasAnyNonIdentityOperandsOrResults(op))
      return failure();

    Location loc = op.getLoc();
    auto stt = getSparseTensorType(op.getResult());

    SmallVector<Value> maxDimCrds;
    maxDimCrds.reserve(stt.getDimRank());
    ValueRange dynSz = op.getDynamicSizes();
    for (int64_t dimSz : stt.getDimShape()) {
      if (ShapedType::isDynamic(dimSz)) {
        Value maxCrd = rewriter.create<arith::SubIOp>(
            loc, dynSz.front(), constantIndex(rewriter, loc, 1));
        maxDimCrds.push_back(maxCrd);
        dynSz = dynSz.drop_front();
      } else {
        maxDimCrds.push_back(constantIndex(rewriter, loc, dimSz - 1));
      }
    }

    ValueRange maxLvlCrds = stt.translateCrds(rewriter, loc, maxDimCrds,
                                              CrdTransDirectionKind::dim2lvl);
    auto lvlShape = stt.getLvlShape();
    SmallVector<Value> dynLvlSzs;
    for (unsigned i = 0, e = lvlShape.size(); i < e; i++) {
      if (ShapedType::isDynamic(lvlShape[i])) {
        Value sz = rewriter.create<arith::AddIOp>(
            loc, maxLvlCrds[i], constantIndex(rewriter, loc, 1));
        dynLvlSzs.push_back(sz);
      }
    }

    assert(dynSz.empty()); // should have consumed all.
    rewriter.startOpModification(op);
    op->setOperands(dynLvlSzs);
    op.getResult().setType(stt.getDemappedType());
    rewriter.finalizeOpModification(op);
    rewriter.setInsertionPointAfter(op);

    Value t = genRemap(rewriter, stt.getEncoding(), op.getResult());
    rewriter.replaceAllUsesExcept(op.getResult(), t, t.getDefiningOp());
    return success();
  }
};

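// Demaps a tensor.insert op on a sparse tensor by translating the insertion
// coordinates from dim space to lvl space.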
struct TensorInsertDemapper
    : public DemapInsRewriter<TensorInsertDemapper, tensor::InsertOp> {
  using DemapInsRewriter::DemapInsRewriter;
  LogicalResult rewriteOp(tensor::InsertOp op, OpAdaptor adaptor,
                          PatternRewriter &rewriter) const {
    if (!hasAnySparseResult(op) || !hasAnyNonIdentityOperandsOrResults(op))
      return failure();

    Location loc = op.getLoc();
    auto stt = getSparseTensorType(op.getResult());
    ValueRange lvlCrd = stt.translateCrds(rewriter, loc, op.getIndices(),
                                          CrdTransDirectionKind::dim2lvl);
    auto insertOp = rewriter.create<tensor::InsertOp>(
        loc, op.getScalar(), adaptor.getDest(), lvlCrd);

    Value out = genRemap(rewriter, stt.getEncoding(), insertOp.getResult());
    rewriter.replaceOp(op, out);
    return success();
  }
};

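// Demaps a sparse_tensor.assemble op by retyping its result to the demapped
// type and remapping the result for all other users.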
struct SparseAssembleDemapper : public OpRewritePattern<AssembleOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(AssembleOp op,
                                PatternRewriter &rewriter) const override {
    if (!hasAnyNonIdentityOperandsOrResults(op))
      return failure();

    assert(hasAnySparseResult(op));
    auto stt = getSparseTensorType(op.getResult());
    rewriter.modifyOpInPlace(
        op, [&op, &stt]() { op.getResult().setType(stt.getDemappedType()); });
    rewriter.setInsertionPointAfter(op);
    Value out = genRemap(rewriter, stt.getEncoding(), op.getResult());
    rewriter.replaceAllUsesExcept(op, out, out.getDefiningOp());
    return success();
  }
};

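// Demaps a sparse_tensor.disassemble op by feeding it the demapped source
// tensor.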
struct SparseDisassembleDemapper
    : public DemapInsRewriter<SparseDisassembleDemapper, DisassembleOp> {
  using DemapInsRewriter::DemapInsRewriter;
  LogicalResult rewriteOp(DisassembleOp op, OpAdaptor adaptor,
                          PatternRewriter &rewriter) const {
    if (!hasAnyNonIdentityOperandsOrResults(op))
      return failure();

    assert(hasAnySparseOperandOrResult(op));
    rewriter.modifyOpInPlace(op, [&op, &adaptor]() {
      op.getTensorMutable().assign(adaptor.getTensor());
    });
    return success();
  }
};

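// Demaps a sparse_tensor.foreach op: the body is rewritten to receive lvl
// coordinates (translated back to dim coordinates for existing uses) and
// demapped arguments, and the results are remapped for remaining users.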
struct ForeachOpDemapper
    : public DemapInsRewriter<ForeachOpDemapper, ForeachOp> {
  using DemapInsRewriter::DemapInsRewriter;
  LogicalResult rewriteOp(ForeachOp op, OpAdaptor adaptor,
                          PatternRewriter &rewriter) const {
    // Only handle operations with sparse input/output with non-identity
    // dim2lvl maps.
    if (!hasAnyNonIdentityOperandsOrResults(op))
      return failure();

    // TODO: demap constants as well.
    if (auto constOp = op.getTensor().getDefiningOp<arith::ConstantOp>())
      if (auto attr = dyn_cast<SparseElementsAttr>(constOp.getValue()))
        return failure();

    Location loc = op.getLoc();
    // Cache the type information since we update the foreach op in-place.
    auto srcStt = getSparseTensorType(op.getTensor());
    SmallVector<Type> prevRetTps(op.getResultTypes());

    rewriter.startOpModification(op);
    op.getTensorMutable().assign(adaptor.getTensor());
    op.getInitArgsMutable().assign(adaptor.getInitArgs());
    // Update the results' types.
    for (auto r : op.getResults())
      if (auto stt = tryGetSparseTensorType(r); stt && !stt->isIdentity())
        r.setType(stt->getDemappedType());

    Level lvlRank = getSparseTensorType(adaptor.getTensor()).getLvlRank();
    // Update the foreach body.
    SmallVector<Type> blockArgTps(lvlRank, rewriter.getIndexType());
    blockArgTps.push_back(srcStt.getElementType());
    blockArgTps.append(adaptor.getInitArgs().getTypes().begin(),
                       adaptor.getInitArgs().getTypes().end());
    Block *body = op.getBody();
    // Block Args: [dimCrds, val, initArgs]
    unsigned preArgNum = body->getNumArguments();
    for (Type t : blockArgTps)
      body->addArgument(t, loc);

    // Block Args: [dimCrds, val, initArgs, lvlCrds, val, DemappedArgs]
    rewriter.setInsertionPointToStart(body);
    ValueRange lvlCrds = body->getArguments().slice(preArgNum, lvlRank);

    ValueRange dimCrds = srcStt.translateCrds(rewriter, loc, lvlCrds,
                                              CrdTransDirectionKind::lvl2dim);
    rewriter.replaceAllUsesWith(
        body->getArguments().take_front(srcStt.getDimRank()), dimCrds);
    body->eraseArguments(0, srcStt.getDimRank());
    // Block Args: [val, initArgs, lvlCrds, val, DemappedArgs]
    unsigned numInitArgs = op.getInitArgs().size();
    rewriter.replaceAllUsesWith(body->getArgument(0),
                                body->getArgument(lvlRank + numInitArgs + 1));
    body->eraseArgument(0);
    // Block Args: [initArgs, lvlCrds, val, DemappedArgs]
    ValueRange srcArgs = body->getArguments().take_front(numInitArgs);
    ValueRange dstArgs = body->getArguments().take_back(numInitArgs);
    // Remap back before replacement.
    SmallVector<Value> reMappedArgs =
        remapValueRange(rewriter, srcArgs.getTypes(), dstArgs);
    rewriter.replaceAllUsesWith(srcArgs, reMappedArgs);
    body->eraseArguments(0, numInitArgs);
    // Block Args: [lvlCrds, val, DemappedArgs] and we are done.

    // Update the yield operations.
    if (numInitArgs != 0) {
      rewriter.setInsertionPointToEnd(body);
      auto yield = llvm::cast<YieldOp>(body->getTerminator());
      if (auto stt = tryGetSparseTensorType(yield.getSingleResult());
          stt && !stt->isIdentity()) {
        Value y =
            genDemap(rewriter, stt->getEncoding(), yield.getSingleResult());
        rewriter.create<YieldOp>(loc, y);
        rewriter.eraseOp(yield);
      }
    }
    rewriter.finalizeOpModification(op);

    rewriter.setInsertionPointAfter(op);
    SmallVector<Value> outs =
        remapValueRange(rewriter, prevRetTps, op.getResults());

    // Replace all the uses of the foreach results, except the use in the
    // reinterpret_map inserted to remap the output.
    for (auto [from, to] : llvm::zip(op.getResults(), outs))
      rewriter.replaceAllUsesExcept(from, to, to.getDefiningOp());

    return success();
  }
};

} // namespace

void mlir::populateSparseReinterpretMap(RewritePatternSet &patterns,
                                        ReinterpretMapScope scope) {
  if (scope == ReinterpretMapScope::kAll ||
      scope == ReinterpretMapScope::kGenericOnly) {
    patterns.add<GenericOpReinterpretMap, GenericOpScheduler>(
        patterns.getContext());
  }
  if (scope == ReinterpretMapScope::kAll ||
      scope == ReinterpretMapScope::kExceptGeneric) {
    patterns.add<TensorAllocDemapper<bufferization::AllocTensorOp>,
                 TensorAllocDemapper<tensor::EmptyOp>, SparseAssembleDemapper,
                 SparseDisassembleDemapper, TensorInsertDemapper,
                 ForeachOpDemapper>(patterns.getContext());
  }
}