//===- Utils.cpp - Utilities to support the Tensor dialect ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for the Tensor dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Tensor/Utils/Utils.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"

using namespace mlir;
using namespace mlir::tensor;

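// Illustrative sketch of what createPadHighOp builds (the SSA names are
// hypothetical): padding a source of type tensor<?x4xf32> to the static
// result type tensor<8x10xf32> yields IR along the lines of
//
//   %c0 = arith.constant 0 : index
//   %d0 = tensor.dim %source, %c0 : tensor<?x4xf32>
//   %h0 = affine.apply affine_map<(d0) -> (-d0 + 8)>(%d0)
//   %0 = tensor.pad %source low[0, 0] high[%h0, 6] { ... }
//       : tensor<?x4xf32> to tensor<8x10xf32>
//
// i.e. only "high" padding is emitted; "low" padding is always zero.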
PadOp mlir::tensor::createPadHighOp(RankedTensorType resType, Value source,
                                    Value pad, bool nofold, Location loc,
                                    OpBuilder &b,
                                    SmallVector<Value> dynOutDims) {

  // This assumption simplifies the following logic without limiting what's
  // required _today_. If needed, we can relax it in the future.
  assert(((resType.getNumDynamicDims() == dynOutDims.size()) ||
          dynOutDims.empty()) &&
         "Either none or all output dynamic dims must be specified!");

  // Init "low" and "high" padding values ("low" is kept as is, "high" is
  // computed below).
  SmallVector<OpFoldResult> low(resType.getRank(), b.getIndexAttr(0));
  SmallVector<OpFoldResult> high(resType.getRank(), b.getIndexAttr(0));

  size_t outDimIdx = 0;

  for (const auto [idx, val] : llvm::enumerate(resType.getShape())) {
    bool isDimDynamic = ShapedType::isDynamic(val);
    bool updatePadHigh = !isDimDynamic || !dynOutDims.empty();

    // Keep the default padding width (i.e. "0") when the output dim is
    // dynamic and no actual output sizes have been provided.
    if (!updatePadHigh)
      continue;

    // Compute the padding width: resDim - sourceDim.
    AffineExpr d0, d1;
    bindDims(b.getContext(), d0, d1);
    OpFoldResult sourceDim = tensor::getMixedSize(b, loc, source, idx);
    OpFoldResult outDim = isDimDynamic ? OpFoldResult(dynOutDims[outDimIdx++])
                                       : OpFoldResult(b.getIndexAttr(val));

    high[idx] = affine::makeComposedFoldedAffineApply(b, loc, d0 - d1,
                                                      {outDim, sourceDim});
  }
  return b.create<PadOp>(loc, resType, source, low, high, pad, nofold);
}

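// Illustrative example: for a value of type tensor<4x?x?xf32>, the helper
// below returns two freshly created tensor.dim ops, one for dimension 1 and
// one for dimension 2 (in that order); static dimensions are skipped.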
SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
                                                        Location loc,
                                                        Value rankedTensor) {
  auto tensorTy = cast<RankedTensorType>(rankedTensor.getType());
  SmallVector<Value> dynamicDims;
  for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
    if (en.value() == ShapedType::kDynamic)
      dynamicDims.push_back(
          b.create<tensor::DimOp>(loc, rankedTensor, en.index()));
  }
  return dynamicDims;
}

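// Illustrative example: transposeVector = [1, 0] applied to tensor<4x?xf32>
// yields tensor<?x4xf32>. An empty transposeVector leaves the type unchanged;
// a vector that is not a permutation of [0, rank) results in failure.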
FailureOr<RankedTensorType>
mlir::tensor::computeTransposedType(RankedTensorType rankedTensorType,
                                    ArrayRef<int64_t> transposeVector) {
  if (transposeVector.empty())
    return rankedTensorType;

  if (!isPermutationVector(transposeVector) ||
      transposeVector.size() !=
          static_cast<size_t>(rankedTensorType.getRank()))
    return failure();

  SmallVector<int64_t> transposedShape(rankedTensorType.getShape());
  applyPermutationToVector(transposedShape, transposeVector);

  using RTTBuilder = RankedTensorType::Builder;
  RankedTensorType transposedTensorType =
      RTTBuilder(rankedTensorType).setShape(transposedShape);
  return transposedTensorType;
}

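// Illustrative example: for %src of type tensor<1x4x1x8xf32> and
// dropDims = {0, 2}, the reassociation computed below is [[0, 1], [2, 3]],
// and the emitted op is
//
//   %0 = tensor.collapse_shape %src [[0, 1], [2, 3]]
//       : tensor<1x4x1x8xf32> into tensor<4x8xf32>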
CollapseShapeOp
mlir::tensor::dropGivenUnitDims(OpBuilder &b, Location loc, Value src,
                                const llvm::SmallBitVector &dropDims) {
  auto srcType = cast<ShapedType>(src.getType());
  int64_t rank = srcType.getRank();
  assert(rank == static_cast<int64_t>(dropDims.size()) &&
         "dropDims dimension does not match src tensor rank");
  assert(llvm::all_of(
             dropDims.set_bits(),
             [&](unsigned dim) { return srcType.getShape()[dim] == 1; }) &&
         "Dropping non unit dimension");
  // Computed reassociation map for the corresponding tensor.collapse_shape.
  SmallVector<ReassociationIndices, 2> reassocMaps;
  // First dimension that has not yet been assigned to a reassociation group.
  int64_t nextDimToGroup = 0;
  llvm::SmallBitVector keptDims(dropDims);
  keptDims.flip();
  int64_t lastSetBit = keptDims.find_last();
  for (int64_t setBit : keptDims.set_bits()) {
    // Group consecutive dropped dimensions with the next non-dropped
    // dimension. If this is the last kept dimension, also group all
    // subsequent dropped dimensions, if any.
    int64_t upTo = setBit == lastSetBit ? rank - 1 : setBit;
    auto seq = llvm::seq_inclusive(nextDimToGroup, upTo);
    reassocMaps.emplace_back(llvm::make_range(seq.begin(), seq.end()));
    nextDimToGroup = setBit + 1;
  }
  return b.create<tensor::CollapseShapeOp>(loc, src, reassocMaps);
}

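// Illustrative example: the rank-expanding insertion
//
//   %0 = tensor.insert_slice %src into %dest[0, 0, 0] [1, 1, 10] [1, 1, 1]
//       : tensor<10xf32> into tensor<1x1x10xf32>
//
// is cast-like: both dropped result dims have size 1, and the remaining
// source/result dims are equal in size.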
bool mlir::tensor::isCastLikeInsertSliceOp(InsertSliceOp op) {
  llvm::SmallBitVector droppedDims = op.getDroppedDims();
  int64_t srcDim = 0;
  RankedTensorType resultType = op.getDestType();
  // Source dims and destination dims (apart from dropped dims) must have the
  // same size.
  for (int64_t resultDim = 0; resultDim < resultType.getRank(); ++resultDim) {
    if (droppedDims.test(resultDim)) {
      // A dropped dim writes only a size-1 slice of the corresponding result
      // dimension. Unless that result dimension itself has size 1, the
      // insertion covers only part of it and is not cast-like.
      if (resultType.getDimSize(resultDim) != 1)
        return false;
      continue;
    }
    FailureOr<bool> equalDimSize = ValueBoundsConstraintSet::areEqual(
        {op.getSource(), srcDim}, {op.getResult(), resultDim});
    if (failed(equalDimSize) || !*equalDimSize)
      return false;
    ++srcDim;
  }

  return true;
}

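// Illustrative example: the rank-reducing extraction
//
//   %0 = tensor.extract_slice %src[0, 0] [1, 10] [1, 1]
//       : tensor<1x10xf32> to tensor<10xf32>
//
// is cast-like: the dropped source dim has size 1, and the remaining dims are
// equal in size. Taking a size-1 slice of a larger source dimension would
// discard elements and hence not be cast-like.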
bool mlir::tensor::isCastLikeExtractSliceOp(ExtractSliceOp op) {
  llvm::SmallBitVector droppedDims = op.getDroppedDims();
  int64_t resultDim = 0;
  // Source dims and result dims (apart from dropped dims) must have the same
  // size.
  RankedTensorType sourceType = op.getSourceType();
  for (int64_t dim = 0, e = sourceType.getRank(); dim < e; ++dim) {
    if (droppedDims.test(dim)) {
      // A dropped dim takes a size-1 slice of the corresponding source
      // dimension. Unless that source dimension itself has size 1, the slice
      // discards elements and is not cast-like.
      if (sourceType.getDimSize(dim) != 1)
        return false;
      continue;
    }
    FailureOr<bool> equalDimSize = ValueBoundsConstraintSet::areEqual(
        {op.getSource(), dim}, {op.getResult(), resultDim});
    if (failed(equalDimSize) || !*equalDimSize)
      return false;
    ++resultDim;
  }

  return true;
}