//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These BufferizableOpInterface implementations provide analysis-related
// interface methods only. They are bufferized by the
// SparseTensorConversion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir::bufferization;
using namespace mlir::sparse_tensor;

namespace mlir {
namespace sparse_tensor {
namespace {

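// Base external model for all sparse_tensor op models below. It provides the
// analysis-related hooks only; any attempt to actually bufferize through this
// interface is rejected, since the sparsifier performs that lowering.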
template <typename ConcreteModel, typename ConcreteOp>
struct SparseBufferizableOpInterfaceExternalModel
    : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    return op->emitError(
        "sparse_tensor ops must be bufferized with the sparsifier");
  }
};

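// sparse_tensor.concatenate materializes its result into a newly allocated
// tensor; the operands are only read.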
struct ConcatenateOpInterface
    : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
                                                 sparse_tensor::ConcatenateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    return true;
  }
};

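// sparse_tensor.convert casts a tensor to a different (sparse) encoding; the
// source operand is only read.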
struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
                                ConvertOpInterface, sparse_tensor::ConvertOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // ConvertOps may allocate. (Unless they convert between two identical
    // types, then they fold away.)
    return true;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    return true;
  }
};

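// sparse_tensor.load rematerializes the tensor value once insertions have
// completed; its result is equivalent to the operand buffer.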
struct LoadOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
                                                        sparse_tensor::LoadOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return false;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }
};

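// sparse_tensor.new materializes a sparse tensor from an external source
// (e.g., a file) into a freshly allocated buffer.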
struct NewOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
                                                        sparse_tensor::NewOp> {
  bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
                                     const AnalysisState &state) const {
    // NewOps allocate but do not write.
    return false;
  }

  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
};

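// sparse_tensor.assemble packs user-supplied level and value buffers into a
// sparse tensor without copying them.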
struct AssembleOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          AssembleOpInterface, sparse_tensor::AssembleOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // AssembleOp reuses all the buffers instead of allocating new ones.
    return false;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    assert(op->getNumResults() == 1);
    // AssembleOp reuses the input tensors as values/coordinates instead of
    // creating new ones when packing into a COO format.
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::Unknown;
  }
};

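// sparse_tensor.disassemble is the inverse of assemble: it unpacks a sparse
// tensor into output buffers that were pre-allocated by the user.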
struct DisassembleOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          DisassembleOpInterface, sparse_tensor::DisassembleOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // The output buffer is pre-allocated by the user.
    return false;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // The first operand is the sparse tensor that we are unpacking.
    return opOperand.getOperandNumber() == 0;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // We write into the output operands.
    assert(2 * (op->getNumOperands() - 1) == op->getNumResults());
    return opOperand.getOperandNumber() > 0;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    assert(2 * (op->getNumOperands() - 1) == op->getNumResults());

    if (opOperand.getOperandNumber() == 0)
      return {};
    // We write directly into the output tensors and return them.
    return {{op->getResult(opOperand.getOperandNumber() - 1),
             BufferRelation::Equivalent}};
  }
};

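// sparse_tensor.foreach iterates over the stored entries of a sparse tensor;
// the tensor operand is only read.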
struct ForeachOpInterface : public SparseBufferizableOpInterfaceExternalModel<
                                ForeachOpInterface, sparse_tensor::ForeachOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  LogicalResult verifyAnalysis(Operation *op,
                               const AnalysisState &state) const {
    // A more complex analysis (similar to scf.for) is needed if the op returns
    // a tensor. That tensor would have to be bufferized (not implemented yet).
    for (OpResult result : op->getResults()) {
      if (isa<TensorType>(result.getType()))
        return op->emitOpError("tensor results are not supported yet");
    }
    return success();
  }
};

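// sparse_tensor.number_of_entries merely queries the number of stored
// entries; it reads the tensor without writing or aliasing it.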
struct NumberOfEntriesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

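// The following four models cover the storage-inspection ops
// (sparse_tensor.{coordinates_buffer,coordinates,positions,values}), which
// expose the underlying storage buffers and are modeled as reads only.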
struct ToCoordinatesBufferOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToCoordinatesBufferOpInterface,
          sparse_tensor::ToCoordinatesBufferOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.coordinates` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

struct ToCoordinatesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.coordinates` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

struct ToPositionsOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.positions` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

struct ToValuesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToValuesOpInterface, sparse_tensor::ToValuesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of sparse_tensor.values
    // are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

} // namespace
} // namespace sparse_tensor
} // namespace mlir

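// Example client setup (a minimal sketch, not part of this file): register
// the external models on a DialectRegistry before creating the context, e.g.
//
//   DialectRegistry registry;
//   sparse_tensor::registerBufferizableOpInterfaceExternalModels(registry);
//   MLIRContext context(registry);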
void mlir::sparse_tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx,
                            sparse_tensor::SparseTensorDialect *dialect) {
    sparse_tensor::ConcatenateOp::attachInterface<ConcatenateOpInterface>(*ctx);
    sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
    sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
    sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
    sparse_tensor::NumberOfEntriesOp::attachInterface<
        NumberOfEntriesOpInterface>(*ctx);
    sparse_tensor::AssembleOp::attachInterface<AssembleOpInterface>(*ctx);
    sparse_tensor::DisassembleOp::attachInterface<DisassembleOpInterface>(*ctx);
    sparse_tensor::ForeachOp::attachInterface<ForeachOpInterface>(*ctx);
    sparse_tensor::ToCoordinatesBufferOp::attachInterface<
        ToCoordinatesBufferOpInterface>(*ctx);
    sparse_tensor::ToCoordinatesOp::attachInterface<ToCoordinatesOpInterface>(
        *ctx);
    sparse_tensor::ToPositionsOp::attachInterface<ToPositionsOpInterface>(*ctx);
    sparse_tensor::ToValuesOp::attachInterface<ToValuesOpInterface>(*ctx);
  });
}