//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These BufferizableOpInterface implementations provide analysis-related
// interface methods only. The ops themselves are bufferized by the
// SparseTensorConversion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir::bufferization;
using namespace mlir::sparse_tensor;

namespace mlir {
namespace sparse_tensor {
namespace {
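// Common base for all models in this file: `bufferize` unconditionally emits
// an error, since these models provide analysis information only and the
// actual lowering is done by the sparsifier.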
template <typename ConcreteModel, typename ConcreteOp>
struct SparseBufferizableOpInterfaceExternalModel
    : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    return op->emitError(
        "sparse_tensor ops must be bufferized with the sparsifier");
  }
};

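// sparse_tensor.concatenate reads all of its input tensors and materializes
// the result into a newly allocated tensor.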
struct ConcatenateOpInterface
    : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
                                                 sparse_tensor::ConcatenateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    return true;
  }
};

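// sparse_tensor.convert reads its input and, in general, materializes its
// result into a new allocation (identity conversions simply fold away).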
struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
                                ConvertOpInterface, sparse_tensor::ConvertOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // ConvertOps may allocate. (Unless they convert between two identical
    // types, then they fold away.)
    return true;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    return true;
  }
};

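// sparse_tensor.load rematerializes a tensor from its underlying sparse
// storage; the result is equivalent to the operand, so the op itself neither
// reads nor writes memory.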
struct LoadOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
                                                        sparse_tensor::LoadOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return false;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }
};

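// sparse_tensor.new materializes a sparse tensor from an external source
// into a fresh allocation.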
struct NewOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
                                                        sparse_tensor::NewOp> {
  bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
                                     const AnalysisState &state) const {
    // NewOps allocate but do not write.
    return false;
  }

  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
};

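// sparse_tensor.assemble packs user-supplied buffers into a sparse tensor
// without copying them.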
struct AssembleOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          AssembleOpInterface, sparse_tensor::AssembleOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // AssembleOp reuses all the buffers instead of allocating new ones.
    return false;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    assert(op->getNumResults() == 1);
    // AssembleOp reuses the input tensors as values/coordinates instead of
    // creating new ones when packing into a COO format.
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::Unknown;
  }
};

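// sparse_tensor.disassemble unpacks a sparse tensor into user-supplied
// output buffers: it reads the tensor and writes the buffers.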
struct DisassembleOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          DisassembleOpInterface, sparse_tensor::DisassembleOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // The output buffer is pre-allocated by the user.
    return false;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // The first operand is the sparse tensor that we are unpacking.
    return opOperand.getOperandNumber() == 0;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // We write into the output operands.
    assert(2 * (op->getNumOperands() - 1) == op->getNumResults());
    return opOperand.getOperandNumber() > 0;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    assert(2 * (op->getNumOperands() - 1) == op->getNumResults());

    if (opOperand.getOperandNumber() == 0)
      return {};
    // We write directly into the output tensors and return them.
    return {{op->getResult(opOperand.getOperandNumber() - 1),
             BufferRelation::Equivalent}};
  }
};

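// sparse_tensor.foreach only iterates over (i.e., reads) its input tensor.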
struct ForeachOpInterface : public SparseBufferizableOpInterfaceExternalModel<
                                ForeachOpInterface, sparse_tensor::ForeachOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  LogicalResult verifyAnalysis(Operation *op,
                               const AnalysisState &state) const {
    // A more complex analysis (similar to scf.for) is needed if the op returns
    // a tensor. That tensor would have to be bufferized (not implemented yet).
    for (OpResult result : op->getResults()) {
      if (isa<TensorType>(result.getType()))
        return op->emitOpError("tensor results are not supported yet");
    }
    return success();
  }
};

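// sparse_tensor.number_of_entries only queries (reads) the storage of its
// input tensor.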
struct NumberOfEntriesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

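// sparse_tensor.coordinates_buffer extracts the linear coordinates buffer of
// a sparse tensor.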
struct ToCoordinatesBufferOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToCoordinatesBufferOpInterface,
          sparse_tensor::ToCoordinatesBufferOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.coordinates_buffer` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

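// sparse_tensor.coordinates extracts the coordinates array for a given level
// of a sparse tensor.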
struct ToCoordinatesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.coordinates` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

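// sparse_tensor.positions extracts the positions array for a given level of
// a sparse tensor.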
struct ToPositionsOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.positions` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

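// sparse_tensor.values extracts the values array of a sparse tensor.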
struct ToValuesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToValuesOpInterface, sparse_tensor::ToValuesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.values` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

} // namespace
} // namespace sparse_tensor
} // namespace mlir

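// Attaches the analysis-only external models defined above to their
// respective sparse_tensor ops.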
void mlir::sparse_tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx,
                            sparse_tensor::SparseTensorDialect *dialect) {
    sparse_tensor::ConcatenateOp::attachInterface<ConcatenateOpInterface>(*ctx);
    sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
    sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
    sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
    sparse_tensor::NumberOfEntriesOp::attachInterface<
        NumberOfEntriesOpInterface>(*ctx);
    sparse_tensor::AssembleOp::attachInterface<AssembleOpInterface>(*ctx);
    sparse_tensor::DisassembleOp::attachInterface<DisassembleOpInterface>(*ctx);
    sparse_tensor::ForeachOp::attachInterface<ForeachOpInterface>(*ctx);
    sparse_tensor::ToCoordinatesBufferOp::attachInterface<
        ToCoordinatesBufferOpInterface>(*ctx);
    sparse_tensor::ToCoordinatesOp::attachInterface<ToCoordinatesOpInterface>(
        *ctx);
    sparse_tensor::ToPositionsOp::attachInterface<ToPositionsOpInterface>(*ctx);
    sparse_tensor::ToValuesOp::attachInterface<ToValuesOpInterface>(*ctx);
  });
}