//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Shape/Transforms/BufferizableOpInterfaceImpl.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;
using namespace mlir::bufferization;
using namespace mlir::shape;

namespace mlir {
namespace shape {
namespace {

25/// Bufferization of shape.assuming.
26struct AssumingOpInterface
27 : public BufferizableOpInterface::ExternalModel<AssumingOpInterface,
28 shape::AssumingOp> {
29 AliasingOpOperandList
30 getAliasingOpOperands(Operation *op, Value value,
31 const AnalysisState &state) const {
32 // AssumingOps do not have tensor OpOperands. The yielded value can be any
33 // SSA value that is in scope. To allow for use-def chain traversal through
34 // AssumingOps in the analysis, the corresponding yield value is considered
35 // to be aliasing with the result.
36 auto assumingOp = cast<shape::AssumingOp>(Val: op);
37 size_t resultNum = std::distance(first: op->getOpResults().begin(),
38 last: llvm::find(Range: op->getOpResults(), Val: value));
39 // TODO: Support multiple blocks.
40 assert(llvm::hasSingleElement(assumingOp.getDoRegion().getBlocks()) &&
41 "expected exactly 1 block");
42 auto yieldOp = dyn_cast<shape::AssumingYieldOp>(
43 Val: assumingOp.getDoRegion().front().getTerminator());
44 assert(yieldOp && "expected shape.assuming_yield terminator");
45 return {{&yieldOp->getOpOperand(idx: resultNum), BufferRelation::Equivalent}};
46 }
47
48 LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
49 const BufferizationOptions &options,
50 BufferizationState &state) const {
51 auto assumingOp = cast<shape::AssumingOp>(Val: op);
52 assert(llvm::hasSingleElement(assumingOp.getDoRegion().getBlocks()) &&
53 "only 1 block supported");
54 auto yieldOp = cast<shape::AssumingYieldOp>(
55 Val: assumingOp.getDoRegion().front().getTerminator());
56
57 // Create new op and move over region.
58 TypeRange newResultTypes(yieldOp.getOperands());
59 auto newOp = rewriter.create<shape::AssumingOp>(
60 location: op->getLoc(), args&: newResultTypes, args: assumingOp.getWitness());
61 newOp.getDoRegion().takeBody(other&: assumingOp.getRegion());
62
63 // Update all uses of the old op.
64 rewriter.setInsertionPointAfter(newOp);
65 SmallVector<Value> newResults;
66 for (const auto &it : llvm::enumerate(First: assumingOp->getResultTypes())) {
67 if (isa<TensorType>(Val: it.value())) {
68 newResults.push_back(Elt: rewriter.create<bufferization::ToTensorOp>(
69 location: assumingOp.getLoc(), args&: it.value(), args: newOp->getResult(idx: it.index())));
70 } else {
71 newResults.push_back(Elt: newOp->getResult(idx: it.index()));
72 }
73 }
74
75 // Replace old op.
76 rewriter.replaceOp(op: assumingOp, newValues: newResults);
77
78 return success();
79 }
80};
82/// Bufferization of shape.assuming_yield. Bufferized as part of their enclosing
83/// ops, so this is for analysis only.
84struct AssumingYieldOpInterface
85 : public BufferizableOpInterface::ExternalModel<AssumingYieldOpInterface,
86 shape::AssumingYieldOp> {
87 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
88 const AnalysisState &state) const {
89 return true;
90 }
91
92 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
93 const AnalysisState &state) const {
94 return false;
95 }
96
97 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
98 const AnalysisState &state) const {
99 assert(isa<shape::AssumingOp>(op->getParentOp()) &&
100 "expected that parent is an AssumingOp");
101 OpResult opResult =
102 op->getParentOp()->getResult(idx: opOperand.getOperandNumber());
103 return {{opResult, BufferRelation::Equivalent}};
104 }
105
106 bool mustBufferizeInPlace(Operation *op, OpOperand &opOperand,
107 const AnalysisState &state) const {
108 // Yield operands always bufferize inplace. Otherwise, an alloc + copy
109 // may be generated inside the block. We should not return/yield allocations
110 // when possible.
111 return true;
112 }
113
114 LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
115 const BufferizationOptions &options,
116 BufferizationState &state) const {
117 auto yieldOp = cast<shape::AssumingYieldOp>(Val: op);
118 SmallVector<Value> newResults;
119 for (Value value : yieldOp.getOperands()) {
120 if (isa<TensorType>(Val: value.getType())) {
121 FailureOr<Value> buffer = getBuffer(rewriter, value, options, state);
122 if (failed(Result: buffer))
123 return failure();
124 newResults.push_back(Elt: *buffer);
125 } else {
126 newResults.push_back(Elt: value);
127 }
128 }
129 replaceOpWithNewBufferizedOp<shape::AssumingYieldOp>(rewriter, op,
130 args&: newResults);
131 return success();
132 }
133};

} // namespace
} // namespace shape
} // namespace mlir

139void mlir::shape::registerBufferizableOpInterfaceExternalModels(
140 DialectRegistry &registry) {
141 registry.addExtension(extensionFn: +[](MLIRContext *ctx, shape::ShapeDialect *dialect) {
142 shape::AssumingOp::attachInterface<AssumingOpInterface>(context&: *ctx);
143 shape::AssumingYieldOp::attachInterface<AssumingYieldOpInterface>(context&: *ctx);
144 });
145}

// Source: mlir/lib/Dialect/Shape/Transforms/BufferizableOpInterfaceImpl.cpp