//===- AffineToStandard.cpp - Lower affine constructs to primitives -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file lowers affine constructs (If and For statements, AffineApply
// operations) within a function into their standard If and For equivalent ops.
//
//===----------------------------------------------------------------------===//
13
14#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
15
16#include "mlir/Dialect/Affine/IR/AffineOps.h"
17#include "mlir/Dialect/Affine/Transforms/Transforms.h"
18#include "mlir/Dialect/Affine/Utils.h"
19#include "mlir/Dialect/MemRef/IR/MemRef.h"
20#include "mlir/Dialect/SCF/IR/SCF.h"
21#include "mlir/Dialect/Vector/IR/VectorOps.h"
22#include "mlir/IR/IntegerSet.h"
23#include "mlir/IR/MLIRContext.h"
24#include "mlir/Transforms/DialectConversion.h"
25#include "mlir/Transforms/Passes.h"
26
27namespace mlir {
28#define GEN_PASS_DEF_LOWERAFFINEPASS
29#include "mlir/Conversion/Passes.h.inc"
30} // namespace mlir
31
32using namespace mlir;
33using namespace mlir::affine;
34using namespace mlir::vector;
35
36/// Given a range of values, emit the code that reduces them with "min" or "max"
37/// depending on the provided comparison predicate, sgt for max and slt for min.
38///
39/// Multiple values are scanned in a linear sequence. This creates a data
40/// dependences that wouldn't exist in a tree reduction, but is easier to
41/// recognize as a reduction by the subsequent passes.
42static Value buildMinMaxReductionSeq(Location loc,
43 arith::CmpIPredicate predicate,
44 ValueRange values, OpBuilder &builder) {
45 assert(!values.empty() && "empty min/max chain");
46 assert(predicate == arith::CmpIPredicate::sgt ||
47 predicate == arith::CmpIPredicate::slt);
48
49 auto valueIt = values.begin();
50 Value value = *valueIt++;
51 for (; valueIt != values.end(); ++valueIt) {
52 if (predicate == arith::CmpIPredicate::sgt)
53 value = builder.create<arith::MaxSIOp>(location: loc, args&: value, args: *valueIt);
54 else
55 value = builder.create<arith::MinSIOp>(location: loc, args&: value, args: *valueIt);
56 }
57
58 return value;
59}
60
61/// Emit instructions that correspond to computing the maximum value among the
62/// values of a (potentially) multi-output affine map applied to `operands`.
63static Value lowerAffineMapMax(OpBuilder &builder, Location loc, AffineMap map,
64 ValueRange operands) {
65 if (auto values = expandAffineMap(builder, loc, affineMap: map, operands))
66 return buildMinMaxReductionSeq(loc, predicate: arith::CmpIPredicate::sgt, values: *values,
67 builder);
68 return nullptr;
69}
70
71/// Emit instructions that correspond to computing the minimum value among the
72/// values of a (potentially) multi-output affine map applied to `operands`.
73static Value lowerAffineMapMin(OpBuilder &builder, Location loc, AffineMap map,
74 ValueRange operands) {
75 if (auto values = expandAffineMap(builder, loc, affineMap: map, operands))
76 return buildMinMaxReductionSeq(loc, predicate: arith::CmpIPredicate::slt, values: *values,
77 builder);
78 return nullptr;
79}
80
81/// Emit instructions that correspond to the affine map in the upper bound
82/// applied to the respective operands, and compute the minimum value across
83/// the results.
84Value mlir::lowerAffineUpperBound(AffineForOp op, OpBuilder &builder) {
85 return lowerAffineMapMin(builder, loc: op.getLoc(), map: op.getUpperBoundMap(),
86 operands: op.getUpperBoundOperands());
87}
88
89/// Emit instructions that correspond to the affine map in the lower bound
90/// applied to the respective operands, and compute the maximum value across
91/// the results.
92Value mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
93 return lowerAffineMapMax(builder, loc: op.getLoc(), map: op.getLowerBoundMap(),
94 operands: op.getLowerBoundOperands());
95}
96
97namespace {
98class AffineMinLowering : public OpRewritePattern<AffineMinOp> {
99public:
100 using OpRewritePattern<AffineMinOp>::OpRewritePattern;
101
102 LogicalResult matchAndRewrite(AffineMinOp op,
103 PatternRewriter &rewriter) const override {
104 Value reduced =
105 lowerAffineMapMin(builder&: rewriter, loc: op.getLoc(), map: op.getMap(), operands: op.getOperands());
106 if (!reduced)
107 return failure();
108
109 rewriter.replaceOp(op, newValues: reduced);
110 return success();
111 }
112};
113
114class AffineMaxLowering : public OpRewritePattern<AffineMaxOp> {
115public:
116 using OpRewritePattern<AffineMaxOp>::OpRewritePattern;
117
118 LogicalResult matchAndRewrite(AffineMaxOp op,
119 PatternRewriter &rewriter) const override {
120 Value reduced =
121 lowerAffineMapMax(builder&: rewriter, loc: op.getLoc(), map: op.getMap(), operands: op.getOperands());
122 if (!reduced)
123 return failure();
124
125 rewriter.replaceOp(op, newValues: reduced);
126 return success();
127 }
128};
129
130/// Affine yields ops are removed.
131class AffineYieldOpLowering : public OpRewritePattern<AffineYieldOp> {
132public:
133 using OpRewritePattern<AffineYieldOp>::OpRewritePattern;
134
135 LogicalResult matchAndRewrite(AffineYieldOp op,
136 PatternRewriter &rewriter) const override {
137 if (isa<scf::ParallelOp>(Val: op->getParentOp())) {
138 // Terminator is rewritten as part of the "affine.parallel" lowering
139 // pattern.
140 return failure();
141 }
142 rewriter.replaceOpWithNewOp<scf::YieldOp>(op, args: op.getOperands());
143 return success();
144 }
145};
146
147class AffineForLowering : public OpRewritePattern<AffineForOp> {
148public:
149 using OpRewritePattern<AffineForOp>::OpRewritePattern;
150
151 LogicalResult matchAndRewrite(AffineForOp op,
152 PatternRewriter &rewriter) const override {
153 Location loc = op.getLoc();
154 Value lowerBound = lowerAffineLowerBound(op, builder&: rewriter);
155 Value upperBound = lowerAffineUpperBound(op, builder&: rewriter);
156 Value step =
157 rewriter.create<arith::ConstantIndexOp>(location: loc, args: op.getStepAsInt());
158 auto scfForOp = rewriter.create<scf::ForOp>(location: loc, args&: lowerBound, args&: upperBound,
159 args&: step, args: op.getInits());
160 rewriter.eraseBlock(block: scfForOp.getBody());
161 rewriter.inlineRegionBefore(region&: op.getRegion(), parent&: scfForOp.getRegion(),
162 before: scfForOp.getRegion().end());
163 rewriter.replaceOp(op, newValues: scfForOp.getResults());
164 return success();
165 }
166};
167
168/// Convert an `affine.parallel` (loop nest) operation into a `scf.parallel`
169/// operation.
170class AffineParallelLowering : public OpRewritePattern<AffineParallelOp> {
171public:
172 using OpRewritePattern<AffineParallelOp>::OpRewritePattern;
173
174 LogicalResult matchAndRewrite(AffineParallelOp op,
175 PatternRewriter &rewriter) const override {
176 Location loc = op.getLoc();
177 SmallVector<Value, 8> steps;
178 SmallVector<Value, 8> upperBoundTuple;
179 SmallVector<Value, 8> lowerBoundTuple;
180 SmallVector<Value, 8> identityVals;
181 // Emit IR computing the lower and upper bound by expanding the map
182 // expression.
183 lowerBoundTuple.reserve(N: op.getNumDims());
184 upperBoundTuple.reserve(N: op.getNumDims());
185 for (unsigned i = 0, e = op.getNumDims(); i < e; ++i) {
186 Value lower = lowerAffineMapMax(builder&: rewriter, loc, map: op.getLowerBoundMap(pos: i),
187 operands: op.getLowerBoundsOperands());
188 if (!lower)
189 return rewriter.notifyMatchFailure(arg&: op, msg: "couldn't convert lower bounds");
190 lowerBoundTuple.push_back(Elt: lower);
191
192 Value upper = lowerAffineMapMin(builder&: rewriter, loc, map: op.getUpperBoundMap(pos: i),
193 operands: op.getUpperBoundsOperands());
194 if (!upper)
195 return rewriter.notifyMatchFailure(arg&: op, msg: "couldn't convert upper bounds");
196 upperBoundTuple.push_back(Elt: upper);
197 }
198 steps.reserve(N: op.getSteps().size());
199 for (int64_t step : op.getSteps())
200 steps.push_back(Elt: rewriter.create<arith::ConstantIndexOp>(location: loc, args&: step));
201
202 // Get the terminator op.
203 auto affineParOpTerminator =
204 cast<AffineYieldOp>(Val: op.getBody()->getTerminator());
205 scf::ParallelOp parOp;
206 if (op.getResults().empty()) {
207 // Case with no reduction operations/return values.
208 parOp = rewriter.create<scf::ParallelOp>(location: loc, args&: lowerBoundTuple,
209 args&: upperBoundTuple, args&: steps,
210 /*bodyBuilderFn=*/args: nullptr);
211 rewriter.eraseBlock(block: parOp.getBody());
212 rewriter.inlineRegionBefore(region&: op.getRegion(), parent&: parOp.getRegion(),
213 before: parOp.getRegion().end());
214 rewriter.replaceOp(op, newValues: parOp.getResults());
215 rewriter.setInsertionPoint(affineParOpTerminator);
216 rewriter.replaceOpWithNewOp<scf::ReduceOp>(op: affineParOpTerminator);
217 return success();
218 }
219 // Case with affine.parallel with reduction operations/return values.
220 // scf.parallel handles the reduction operation differently unlike
221 // affine.parallel.
222 ArrayRef<Attribute> reductions = op.getReductions().getValue();
223 for (auto pair : llvm::zip(t&: reductions, u: op.getResultTypes())) {
224 // For each of the reduction operations get the identity values for
225 // initialization of the result values.
226 Attribute reduction = std::get<0>(t&: pair);
227 Type resultType = std::get<1>(t&: pair);
228 std::optional<arith::AtomicRMWKind> reductionOp =
229 arith::symbolizeAtomicRMWKind(
230 static_cast<uint64_t>(cast<IntegerAttr>(Val&: reduction).getInt()));
231 assert(reductionOp && "Reduction operation cannot be of None Type");
232 arith::AtomicRMWKind reductionOpValue = *reductionOp;
233 identityVals.push_back(
234 Elt: arith::getIdentityValue(op: reductionOpValue, resultType, builder&: rewriter, loc));
235 }
236 parOp = rewriter.create<scf::ParallelOp>(
237 location: loc, args&: lowerBoundTuple, args&: upperBoundTuple, args&: steps, args&: identityVals,
238 /*bodyBuilderFn=*/args: nullptr);
239
240 // Copy the body of the affine.parallel op.
241 rewriter.eraseBlock(block: parOp.getBody());
242 rewriter.inlineRegionBefore(region&: op.getRegion(), parent&: parOp.getRegion(),
243 before: parOp.getRegion().end());
244 assert(reductions.size() == affineParOpTerminator->getNumOperands() &&
245 "Unequal number of reductions and operands.");
246
247 // Emit new "scf.reduce" terminator.
248 rewriter.setInsertionPoint(affineParOpTerminator);
249 auto reduceOp = rewriter.replaceOpWithNewOp<scf::ReduceOp>(
250 op: affineParOpTerminator, args: affineParOpTerminator->getOperands());
251 for (unsigned i = 0, end = reductions.size(); i < end; i++) {
252 // For each of the reduction operations get the respective mlir::Value.
253 std::optional<arith::AtomicRMWKind> reductionOp =
254 arith::symbolizeAtomicRMWKind(
255 cast<IntegerAttr>(Val: reductions[i]).getInt());
256 assert(reductionOp && "Reduction Operation cannot be of None Type");
257 arith::AtomicRMWKind reductionOpValue = *reductionOp;
258 rewriter.setInsertionPoint(&parOp.getBody()->back());
259 Block &reductionBody = reduceOp.getReductions()[i].front();
260 rewriter.setInsertionPointToEnd(&reductionBody);
261 Value reductionResult = arith::getReductionOp(
262 op: reductionOpValue, builder&: rewriter, loc, lhs: reductionBody.getArgument(i: 0),
263 rhs: reductionBody.getArgument(i: 1));
264 rewriter.create<scf::ReduceReturnOp>(location: loc, args&: reductionResult);
265 }
266 rewriter.replaceOp(op, newValues: parOp.getResults());
267 return success();
268 }
269};
270
271class AffineIfLowering : public OpRewritePattern<AffineIfOp> {
272public:
273 using OpRewritePattern<AffineIfOp>::OpRewritePattern;
274
275 LogicalResult matchAndRewrite(AffineIfOp op,
276 PatternRewriter &rewriter) const override {
277 auto loc = op.getLoc();
278
279 // Now we just have to handle the condition logic.
280 auto integerSet = op.getIntegerSet();
281 Value zeroConstant = rewriter.create<arith::ConstantIndexOp>(location: loc, args: 0);
282 SmallVector<Value, 8> operands(op.getOperands());
283 auto operandsRef = llvm::ArrayRef(operands);
284
285 // Calculate cond as a conjunction without short-circuiting.
286 Value cond = nullptr;
287 for (unsigned i = 0, e = integerSet.getNumConstraints(); i < e; ++i) {
288 AffineExpr constraintExpr = integerSet.getConstraint(idx: i);
289 bool isEquality = integerSet.isEq(idx: i);
290
291 // Build and apply an affine expression
292 auto numDims = integerSet.getNumDims();
293 Value affResult = expandAffineExpr(builder&: rewriter, loc, expr: constraintExpr,
294 dimValues: operandsRef.take_front(N: numDims),
295 symbolValues: operandsRef.drop_front(N: numDims));
296 if (!affResult)
297 return failure();
298 auto pred =
299 isEquality ? arith::CmpIPredicate::eq : arith::CmpIPredicate::sge;
300 Value cmpVal =
301 rewriter.create<arith::CmpIOp>(location: loc, args&: pred, args&: affResult, args&: zeroConstant);
302 cond = cond
303 ? rewriter.create<arith::AndIOp>(location: loc, args&: cond, args&: cmpVal).getResult()
304 : cmpVal;
305 }
306 cond = cond ? cond
307 : rewriter.create<arith::ConstantIntOp>(location: loc, /*value=*/args: 1,
308 /*width=*/args: 1);
309
310 bool hasElseRegion = !op.getElseRegion().empty();
311 auto ifOp = rewriter.create<scf::IfOp>(location: loc, args: op.getResultTypes(), args&: cond,
312 args&: hasElseRegion);
313 rewriter.inlineRegionBefore(region&: op.getThenRegion(),
314 before: &ifOp.getThenRegion().back());
315 rewriter.eraseBlock(block: &ifOp.getThenRegion().back());
316 if (hasElseRegion) {
317 rewriter.inlineRegionBefore(region&: op.getElseRegion(),
318 before: &ifOp.getElseRegion().back());
319 rewriter.eraseBlock(block: &ifOp.getElseRegion().back());
320 }
321
322 // Replace the Affine IfOp finally.
323 rewriter.replaceOp(op, newValues: ifOp.getResults());
324 return success();
325 }
326};
327
328/// Convert an "affine.apply" operation into a sequence of arithmetic
329/// operations using the StandardOps dialect.
330class AffineApplyLowering : public OpRewritePattern<AffineApplyOp> {
331public:
332 using OpRewritePattern<AffineApplyOp>::OpRewritePattern;
333
334 LogicalResult matchAndRewrite(AffineApplyOp op,
335 PatternRewriter &rewriter) const override {
336 auto maybeExpandedMap =
337 expandAffineMap(builder&: rewriter, loc: op.getLoc(), affineMap: op.getAffineMap(),
338 operands: llvm::to_vector<8>(Range: op.getOperands()));
339 if (!maybeExpandedMap)
340 return failure();
341 rewriter.replaceOp(op, newValues: *maybeExpandedMap);
342 return success();
343 }
344};
345
346/// Apply the affine map from an 'affine.load' operation to its operands, and
347/// feed the results to a newly created 'memref.load' operation (which replaces
348/// the original 'affine.load').
349class AffineLoadLowering : public OpRewritePattern<AffineLoadOp> {
350public:
351 using OpRewritePattern<AffineLoadOp>::OpRewritePattern;
352
353 LogicalResult matchAndRewrite(AffineLoadOp op,
354 PatternRewriter &rewriter) const override {
355 // Expand affine map from 'affineLoadOp'.
356 SmallVector<Value, 8> indices(op.getMapOperands());
357 auto resultOperands =
358 expandAffineMap(builder&: rewriter, loc: op.getLoc(), affineMap: op.getAffineMap(), operands: indices);
359 if (!resultOperands)
360 return failure();
361
362 // Build vector.load memref[expandedMap.results].
363 rewriter.replaceOpWithNewOp<memref::LoadOp>(op, args: op.getMemRef(),
364 args&: *resultOperands);
365 return success();
366 }
367};
368
369/// Apply the affine map from an 'affine.prefetch' operation to its operands,
370/// and feed the results to a newly created 'memref.prefetch' operation (which
371/// replaces the original 'affine.prefetch').
372class AffinePrefetchLowering : public OpRewritePattern<AffinePrefetchOp> {
373public:
374 using OpRewritePattern<AffinePrefetchOp>::OpRewritePattern;
375
376 LogicalResult matchAndRewrite(AffinePrefetchOp op,
377 PatternRewriter &rewriter) const override {
378 // Expand affine map from 'affinePrefetchOp'.
379 SmallVector<Value, 8> indices(op.getMapOperands());
380 auto resultOperands =
381 expandAffineMap(builder&: rewriter, loc: op.getLoc(), affineMap: op.getAffineMap(), operands: indices);
382 if (!resultOperands)
383 return failure();
384
385 // Build memref.prefetch memref[expandedMap.results].
386 rewriter.replaceOpWithNewOp<memref::PrefetchOp>(
387 op, args: op.getMemref(), args&: *resultOperands, args: op.getIsWrite(),
388 args: op.getLocalityHint(), args: op.getIsDataCache());
389 return success();
390 }
391};
392
393/// Apply the affine map from an 'affine.store' operation to its operands, and
394/// feed the results to a newly created 'memref.store' operation (which replaces
395/// the original 'affine.store').
396class AffineStoreLowering : public OpRewritePattern<AffineStoreOp> {
397public:
398 using OpRewritePattern<AffineStoreOp>::OpRewritePattern;
399
400 LogicalResult matchAndRewrite(AffineStoreOp op,
401 PatternRewriter &rewriter) const override {
402 // Expand affine map from 'affineStoreOp'.
403 SmallVector<Value, 8> indices(op.getMapOperands());
404 auto maybeExpandedMap =
405 expandAffineMap(builder&: rewriter, loc: op.getLoc(), affineMap: op.getAffineMap(), operands: indices);
406 if (!maybeExpandedMap)
407 return failure();
408
409 // Build memref.store valueToStore, memref[expandedMap.results].
410 rewriter.replaceOpWithNewOp<memref::StoreOp>(
411 op, args: op.getValueToStore(), args: op.getMemRef(), args&: *maybeExpandedMap);
412 return success();
413 }
414};
415
416/// Apply the affine maps from an 'affine.dma_start' operation to each of their
417/// respective map operands, and feed the results to a newly created
418/// 'memref.dma_start' operation (which replaces the original
419/// 'affine.dma_start').
420class AffineDmaStartLowering : public OpRewritePattern<AffineDmaStartOp> {
421public:
422 using OpRewritePattern<AffineDmaStartOp>::OpRewritePattern;
423
424 LogicalResult matchAndRewrite(AffineDmaStartOp op,
425 PatternRewriter &rewriter) const override {
426 SmallVector<Value, 8> operands(op.getOperands());
427 auto operandsRef = llvm::ArrayRef(operands);
428
429 // Expand affine map for DMA source memref.
430 auto maybeExpandedSrcMap = expandAffineMap(
431 builder&: rewriter, loc: op.getLoc(), affineMap: op.getSrcMap(),
432 operands: operandsRef.drop_front(N: op.getSrcMemRefOperandIndex() + 1));
433 if (!maybeExpandedSrcMap)
434 return failure();
435 // Expand affine map for DMA destination memref.
436 auto maybeExpandedDstMap = expandAffineMap(
437 builder&: rewriter, loc: op.getLoc(), affineMap: op.getDstMap(),
438 operands: operandsRef.drop_front(N: op.getDstMemRefOperandIndex() + 1));
439 if (!maybeExpandedDstMap)
440 return failure();
441 // Expand affine map for DMA tag memref.
442 auto maybeExpandedTagMap = expandAffineMap(
443 builder&: rewriter, loc: op.getLoc(), affineMap: op.getTagMap(),
444 operands: operandsRef.drop_front(N: op.getTagMemRefOperandIndex() + 1));
445 if (!maybeExpandedTagMap)
446 return failure();
447
448 // Build memref.dma_start operation with affine map results.
449 rewriter.replaceOpWithNewOp<memref::DmaStartOp>(
450 op, args: op.getSrcMemRef(), args&: *maybeExpandedSrcMap, args: op.getDstMemRef(),
451 args&: *maybeExpandedDstMap, args: op.getNumElements(), args: op.getTagMemRef(),
452 args&: *maybeExpandedTagMap, args: op.getStride(), args: op.getNumElementsPerStride());
453 return success();
454 }
455};
456
457/// Apply the affine map from an 'affine.dma_wait' operation tag memref,
458/// and feed the results to a newly created 'memref.dma_wait' operation (which
459/// replaces the original 'affine.dma_wait').
460class AffineDmaWaitLowering : public OpRewritePattern<AffineDmaWaitOp> {
461public:
462 using OpRewritePattern<AffineDmaWaitOp>::OpRewritePattern;
463
464 LogicalResult matchAndRewrite(AffineDmaWaitOp op,
465 PatternRewriter &rewriter) const override {
466 // Expand affine map for DMA tag memref.
467 SmallVector<Value, 8> indices(op.getTagIndices());
468 auto maybeExpandedTagMap =
469 expandAffineMap(builder&: rewriter, loc: op.getLoc(), affineMap: op.getTagMap(), operands: indices);
470 if (!maybeExpandedTagMap)
471 return failure();
472
473 // Build memref.dma_wait operation with affine map results.
474 rewriter.replaceOpWithNewOp<memref::DmaWaitOp>(
475 op, args: op.getTagMemRef(), args&: *maybeExpandedTagMap, args: op.getNumElements());
476 return success();
477 }
478};
479
480/// Apply the affine map from an 'affine.vector_load' operation to its operands,
481/// and feed the results to a newly created 'vector.load' operation (which
482/// replaces the original 'affine.vector_load').
483class AffineVectorLoadLowering : public OpRewritePattern<AffineVectorLoadOp> {
484public:
485 using OpRewritePattern<AffineVectorLoadOp>::OpRewritePattern;
486
487 LogicalResult matchAndRewrite(AffineVectorLoadOp op,
488 PatternRewriter &rewriter) const override {
489 // Expand affine map from 'affineVectorLoadOp'.
490 SmallVector<Value, 8> indices(op.getMapOperands());
491 auto resultOperands =
492 expandAffineMap(builder&: rewriter, loc: op.getLoc(), affineMap: op.getAffineMap(), operands: indices);
493 if (!resultOperands)
494 return failure();
495
496 // Build vector.load memref[expandedMap.results].
497 rewriter.replaceOpWithNewOp<vector::LoadOp>(
498 op, args: op.getVectorType(), args: op.getMemRef(), args&: *resultOperands);
499 return success();
500 }
501};
502
503/// Apply the affine map from an 'affine.vector_store' operation to its
504/// operands, and feed the results to a newly created 'vector.store' operation
505/// (which replaces the original 'affine.vector_store').
506class AffineVectorStoreLowering : public OpRewritePattern<AffineVectorStoreOp> {
507public:
508 using OpRewritePattern<AffineVectorStoreOp>::OpRewritePattern;
509
510 LogicalResult matchAndRewrite(AffineVectorStoreOp op,
511 PatternRewriter &rewriter) const override {
512 // Expand affine map from 'affineVectorStoreOp'.
513 SmallVector<Value, 8> indices(op.getMapOperands());
514 auto maybeExpandedMap =
515 expandAffineMap(builder&: rewriter, loc: op.getLoc(), affineMap: op.getAffineMap(), operands: indices);
516 if (!maybeExpandedMap)
517 return failure();
518
519 rewriter.replaceOpWithNewOp<vector::StoreOp>(
520 op, args: op.getValueToStore(), args: op.getMemRef(), args&: *maybeExpandedMap);
521 return success();
522 }
523};
524
525} // namespace
526
527void mlir::populateAffineToStdConversionPatterns(RewritePatternSet &patterns) {
528 // clang-format off
529 patterns.add<
530 AffineApplyLowering,
531 AffineDmaStartLowering,
532 AffineDmaWaitLowering,
533 AffineLoadLowering,
534 AffineMinLowering,
535 AffineMaxLowering,
536 AffineParallelLowering,
537 AffinePrefetchLowering,
538 AffineStoreLowering,
539 AffineForLowering,
540 AffineIfLowering,
541 AffineYieldOpLowering>(arg: patterns.getContext());
542 // clang-format on
543}
544
545void mlir::populateAffineToVectorConversionPatterns(
546 RewritePatternSet &patterns) {
547 // clang-format off
548 patterns.add<
549 AffineVectorLoadLowering,
550 AffineVectorStoreLowering>(arg: patterns.getContext());
551 // clang-format on
552}
553
554namespace {
555class LowerAffine : public impl::LowerAffinePassBase<LowerAffine> {
556 void runOnOperation() override {
557 RewritePatternSet patterns(&getContext());
558 populateAffineToStdConversionPatterns(patterns);
559 populateAffineToVectorConversionPatterns(patterns);
560 populateAffineExpandIndexOpsPatterns(patterns);
561 ConversionTarget target(getContext());
562 target.addLegalDialect<arith::ArithDialect, memref::MemRefDialect,
563 scf::SCFDialect, VectorDialect>();
564 if (failed(Result: applyPartialConversion(op: getOperation(), target,
565 patterns: std::move(patterns))))
566 signalPassFailure();
567 }
568};
569} // namespace
570

source code of mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp