//===- TestAffineDataCopy.cpp - Test affine data copy utility -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to test affine data copy utility functions and
// options.
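//
// Example invocations (assuming this test pass is linked into mlir-opt, as it
// is in MLIR's test tools; the option names come from the Option<> fields
// declared below):
//   mlir-opt -test-affine-data-copy='memref-filter=1' input.mlir
//   mlir-opt -test-affine-data-copy='for-memref-region=1' input.mlir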
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

#define PASS_NAME "test-affine-data-copy"

using namespace mlir;
using namespace mlir::affine;

namespace {

struct TestAffineDataCopy
    : public PassWrapper<TestAffineDataCopy, OperationPass<func::FuncOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAffineDataCopy)

  StringRef getArgument() const final { return PASS_NAME; }
  StringRef getDescription() const final {
    return "Tests affine data copy utility functions.";
  }
  TestAffineDataCopy() = default;
  TestAffineDataCopy(const TestAffineDataCopy &pass) : PassWrapper(pass) {}

  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<memref::MemRefDialect>();
  }
  void runOnOperation() override;

private:
  Option<bool> clMemRefFilter{
      *this, "memref-filter",
      llvm::cl::desc(
          "Enable memref filter testing in affine data copy optimization"),
      llvm::cl::init(false)};
  Option<bool> clTestGenerateCopyForMemRegion{
      *this, "for-memref-region",
      llvm::cl::desc("Test copy generation for a single memref region"),
      llvm::cl::init(false)};
};

} // namespace

void TestAffineDataCopy::runOnOperation() {
  // Gather all AffineForOps by loop depth.
  std::vector<SmallVector<AffineForOp, 2>> depthToLoops;
  gatherLoops(getOperation(), depthToLoops);
  if (depthToLoops.empty())
    return;

  // Only support tests with a single loop nest and a single innermost loop
  // for now.
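  // E.g., a nest of the following shape is handled (names and bounds are
  // purely illustrative):
  //   affine.for %i = 0 to 1024 {
  //     affine.for %j = 0 to 1024 {
  //       %v = affine.load %A[%i, %j] : memref<1024x1024xf32>
  //       affine.store %v, %B[%i, %j] : memref<1024x1024xf32>
  //     }
  //   }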
  unsigned innermostLoopIdx = depthToLoops.size() - 1;
  if (depthToLoops[0].size() != 1 || depthToLoops[innermostLoopIdx].size() != 1)
    return;

  auto loopNest = depthToLoops[0][0];
  auto innermostLoop = depthToLoops[innermostLoopIdx][0];
  AffineLoadOp load;
  if (clMemRefFilter || clTestGenerateCopyForMemRegion) {
    // Gather MemRef filter. For simplicity, we use the first loaded memref
    // found in the innermost loop.
    for (auto &op : *innermostLoop.getBody()) {
      if (auto ld = dyn_cast<AffineLoadOp>(op)) {
        load = ld;
        break;
      }
    }
  }
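  // Bail out if no affine.load was found; in particular, when neither option
  // is set, the search above is skipped and `load` stays null.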
  if (!load)
    return;

  AffineCopyOptions copyOptions = {/*generateDma=*/false,
                                   /*slowMemorySpace=*/0,
                                   /*fastMemorySpace=*/0,
                                   /*tagMemorySpace=*/0,
                                   /*fastMemCapacityBytes=*/32 * 1024 * 1024UL};
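  // I.e., generate plain point-wise copy loops (no DMAs), with all buffers in
  // the default memory space and a 32 MiB budget for the fast buffers.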
  DenseSet<Operation *> copyNests;
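  // With `memref-filter`, copies are generated across the loop nest for just
  // the memref loaded above; with `for-memref-region`, a copy is generated for
  // a single precomputed region of that memref.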
  if (clMemRefFilter) {
    if (failed(affineDataCopyGenerate(loopNest, copyOptions, load.getMemRef(),
                                      copyNests)))
      return;
  } else if (clTestGenerateCopyForMemRegion) {
    CopyGenerateResult result;
    MemRefRegion region(loopNest.getLoc());
    if (failed(region.compute(load, /*loopDepth=*/0)))
      return;
    if (failed(generateCopyForMemRegion(region, loopNest, copyOptions, result)))
      return;
  }

  // Promote any single iteration loops in the copy nests and simplify
  // load/stores.
  SmallVector<Operation *, 4> copyOps;
  for (Operation *nest : copyNests) {
    // With a post order walk, the erasure of loops does not affect
    // continuation of the walk or the collection of load/store ops.
    nest->walk([&](Operation *op) {
      if (auto forOp = dyn_cast<AffineForOp>(op))
        (void)promoteIfSingleIteration(forOp);
      else if (auto loadOp = dyn_cast<AffineLoadOp>(op))
        copyOps.push_back(loadOp);
      else if (auto storeOp = dyn_cast<AffineStoreOp>(op))
        copyOps.push_back(storeOp);
    });
  }

  // Promoting single iteration loops could lead to simplification of the
  // generated loads/stores, and the latter could anyway also be canonicalized.
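  // Run the greedy driver in strict mode so that only the collected copy ops
  // (and any ops the rewrites create) are touched.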
  RewritePatternSet patterns(&getContext());
  AffineLoadOp::getCanonicalizationPatterns(patterns, &getContext());
  AffineStoreOp::getCanonicalizationPatterns(patterns, &getContext());
  GreedyRewriteConfig config;
  config.strictMode = GreedyRewriteStrictness::ExistingAndNewOps;
  (void)applyOpPatternsAndFold(copyOps, std::move(patterns), config);
}

namespace mlir {
void registerTestAffineDataCopyPass() {
  PassRegistration<TestAffineDataCopy>();
}
} // namespace mlir