//===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a straightforward conversion of a loop nest into a GPU
// kernel. The caller is expected to guarantee that the conversion is correct
// or to further transform the kernel to ensure correctness.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/ParallelLoopMapper.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/Debug.h"
#include <optional>

#define DEBUG_TYPE "loops-to-gpu"

using namespace mlir;
using namespace mlir::affine;
using namespace mlir::scf;

// Name of the internal attribute used to mark operations visited during
// conversion.
//
// NOTE: The conversion originally used the following legality criterion:
//   `!parallelOp->hasAttr(gpu::getMappingAttrName())`
// However, the provided pattern might reject some cases based on a more
// detailed analysis of the `mapping` attribute.
// To avoid dialect conversion failure due to a non-converted illegal
// operation, we use this extra unit attribute as a marker indicating that the
// operation was checked by the pattern and should be considered legal in
// subsequent legality checks. The `finalizeParallelLoopToGPUConversion`
// function cleans up this extra attribute and is supposed to be called after
// the dialect conversion.
//
// TODO: Implement a cleaner solution, factoring out the "matching" logic
// from the pattern and its callees into a separate function that can be called
// from both the pattern and the op legality check.
static constexpr StringLiteral kVisitedAttrName = "SCFToGPU_visited";

// Extract an indexed value from KernelDim3.
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
  switch (pos) {
  case 0:
    return dim3.x;
  case 1:
    return dim3.y;
  case 2:
    return dim3.z;
  default:
    llvm_unreachable("dim3 position out of bounds");
  }
  return nullptr;
}

// Get the lower bound-related operands of a loop operation.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

// Get the upper bound-related operands of a loop operation.
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step. If the step is an attribute,
// materialize a corresponding constant using builder.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return builder.create<arith::ConstantIndexOp>(forOp.getLoc(),
                                                forOp.getStepAsInt());
}

// Get a Value for the loop lower bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineLowerBound(forOp, builder);
}

// Get a Value for the loop upper bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineUpperBound(forOp, builder);
}

// Check the structure of the loop nest:
//   - there are enough loops to map to numDims;
//   - the loops are perfectly nested;
//   - the loop bounds can be computed above the outermost loop.
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
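//
// For illustration only (values and op names are hypothetical), a nest such as
// the following is mappable for numDims = 2: the two loops are perfectly
// nested and both upper bounds are defined above the outermost loop.
//
//   affine.for %i = 0 to %n {
//     affine.for %j = 0 to %m {
//       "compute.body"(%i, %j) : (index, index) -> ()
//     }
//   }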
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.getRegion();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop can have an arbitrary body, skip the perfect nesting
    // check for it.
    if (i == e - 1)
      break;

    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}

static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LLVM_DEBUG(llvm::dbgs() << "nothing to map");
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}

namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
  std::optional<AffineForOp> collectBounds(AffineForOp forOp,
                                           unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops mapped to blocks or threads.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops mapped to blocks or threads.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops mapped to blocks or threads.
  SmallVector<Value, 6> ivs;
  // Steps of the loops mapped to blocks or threads.
  SmallVector<Value, 6> steps;
};
} // namespace

// Collect ranges, bounds, steps and induction variables in preparation for
// mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, std::nullopt on failure.
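//
// As a small worked example (purely illustrative), for a loop
//   affine.for %i = %lb to %ub step 4 { ... }
// the recorded range is computed as (%ub - %lb) ceildiv 4, the lower bound is
// %lb, the step is the materialized constant 4, and the induction variable is
// %i.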
std::optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return std::nullopt;
    }

    Value range = builder.create<arith::SubIOp>(currentLoop.getLoc(),
                                                upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    if (getConstantIntValue(step) != static_cast<int64_t>(1))
      range =
          builder.create<arith::CeilDivSIOp>(currentLoop.getLoc(), range, step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}

// Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
// This expects "innermostForOp" to point to the last loop to be transformed to
// the kernel, and to have (numBlockDims + numThreadDims) perfectly nested
// loops between "rootForOp" and "innermostForOp".
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation. If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne =
      (numBlockDims < 3 || numThreadDims < 3)
          ? builder.create<arith::ConstantIndexOp>(rootForOp.getLoc(), 1)
          : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop to the
  // launch op.
  auto launchOp = builder.create<gpu::LaunchOp>(
      rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
      blockSizeY, blockSizeZ);

  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the gpu
  // launch body block. Do not move the entire block because of the difference
  // in block arguments.
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  builder.create<gpu::TerminatorOp>(terminatorLoc, std::nullopt);
  launchOp.getBody().front().getOperations().splice(
      launchOp.getBody().front().begin(),
      innermostForOp.getBody()->getOperations());

  // Remap the loop iterators to use block/thread identifiers instead. Loops
  // may iterate from LB with step S whereas GPU thread/block ids always
  // iterate from 0 to N with step 1. Therefore, loop induction variables are
  // replaced with (gpu-thread/block-id * S) + LB.
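  //
  // For example (hypothetical values), a loop iterating from %lb with step 4
  // that is mapped to the x-dimension of threads has its induction variable
  // rewritten as thread_id.x * 4 + %lb.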
  builder.setInsertionPointToStart(&launchOp.getBody().front());
  auto *lbArgumentIt = lbs.begin();
  auto *stepArgumentIt = steps.begin();
  for (const auto &en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    if (getConstantIntValue(step) != static_cast<int64_t>(1))
      id = builder.create<arith::MulIOp>(rootForOp.getLoc(), step, id);

    Value ivReplacement =
        builder.create<arith::AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the original outermost loop.
  rootForOp.erase();
}

// Generic loop to GPU kernel conversion function.
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
    return failure();

  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);

  return success();
}

LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims, numThreadDims);
}

namespace {
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
} // namespace

/// Tries to derive a static upper bound from the defining operation of
/// `upperBound`.
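///
/// For instance (illustrative only), if `upperBound` is defined by
///   %ub = affine.min affine_map<(d0) -> (d0, 64)>(%n)
/// the constant map result 64 is materialized and returned as the static
/// bound.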
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<arith::ConstantIndexOp>()) {
    return op;
  }

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.getMap().getResults()) {
      if (auto constExpr = dyn_cast<AffineConstantExpr>(result)) {
        return rewriter.create<arith::ConstantIndexOp>(minOp.getLoc(),
                                                       constExpr.getValue());
      }
    }
  }

  if (auto minOp = upperBound.getDefiningOp<arith::MinSIOp>()) {
    for (Value operand : {minOp.getLhs(), minOp.getRhs()}) {
      if (auto staticBound = deriveStaticUpperBound(operand, rewriter))
        return staticBound;
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<arith::MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<arith::ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<arith::ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // Assumptions about the upper bound of minimum computations no longer
        // work if multiplied by mixed signs, so abort in this case.
        if ((lhs.value() < 0) != (rhs.value() < 0))
          return {};

        return rewriter.create<arith::ConstantIndexOp>(
            multiplyOp.getLoc(), lhs.value() * rhs.value());
      }
  }

  return {};
}

static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}

static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX:
    return 0;
  case gpu::Processor::BlockY:
    return 1;
  case gpu::Processor::BlockZ:
    return 2;
  case gpu::Processor::ThreadX:
    return 3;
  case gpu::Processor::ThreadY:
    return 4;
  case gpu::Processor::ThreadZ:
    return 5;
  default:;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}

/// Modifies the current transformation state to capture the effect of the
/// given `scf.parallel` operation on index substitutions and the operations to
/// be inserted.
/// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
/// this function will
/// - compute the loop index based on the hardware id and affine map from the
///   mapping and update `cloningMap` to substitute all uses.
/// - derive a new upper bound for the hardware id and augment the provided
///   `gpu.launch` operation accordingly.
/// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
///   and update the rewriter to insert into the conditional's body.
/// If the dimension is mapped to sequential,
/// - insert a for loop into the body and update the rewriter to insert into
///   the for loop's body.
/// - update the `cloningMap` to replace uses of the index with the index of
///   the new for loop.
/// In either case,
/// - append the instructions from the loop's body to the worklist, in reverse
///   order.
/// To note the end of the current scope in case a loop or conditional was
/// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
/// worklist. This signals the processor of the worklist to pop the rewriter
/// one scope-level up.
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp, IRMapping &cloningMap,
    SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  // TODO: Verify that this is a valid GPU mapping.
  // processor ids: 0-2 -> block [x/y/z], 3-5 -> thread [x/y/z], 6 -> sequential
  ArrayAttr mapping =
      parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());

  // TODO: Support reductions.
  if (!mapping || parallelOp.getNumResults() != 0)
    return failure();

  Location loc = parallelOp.getLoc();

  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
      return rewriter.create<arith::ConstantOp>(constOp.getLoc(),
                                                constOp.getValue());
    return {};
  };

  for (auto config : llvm::zip(
           mapping, parallelOp.getInductionVars(), parallelOp.getLowerBound(),
           parallelOp.getUpperBound(), parallelOp.getStep())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation =
        dyn_cast<gpu::ParallelLoopDimMappingAttr>(mappingAttribute);
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = annotation.getProcessor();

    if (isMappedToProcessor(processor)) {
      // Use the corresponding thread/grid index as replacement for the loop iv.
      Value operand =
          launchOp.getBody().getArgument(getLaunchOpArgumentNum(processor));
      // Take the index map and add the lower bound and step computations in.
      // This computes operand * step + lowerBound.
      // Use an affine map here so that it composes nicely with the provided
      // annotation.
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      newIndex = rewriter.create<AffineApplyOp>(
          loc, annotation.getMap().compose(lowerAndStep),
          ValueRange{operand, ensureLaunchIndependent(step),
                     ensureLaunchIndependent(lowerBound)});
      // If there was also a bound, insert that, too.
      // TODO: Check that we do not assign bounds twice.
      if (annotation.getBound()) {
        // We pass as the single operand to the bound-map the number of
        // iterations, which is (upperBound - lowerBound) ceilDiv step. To
        // support inner loops with dynamic upper bounds (as generated by e.g.
        // tiling), try to derive a max for the bounds. If the used bound for
        // the hardware id is imprecise, wrap the contained code into a
        // conditional. If the lower-bound is constant or defined before the
        // launch, we can use it in the launch bounds. Otherwise fail.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<arith::ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<arith::ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper-bound is constant or defined before the launch, we can
        // use it in the launch bounds directly. Otherwise try to derive a
        // bound.
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<arith::ConstantOp>(upperBound.getDefiningOp());
        {
          PatternRewriter::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(launchOp);
          if (!boundIsPrecise) {
            upperBound = deriveStaticUpperBound(upperBound, rewriter);
            if (!upperBound) {
              return rewriter.notifyMatchFailure(
                  parallelOp,
                  "cannot derive loop-invariant upper bound for number of "
                  "iterations");
            }
          }
          // Compute the number of iterations needed. We compute this as an
          // affine expression ceilDiv (upperBound - lowerBound) step. We use
          // affine.apply here so that it composes nicely with the provided
          // map.
          AffineMap stepMap = AffineMap::get(
              1, 2,
              ((rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
                   .ceilDiv(rewriter.getAffineSymbolExpr(1))));
          Value launchBound = rewriter.create<AffineApplyOp>(
              loc, annotation.getBound().compose(stepMap),
              ValueRange{
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(upperBound)),
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(lowerBound)),
                  ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
          // todo(herhut,ravishankarm): Update the behavior of setMappingAttr
          // when this condition is relaxed.
          if (bounds.contains(processor)) {
            return rewriter.notifyMatchFailure(
                parallelOp, "cannot redefine the bound for processor " +
                                Twine(static_cast<int64_t>(processor)));
          }
          bounds[processor] = launchBound;
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          Value originalBound = std::get<3>(config);
          arith::CmpIOp pred = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(originalBound));
          scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of the
          // if body again. We use the launchOp here, as that cannot be part of
          // the body's instructions.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop.
      auto loopOp = rewriter.create<scf::ForOp>(
          loc, cloningMap.lookupOrDefault(lowerBound),
          cloningMap.lookupOrDefault(upperBound),
          cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Put a sentinel into the worklist so we know when to pop out of the
      // loop body again. We use the launchOp here, as that cannot be part of
      // the body's instructions.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }

  // Propagate custom user-defined optional attributes that can be used at a
  // later stage, such as extension data for GPU kernel dispatch.
  for (const auto &namedAttr : parallelOp->getAttrs()) {
    if (namedAttr.getName() == gpu::getMappingAttrName() ||
        namedAttr.getName() == ParallelOp::getOperandSegmentSizeAttr())
      continue;
    launchOp->setAttr(namedAttr.getName(), namedAttr.getValue());
  }

  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}

/// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
/// operation.
///
/// This essentially transforms a loop nest into a corresponding SIMT function.
/// The conversion is driven by mapping annotations on the `scf.parallel`
/// operations. The mapping is provided via an array attribute named `mapping`
/// with one element per mapped loop dimension; each element has three entries:
///  - processor: the hardware id to map to. 0-2 are block dimensions, 3-5 are
///    thread dimensions and 6 is sequential.
///  - map : An affine map that is used to pre-process hardware ids before
///    substitution.
///  - bound : An affine map that is used to compute the bound of the hardware
///    id based on an upper bound of the number of iterations.
/// If the `scf.parallel` contains nested `scf.parallel` operations, those
/// need to be annotated, as well. Structurally, the transformation works by
/// splicing all operations from nested `scf.parallel` operations into a single
/// sequence. Indices mapped to hardware ids are substituted with those ids,
/// whereas sequential mappings result in a sequential for-loop. To have more
/// flexibility when mapping code to hardware ids, the transform supports two
/// affine maps. The first `map` is used to compute the actual index for
/// substitution from the hardware id. The second `bound` is used to compute
/// the launch dimension for the hardware id from the number of iterations the
/// mapped loop is performing. Note that the number of iterations might be
/// imprecise if the corresponding loop-bounds are loop-dependent. In that
/// case, the hardware id might iterate over additional indices. The
/// transformation caters for this by predicating the created sequence of
/// instructions on the actual loop bound. This only works if a static upper
/// bound for the dynamic loop bound can be derived, currently via analyzing
/// `affine.min` operations.
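///
/// As a purely illustrative sketch (attribute syntax abridged, names
/// hypothetical), an annotated loop could look like:
///
///   scf.parallel (%i) = (%c0) to (%n) step (%c1) {
///     ...
///   } {mapping = [#gpu.loop_dim_map<processor = block_x,
///                                   map = (d0) -> (d0),
///                                   bound = (d0) -> (d0)>]}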
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // Mark the operation as visited for recursive legality check.
  parallelOp->setAttr(kVisitedAttrName, rewriter.getUnitAttr());

  // We can only transform starting at the outer-most loop. Launches inside of
  // parallel loops are not supported.
  if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
    return failure();
  // Create a launch operation. We start with bound one for all grid/block
  // sizes. Those will be refined later as we discover them from mappings.
  Location loc = parallelOp.getLoc();
  Value constantOne =
      rewriter.create<arith::ConstantIndexOp>(parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
      parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
      constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.getBody().front());
  rewriter.create<gpu::TerminatorOp>(loc);
  rewriter.setInsertionPointToStart(&launchOp.getBody().front());

  IRMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();

  // Whether we have seen any side-effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    // Now walk over the body and clone it.
    // TODO: This is only correct if there either is no further scf.parallel
    //       nested or this code is side-effect free. Otherwise we might need
    //       predication. We are overly conservative for now and only allow
    //       side-effects in the innermost scope.
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // Before entering a nested scope, make sure there have been no
      // sideeffects until now.
      if (seenSideeffects)
        return failure();
      // A nested scf.parallel needs insertion of code to compute indices.
      // Insert that now. This will also update the worklist with the loop's
      // body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
      auto *parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
    } else {
      // Otherwise we copy it over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Check for side effects.
      // TODO: Handle region side effects properly.
      seenSideeffects |=
          !isMemoryEffectFree(clone) || clone->getNumRegions() != 0;
      // If we are no longer in the innermost scope, sideeffects are disallowed.
      if (seenSideeffects && leftNestingScope)
        return failure();
    }
  }

  // Now that we succeeded creating the launch operation, also update the
  // bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}

void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
  patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
}

void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
  target.addLegalDialect<memref::MemRefDialect>();
  target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
    return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
           parallelOp->hasAttr(kVisitedAttrName);
  });
}

void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
  op->walk([](scf::ParallelOp parallelOp) {
    parallelOp->removeAttr(kVisitedAttrName);
  });
}