//===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

#define DEBUG_TYPE "linalg-promotion"
/// Allocate a new buffer of `size` * `width` i8, where `width` is given by the
/// data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         std::optional<unsigned> alignment = std::nullopt) {
  llvm::TypeSize width = layout.getTypeSize(elementType);
  assert(!width.isScalable() && "cannot allocate buffer for a scalable vector");

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  // Static buffer.
  if (std::optional<int64_t> cst = getConstantIntValue(allocSize)) {
    auto staticBufferType = MemRefType::get(width.getFixedValue() * cst.value(),
                                            b.getIntegerType(8));
    staticBufferType =
        MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
    if (options.useAlloca) {
      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
                                        alignmentAttr);
    }
    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
                                     alignmentAttr);
  }

  // Fallback dynamic buffer.
  auto dynamicBufferType =
      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
  dynamicBufferType =
      MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}

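// Example (illustrative sketch, with hypothetical SSA names): promoting a tile
// of 128 f32 elements takes the static path above and emits roughly
//
//   %buf = memref.alloc() {alignment = 16 : i64} : memref<512xi8>
//
// while a dynamic `allocSize` falls back to
//
//   %c4 = arith.constant 4 : index
//   %bytes = arith.muli %c4, %size : index
//   %buf = memref.alloc(%bytes) : memref<?xi8>
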
/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view of it with the shape
/// `boundingSubViewSize`.
static std::optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
    std::optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.create<arith::ConstantIndexOp>(0);
  auto one = b.create<arith::ConstantIndexOp>(1);

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamic);

  auto viewMemRefType = MemRefType::get(dynSizes, viewType.getElementType());
  viewMemRefType =
      MemRefType::Builder(viewMemRefType).setMemorySpace(memorySpaceAttr);
  Value view = b.createOrFold<memref::ViewOp>(viewMemRefType, buffer, zero,
                                              boundingSubViewSize);
  return view;
}

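// Example (illustrative sketch, with hypothetical SSA names): for bounding
// sizes [%c4, %c4] and f32 elements, the default callback emits roughly
//
//   %alloc = memref.alloc() : memref<64xi8>
//   %view = memref.view %alloc[%c0][%c4, %c4]
//         : memref<64xi8> to memref<?x?xf32>
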
/// Default implementation of the deallocation of the buffer used for
/// promotion. It expects to get the same value that the default allocation
/// method returned, i.e. the result of a ViewOp.
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  if (!options.useAlloca) {
    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
                                viewOp.getSource());
  }
  return success();
}

namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API, which exposes positional arguments to control which
/// operands are promoted.
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<int64_t, Value> subViews;
  /// Operand numbers of the subviews to copy in using `copyInFn`.
  llvm::SmallSet<int64_t, 4> operandsNumbersToCopyIn;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocation and deallocation of promoted buffers,
  /// as well as to copy the data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of promoted buffer.
  std::optional<unsigned> alignment;
};
} // namespace

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasPureBufferSemantics() &&
         "revisit usage of shaped operand");
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
                             options.useFullTileBuffersDefault);

  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand.get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      // In the case of a linalg.generic, copy in only if the subview is
      // actually used in the linalg payload.
      if (!isa<linalg::GenericOp>(linalgOp) ||
          linalgOp.payloadUsesValueFromOperand(&opOperand))
        operandsNumbersToCopyIn.insert(operandNumber);
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> std::optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<linalg::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}

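// Example (illustrative sketch): callers typically configure only the knobs
// they need; everything left unset falls back to the defaults resolved above.
// The variable name below is hypothetical:
//
//   LinalgPromotionOptions promoteInputs =
//       LinalgPromotionOptions()
//           .setOperandsToPromote({0, 1})
//           .setUseFullTileBuffersByDefault(true)
//           .setAlignment(16);
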
// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView` and accounts for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    const AllocBufferCallbackFn &allocationFn, DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is known statically, no need
    // to look for the bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (auto attr = llvm::dyn_cast_if_present<Attribute>(rangeValue.size)) {
      size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
    } else {
      FailureOr<int64_t> upperBound =
          ValueBoundsConstraintSet::computeConstantBound(
              presburger::BoundType::UB, rangeValue.size,
              /*stopCondition=*/nullptr, /*closedUB=*/true);
      size = failed(upperBound)
                 ? getValueOrCreateConstantIndexOp(b, loc, rangeValue.size)
                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
  // If a callback is not specified, then use the default implementation for
  // allocating the promoted buffer.
  std::optional<Value> fullLocalView =
      allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}

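// Example (illustrative sketch, with hypothetical SSA names): for a rank-2
// subview %sv whose sizes are bounded by constants, `allocationFn` returns a
// full view %full (e.g. a memref.view of type memref<?x?xf32>), and the code
// above then creates roughly
//
//   %d0 = memref.dim %sv, %c0 : memref<?x?xf32, strided<[?, ?], offset: ?>>
//   %d1 = memref.dim %sv, %c1 : memref<?x?xf32, strided<[?, ?], offset: ?>>
//   %part = memref.subview %full[0, 0] [%d0, %d1] [1, 1]
//         : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1]>>
//
// so `partialLocalView` matches the tile's actual size at the boundary while
// `fullLocalView` keeps the static full-tile footprint.
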
static FailureOr<MapVector<int64_t, PromotionInfo>>
promoteSubViews(ImplicitLocOpBuilder &b,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  if (options.subViews.empty())
    return failure();

  MapVector<int64_t, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
    auto promotionInfo = promoteSubviewAsNewBuffer(
        b, b.getLoc(), subView, options.allocationFn, layout);
    if (failed(promotionInfo))
      return failure();
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Type subviewEltType = subView.getType().getElementType();
    Value fillVal =
        llvm::TypeSwitch<Type, Value>(subviewEltType)
            .Case([&](FloatType t) {
              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
            })
            .Case([&](IntegerType t) {
              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
            })
            .Case([&](ComplexType t) {
              Value tmp;
              if (auto et = dyn_cast<FloatType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
              else if (auto et = dyn_cast<IntegerType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(et, 0));
              return b.create<complex::CreateOp>(t, tmp, tmp);
            })
            .Default([](auto) { return Value(); });
    if (!fillVal)
      return failure();
    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
  }

  // Copy data into the promoted buffers. Use callback if provided.
  for (auto v : options.subViews) {
    auto *info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (options.operandsNumbersToCopyIn.count(v.first) == 0)
      continue;
    if (failed(options.copyInFn(
            b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
            info->second.partialLocalView)))
      return failure();
  }
  return promotionInfoMap;
}

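// Example (illustrative sketch): for a promoted f32 operand that uses the full
// tile buffer, the loop above pads with zeros and the default copy-in callback
// then copies the live data, roughly
//
//   %cst = arith.constant 0.000000e+00 : f32
//   linalg.fill ins(%cst : f32) outs(%full : memref<?x?xf32>)
//   linalg.copy ins(%sv : memref<?x?xf32, strided<[?, ?], offset: ?>>)
//               outs(%part : memref<?x?xf32, strided<[?, 1]>>)
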
static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasPureBufferSemantics() &&
         "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This supports cases such as FillOp taking extra
  // scalars, etc. Keep a reference to the output buffers.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op->getNumOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand &opOperand : op->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand.get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      if (operandNumber >= op.getNumDpsInputs())
        writebackViews.emplace_back(std::make_pair(
            opOperand.get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView));
    } else {
      opViews.push_back(opOperand.get());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}

LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linalgOp || !linalgOp.hasPureBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    auto sv =
        isa_and_nonnull<memref::SubViewOp>(opOperand.get().getDefiningOp());
    if (sv) {
      if (!options.operandsToPromote ||
          options.operandsToPromote->count(opOperand.getOperandNumber()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}

FailureOr<LinalgOp>
mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                              const LinalgPromotionOptions &options) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  auto layout = DataLayout::closest(linalgOp);
  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
  if (failed(res))
    return failure();
  return res;
}

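// Example (illustrative sketch): a typical driver guards the transformation
// with the precondition check; `rewriter` and `op` are hypothetical:
//
//   if (succeeded(promoteSubviewsPrecondition(op, options))) {
//     OpBuilder::InsertionGuard g(rewriter);
//     rewriter.setInsertionPoint(op);
//     FailureOr<LinalgOp> promoted = promoteSubViews(rewriter, op, options);
//     if (failed(promoted))
//       return failure();
//   }
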
/// Allocate the given subview to a memory address space in GPU by creating an
/// allocation operation and setting the memref type address space to the
/// desired address space.
static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    gpu::AddressSpace addressSpace) {
  OpBuilder::InsertionGuard guard(builder);

  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
  if (!funcOp)
    return std::nullopt;

  // The subview size bounds are expected to be constant; they specify the
  // shape of the allocation.
  SmallVector<int64_t> shape;
  for (Value bound : sizeBounds) {
    APInt value;
    if (!matchPattern(bound, m_ConstantInt(&value)))
      return std::nullopt;
    shape.push_back(value.getSExtValue());
  }

  builder.setInsertionPointToStart(&funcOp.front());
  auto type = MemRefType::get(
      shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
      gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
  Value buffer;
  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
  } else {
    return std::nullopt;
  }
  return buffer;
}

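// Example (illustrative sketch): with constant size bounds [32, 32] and f32
// elements, the helper hoists an allocation like the following to the start
// of the enclosing function:
//
//   %shared = memref.alloc()
//           : memref<32x32xf32, #gpu.address_space<workgroup>>
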
/// Allocate the subview in the GPU workgroup memory.
std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds,
      gpu::GPUDialect::getWorkgroupAddressSpace());
}

/// In the case of GPU workgroup memory there is no need to deallocate.
LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
                                                      Value /*buffer*/) {
  return success();
}

/// Create a memref copy operation, guarded by gpu.barrier ops before and
/// after the copy, to ensure data integrity.
LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
                                                  Value dst) {
  b.create<gpu::BarrierOp>(src.getLoc());
  Operation *copyOp = b.create<memref::CopyOp>(src.getLoc(), src, dst);
  b.create<gpu::BarrierOp>(copyOp->getLoc());
  return success();
}

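// Example (illustrative sketch): the emitted copy-in to workgroup memory is
// guarded on both sides so that all threads observe a consistent buffer:
//
//   gpu.barrier
//   memref.copy %src, %dst
//     : memref<32x32xf32, strided<[?, ?], offset: ?>>
//       to memref<32x32xf32, #gpu.address_space<workgroup>>
//   gpu.barrier
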
/// Allocate the subview in the GPU private memory.
std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds, gpu::GPUDialect::getPrivateAddressSpace());
}

/// Plain copy between `src` and `dst`.
LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
                                                   Value dst) {
  b.create<memref::CopyOp>(src.getLoc(), src, dst);
  return success();
}

/// In the case of GPU private memory there is no need to deallocate since the
/// memory is freed when the allocation goes out of scope.
LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
                                                       Value /*buffer*/) {
  return success();
}