1//===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the linalg dialect Promotion pass.
10//
11//===----------------------------------------------------------------------===//
12
13#include "mlir/Dialect/Arith/IR/Arith.h"
14#include "mlir/Dialect/Arith/Utils/Utils.h"
15#include "mlir/Dialect/Complex/IR/Complex.h"
16#include "mlir/Dialect/Func/IR/FuncOps.h"
17#include "mlir/Dialect/GPU/IR/GPUDialect.h"
18#include "mlir/Dialect/Linalg/IR/Linalg.h"
19#include "mlir/Dialect/Linalg/Passes.h"
20#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
21#include "mlir/IR/AffineMap.h"
22#include "mlir/IR/ImplicitLocOpBuilder.h"
23#include "mlir/Interfaces/ValueBoundsOpInterface.h"
24#include "mlir/Support/LLVM.h"
25#include "mlir/Transforms/FoldUtils.h"
26#include "llvm/ADT/MapVector.h"
27#include "llvm/ADT/SmallBitVector.h"
28#include "llvm/ADT/SmallSet.h"
29#include "llvm/ADT/TypeSwitch.h"
30#include "llvm/Support/Debug.h"
31
32using namespace mlir;
33using namespace mlir::linalg;
34using namespace mlir::scf;
35
36using llvm::MapVector;
37
38#define DEBUG_TYPE "linalg-promotion"
39
/// Alloc a new buffer of `size` * `width` i8; where `width` is given by the
/// data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         std::optional<unsigned> alignment = std::nullopt) {
  llvm::TypeSize width = layout.getTypeSize(elementType);
  assert(!width.isScalable() && "cannot allocate buffer for a scalable vector");

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  // Optional target memory space for the allocation (e.g. GPU workgroup).
  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  // Static buffer: when the element count folds to a constant, allocate a
  // memref<(width*cst) x i8> with a statically known size.
  if (std::optional<int64_t> cst = getConstantIntValue(allocSize)) {
    auto staticBufferType = MemRefType::get(width.getFixedValue() * cst.value(),
                                            b.getIntegerType(8));
    staticBufferType =
        MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
    if (options.useAlloca) {
      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
                                        alignmentAttr);
    }
    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
                                     alignmentAttr);
  }

  // Fallback dynamic buffer: memref<? x i8> sized at runtime by
  // `width * allocSize` bytes.
  auto dynamicBufferType =
      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
  dynamicBufferType =
      MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}
84
/// Default allocation callback function. This allocates a promoted buffer when
/// no call back to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view to get a memref type of shape
/// boundingSubViewSize.
static std::optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
    std::optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.create<arith::ConstantIndexOp>(0);
  auto one = b.create<arith::ConstantIndexOp>(1);

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  // Total number of elements: product of all bounding sizes.
  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  // Allocate a flat i8 buffer large enough to hold the promoted tile.
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  // Re-view the flat buffer as a fully dynamic memref of the original element
  // type shaped by `boundingSubViewSize` (offset 0).
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamic);

  auto viewMemRefType = MemRefType::get(dynSizes, viewType.getElementType());
  viewMemRefType =
      MemRefType::Builder(viewMemRefType).setMemorySpace(memorySpaceAttr);
  Value view = b.createOrFold<memref::ViewOp>(viewMemRefType, buffer, zero,
                                              boundingSubViewSize);
  return view;
}
117
118/// Default implementation of deallocation of the buffer use for promotion. It
119/// expects to get the same value that the default allocation method returned,
120/// i.e. result of a ViewOp.
121static LogicalResult
122defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
123 OpBuilder &b, Value fullLocalView) {
124 if (!options.useAlloca) {
125 auto viewOp = cast<memref::ViewOp>(Val: fullLocalView.getDefiningOp());
126 b.create<memref::DeallocOp>(location: viewOp.getSource().getLoc(),
127 args: viewOp.getSource());
128 }
129 return success();
130}
131
namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API which exposes positional arguments to control which operands
/// are promoted.
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote, keyed by operand number on the linalg op.
  MapVector<int64_t, Value> subViews;
  /// Subviews operand numbers to copy in using copyInFn.
  llvm::SmallSet<int64_t, 4> operandsNumbersToCopyIn;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;
  /// True if the original subview size should be used. This means the full tile
  /// buffer is the same size as the partial view.
  bool useOriginalSubviewSize;

  /// Callback functions for allocation and deallocation of promoted buffers, as
  /// well as to copy the data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of promoted buffer.
  std::optional<unsigned> alignment;
};
} // namespace
162
LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasPureBufferSemantics() &&
         "revisit usage of shaped operand");
  // Expand the (optional) per-operand full-tile-buffer flags to cover every
  // operand, defaulting missing entries to `useFullTileBuffersDefault`.
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
                             options.useFullTileBuffersDefault);
  useOriginalSubviewSize = options.useOriginalSubviewSize;

  // Collect the operands selected for promotion that are actually produced by
  // a memref.subview; others are silently skipped.
  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand.get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      // In case of linalg generic, copy in only if subview is used in linalg
      // payload.
      if (!isa<linalg::GenericOp>(linalgOp) ||
          linalgOp.payloadUsesValueFromOperand(&opOperand))
        operandsNumbersToCopyIn.insert(operandNumber);
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  // Use the user-provided allocation callback when present; otherwise fall
  // back to the default flat-i8-buffer + view scheme. NOTE: the lambda
  // captures `options`/`alignment` by reference; it must not outlive them.
  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> std::optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<linalg::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}
220
// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView` and accounting for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    bool useOriginalSubviewSize, const AllocBufferCallbackFn &allocationFn,
    DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  // Rank-reducing subviews drop some source dimensions; skip those when
  // computing the promoted buffer shape.
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is known statically, no need
    // to look for the bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (llvm::isa_and_present<Attribute>(rangeValue.size) ||
        useOriginalSubviewSize) {
      size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
    } else {
      // Ask the value-bounds analysis for a constant (closed) upper bound;
      // fall back to the dynamic size when no bound can be computed.
      FailureOr<int64_t> upperBound =
          ValueBoundsConstraintSet::computeConstantBound(
              presburger::BoundType::UB, rangeValue.size,
              /*stopCondition=*/nullptr, /*closedUB=*/true);
      size = failed(upperBound)
                 ? getValueOrCreateConstantIndexOp(b, loc, rangeValue.size)
                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    // The partial size is the actual (possibly dynamic) extent of the subview
    // along this result dimension.
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
  // If a callback is not specified, then use the default implementation for
  // allocating the promoted buffer.
  std::optional<Value> fullLocalView =
      allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  // The partial view is a slice of the full tile buffer at offset 0, with the
  // original partial sizes and unit strides.
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}
287
288static FailureOr<MapVector<int64_t, PromotionInfo>>
289promoteSubViews(ImplicitLocOpBuilder &b,
290 LinalgOpInstancePromotionOptions options, DataLayout &layout) {
291 if (options.subViews.empty())
292 return failure();
293
294 MapVector<int64_t, PromotionInfo> promotionInfoMap;
295
296 for (auto v : options.subViews) {
297 memref::SubViewOp subView =
298 cast<memref::SubViewOp>(Val: v.second.getDefiningOp());
299 auto promotionInfo = promoteSubviewAsNewBuffer(
300 b, loc: b.getLoc(), subView, useOriginalSubviewSize: options.useOriginalSubviewSize,
301 allocationFn: options.allocationFn, layout);
302 if (failed(Result: promotionInfo))
303 return failure();
304 promotionInfoMap[v.first] = *promotionInfo;
305
306 // Only fill the buffer if the full local view is used
307 if (!options.useFullTileBuffers[v.second])
308 continue;
309 Type subviewEltType = subView.getType().getElementType();
310 Value fillVal =
311 llvm::TypeSwitch<Type, Value>(subviewEltType)
312 .Case(caseFn: [&](FloatType t) {
313 return b.create<arith::ConstantOp>(args: FloatAttr::get(type: t, value: 0.0));
314 })
315 .Case(caseFn: [&](IntegerType t) {
316 return b.create<arith::ConstantOp>(args: IntegerAttr::get(type: t, value: 0));
317 })
318 .Case(caseFn: [&](ComplexType t) {
319 Value tmp;
320 if (auto et = dyn_cast<FloatType>(Val: t.getElementType()))
321 tmp = b.create<arith::ConstantOp>(args: FloatAttr::get(type: et, value: 0.0));
322 else if (auto et = cast<IntegerType>(Val: t.getElementType()))
323 tmp = b.create<arith::ConstantOp>(args: IntegerAttr::get(type: et, value: 0));
324 return b.create<complex::CreateOp>(args&: t, args&: tmp, args&: tmp);
325 })
326 .Default(defaultFn: [](auto) { return Value(); });
327 if (!fillVal)
328 return failure();
329 b.create<linalg::FillOp>(args&: fillVal, args&: promotionInfo->fullLocalView);
330 }
331
332 // Copy data into the promoted buffers. Use callback if provided.
333 for (auto v : options.subViews) {
334 auto *info = promotionInfoMap.find(Key: v.first);
335 if (info == promotionInfoMap.end())
336 continue;
337 if (options.operandsNumbersToCopyIn.count(V: v.first) == 0)
338 continue;
339 if (failed(Result: options.copyInFn(
340 b, cast<memref::SubViewOp>(Val: v.second.getDefiningOp()),
341 info->second.partialLocalView)))
342 return failure();
343 }
344 return promotionInfoMap;
345}
346
/// Promotes the selected operands of `op` in place: allocates local buffers,
/// rewires the op to use them, emits write-backs for outputs and deallocs for
/// all temporaries.
static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasPureBufferSemantics() &&
         "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear, this enforces that such
  // operands are not views. This is to support cases such as FillOp taking
  // extra scalars etc. Keep a reference to output buffers;
  SmallVector<Value, 8> opViews;
  opViews.reserve(op->getNumOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand &opOperand : op->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand.get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      // Only output operands (operand number >= number of DPS inputs) must be
      // copied back to the original buffer after the computation.
      if (operandNumber >= op.getNumDpsInputs())
        writebackViews.emplace_back(std::make_pair(
            opOperand.get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView));
    } else {
      opViews.push_back(opOperand.get());
    }
  }
  // Redirect the op to compute on the promoted buffers in place.
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}
399
400LogicalResult
401mlir::linalg::promoteSubviewsPrecondition(Operation *op,
402 LinalgPromotionOptions options) {
403 LinalgOp linalgOp = dyn_cast<LinalgOp>(Val: op);
404 // Transformation applies to buffers only.
405 if (!linalgOp || !linalgOp.hasPureBufferSemantics())
406 return failure();
407 // Check that at least one of the requested operands is indeed a subview.
408 for (OpOperand &opOperand : linalgOp->getOpOperands()) {
409 auto sv =
410 isa_and_nonnull<memref::SubViewOp>(Val: opOperand.get().getDefiningOp());
411 if (sv) {
412 if (!options.operandsToPromote ||
413 options.operandsToPromote->count(V: opOperand.getOperandNumber()))
414 return success();
415 }
416 }
417 // TODO: Check all subviews requested are bound by a static constant.
418 // TODO: Check that the total footprint fits within a given size.
419 return failure();
420}
421
422FailureOr<LinalgOp>
423mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
424 const LinalgPromotionOptions &options) {
425 LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
426 auto layout = DataLayout::closest(op: linalgOp);
427 ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
428 auto res = ::promoteSubViews(b, op: linalgOp, options: linalgOptions, layout);
429 if (failed(Result: res))
430 return failure();
431 return res;
432}
433
/// Allocate the given subview to a memory address space in GPU by creating a
/// allocation operation and setting the memref type address space to desired
/// address space.
static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    gpu::AddressSpace addressSpace) {
  OpBuilder::InsertionGuard guard(builder);

  // The allocation is hoisted to the enclosing function; bail out when the
  // subview is not inside a func.func.
  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
  if (!funcOp)
    return std::nullopt;

  // The subview size bounds are expected to be constant; they specify the
  // shape of the allocation.
  SmallVector<int64_t> shape;
  for (Value bound : sizeBounds) {
    APInt value;
    if (!matchPattern(bound, m_ConstantInt(&value)))
      return std::nullopt;
    shape.push_back(value.getSExtValue());
  }

  // Create the allocation at the function entry block so it dominates all
  // uses in the function body.
  builder.setInsertionPointToStart(&funcOp.front());
  auto type = MemRefType::get(
      shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
      gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
  Value buffer;
  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
  } else {
    // Only workgroup and private address spaces are supported here.
    return std::nullopt;
  }
  return buffer;
}
470
471/// Allocate the subview in the GPU workgroup memory.
472std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
473 OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
474 DataLayout &) {
475 return allocateSubviewGPUMemoryInAddressSpace(
476 builder, subview, sizeBounds,
477 addressSpace: gpu::GPUDialect::getWorkgroupAddressSpace());
478}
479
/// In case of GPU group memory there is no need to deallocate.
LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
                                                      Value /*buffer*/) {
  // Workgroup memory lives for the duration of the kernel; nothing to emit.
  return success();
}
485
486/// Create Memref copy operations and add gpu barrier guards before and after
487/// the copy operation to ensure data integrity.
488LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
489 Value dst) {
490 b.create<gpu::BarrierOp>(location: src.getLoc());
491 Operation *copyOp = b.create<memref::CopyOp>(location: src.getLoc(), args&: src, args&: dst);
492 b.create<gpu::BarrierOp>(location: copyOp->getLoc());
493 return success();
494}
495
496/// Allocate the subview in the GPU private memory.
497std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
498 OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
499 DataLayout &) {
500 return allocateSubviewGPUMemoryInAddressSpace(
501 builder, subview, sizeBounds, addressSpace: gpu::GPUDialect::getPrivateAddressSpace());
502}
503
504/// Normal copy to between src and dst.
505LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
506 Value dst) {
507 b.create<memref::CopyOp>(location: src.getLoc(), args&: src, args&: dst);
508 return success();
509}
510
/// In case of GPU private memory there is no need to deallocate since the
/// memory is freed when going outside of the scope.
LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
                                                       Value /*buffer*/) {
  return success();
}
517
// Source: mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp