//===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements vectorization of loops, operations and data types to
// a target-independent, n-D super-vector abstraction.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/Passes.h"

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include <optional>

namespace mlir {
namespace affine {
#define GEN_PASS_DEF_AFFINEVECTORIZE
#include "mlir/Dialect/Affine/Passes.h.inc"
} // namespace affine
} // namespace mlir

using namespace mlir;
using namespace affine;
using namespace vector;
///
/// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which encode, in a single,
/// compact representation in the vector types, information that is expected
/// to reduce the impact of the phase ordering problem.
///
/// Vector granularity:
/// ===================
/// This pass is designed to perform vectorization at a super-vector
/// granularity. A super-vector is loosely defined as a vector type that is a
/// multiple of a "good" vector size so the HW can efficiently implement a set
/// of high-level primitives. Multiple is understood along any dimension; e.g.
/// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
/// vector<8xf32> HW vector. Note that a "good vector size so the HW can
/// efficiently implement a set of high-level primitives" is not necessarily an
/// integer multiple of actual hardware registers. We leave details of this
/// distinction unspecified for now.
///
/// Some may prefer the terminology of a "tile of HW vectors". In this case,
/// one should note that super-vectors implement an "always full tile"
/// abstraction. They guarantee no partial-tile separation is necessary by
/// relying on a high-level copy-reshape abstraction that we call
/// vector.transfer. This copy-reshape operation is also responsible for
/// performing layout transposition if necessary. In the general case this will
/// require a scoped allocation in some notional local memory.
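///
/// As a hedged illustration of such a layout transposition (a sketch with
/// made-up names and shapes, not output of this pass), a vector.transfer_read
/// can materialize a transposed vector through its permutation map:
/// ```mlir
///   %f0 = arith.constant 0.0 : f32
///   %v = vector.transfer_read %A[%i, %j], %f0
///          {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///        : memref<?x?xf32>, vector<32x256xf32>
/// ```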
///
/// Whatever the mental model one prefers to use for this abstraction, the key
/// point is that we burn into a single, compact representation in the vector
/// types, information that is expected to reduce the impact of the phase
/// ordering problem. Indeed, a vector type conveys information that:
///   1. the associated loops have dependency semantics that do not prevent
///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that
///      are compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization-hampering transformations can be
///      applied to them anymore;
///   4. the underlying memrefs are accessed in some notional contiguous way
///      that allows loading into vectors with some amount of spatial locality.
/// In other words, super-vectorization provides a level of separation of
/// concern by way of opacity to subsequent passes. This has the effect of
/// encapsulating and propagating vectorization constraints down the list of
/// passes until we are ready to lower further.
///
/// For a particular target, a notion of minimal n-D vector size will be
/// specified and vectorization targets a multiple of those. In the following
/// paragraph, let "k ." represent "a multiple of", to be understood as a
/// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
/// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
///
/// Some non-exhaustive notable super-vector sizes of interest include:
///   - CPU: vector<k . HW_vector_size>,
///          vector<k' . core_count x k . HW_vector_size>,
///          vector<socket_count x k' . core_count x k . HW_vector_size>;
///   - GPU: vector<k . warp_size>,
///          vector<k . warp_size x float2>,
///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
///
/// Loops and operations are emitted that operate on those super-vector shapes.
/// Subsequent lowering passes will materialize to actual HW vector sizes.
/// These passes are expected to be (gradually) more target-specific.
///
/// At a high level, a vectorized load in a loop will resemble:
/// ```mlir
///   affine.for %i = ? to ? step ? {
///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///   }
/// ```
/// It is the responsibility of the implementation of vector.transfer_read to
/// materialize vector registers from the original scalar memrefs. A later
/// (more target-dependent) lowering pass will materialize to actual HW vector
/// sizes. This lowering may occur at different times:
///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
///      DmaWaitOp + vectorized operations for data transformations and
///      shuffle; thus opening opportunities for unrolling and pipelining. This
///      is an instance of library call "whiteboxing"; or
///   2. later, in a target-specific lowering pass or hand-written library
///      call, achieving full separation of concerns. This is an instance of a
///      library call; or
///   3. a mix of both, e.g. based on a model.
/// In the future, these operations will expose a contract to constrain the
/// search on vectorization patterns and sizes.
///
/// Occurrence of super-vectorization in the compiler flow:
/// =======================================================
/// This is an active area of investigation. We start with 2 remarks to
/// position super-vectorization in the context of existing ongoing work: LLVM
/// VPLAN and the LLVM SLP Vectorizer.
///
/// LLVM VPLAN:
/// -----------
/// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time, and with similar objectives, as VPLAN.
/// For instance, it can be applied late in a traditional polyhedral
/// compilation flow (for instance, the PPCG project uses ISL to provide
/// dependence analysis, multi-level scheduling + tiling, lifting footprint to
/// fast memory, communication synthesis, mapping, register optimizations) and
/// before unrolling. When vectorization is applied at this *late* level in a
/// typical polyhedral flow, and is instantiated with actual hardware vector
/// sizes, super-vectorization is expected to match (or subsume) the type of
/// patterns that LLVM's VPLAN aims at targeting. The main difference here is
/// that MLIR is higher level and our implementation should be significantly
/// simpler. Also note that in this mode, recursive patterns are probably a
/// bit of an overkill, although it is reasonable to expect that mixing a bit
/// of outer loop and inner loop vectorization + unrolling will provide
/// interesting choices to MLIR.
///
/// LLVM SLP Vectorizer:
/// --------------------
/// Super-vectorization however is not meant to be usable in a similar fashion
/// to the SLP vectorizer. The main difference lies in the information that
/// the two vectorizers use: super-vectorization examines contiguity of memory
/// references along fastest varying dimensions and loops with recursive nested
/// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
/// the other hand, performs flat pattern matching inside a single unrolled
/// loop body and stitches together pieces of load and store operations into
/// full 1-D vectors. We envision that the SLP vectorizer is a good way to
/// capture innermost-loop, control-flow-dependent patterns that
/// super-vectorization may not be able to capture easily. In other words,
/// super-vectorization does not aim at replacing the SLP vectorizer and the
/// two solutions are complementary.
///
/// Ongoing investigations:
/// -----------------------
/// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
/// opportunities in the context of the traditional polyhedral compiler flow
/// described in PPCG. There are essentially 6 places in the MLIR pass
/// pipeline we expect to experiment with super-vectorization:
///   1. Right after language lowering to MLIR: this is the earliest time
///      where super-vectorization is expected to be applied. At this level,
///      all the language/user/library-level annotations are available and can
///      be fully exploited. Examples include loop-type annotations (such as
///      parallel, reduction, scan, dependence distance vector, vectorizable)
///      as well as memory access annotations (such as non-aliasing writes
///      guaranteed, indirect accesses that are permutations by construction),
///      or that a particular operation is prescribed atomic by the user. At
///      this level, anything that enriches what dependence analysis can do
///      should be aggressively exploited. At this level we are close to
///      having explicit vector types in the language, except we do not impose
///      that burden on the programmer/library: we derive information from
///      scalar code + annotations.
///   2. After dependence analysis and before polyhedral scheduling: the
///      information that supports vectorization does not need to be supplied
///      by a higher level of abstraction. Traditional dependence analysis is
///      available in MLIR and will be used to drive vectorization and cost
///      models.
///
/// Let's pause here and remark that applying super-vectorization as described
/// in 1. and 2. presents clear opportunities and risks:
///   - the opportunity is that vectorization is burned in the type system and
///     is protected from the adverse effects of loop scheduling, tiling, loop
///     interchange and all passes downstream. Provided that subsequent passes
///     are able to operate on vector types, the vector shapes, associated
///     loop iterator properties, alignment, and contiguity of fastest varying
///     dimensions are preserved until we lower the super-vector types. We
///     expect this to significantly rein in the adverse effects of phase
///     ordering.
///   - the risks are that a. all passes after super-vectorization have to
///     work on elemental vector types (note that this is always true,
///     wherever vectorization is applied) and b. that imposing vectorization
///     constraints too early may be overall detrimental to loop fusion,
///     tiling and other transformations because the dependence distances are
///     coarsened when operating on elemental vector types. For this reason,
///     the pattern profitability analysis should include a component that
///     also captures the maximal amount of fusion available under a
///     particular pattern. This is still at the stage of rough ideas but in
///     this context, search is our friend as the Tensor Comprehensions and
///     auto-TVM contributions demonstrated previously.
/// The bottom line is that we do not yet have good answers for the above but
/// aim at making it easy to answer such questions.
///
/// Back to our listing, the last places where early super-vectorization makes
/// sense are:
///   3. right after polyhedral-style scheduling: PLUTO-style algorithms are
///      known to improve locality, parallelism and be configurable (e.g.
///      max-fuse, smart-fuse etc). They can also have adverse effects on the
///      contiguity properties that are required for vectorization but the
///      vector.transfer copy-reshape-pad-transpose abstraction is expected to
///      help recapture these properties.
///   4. right after polyhedral-style scheduling+tiling;
///   5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
///      probably the most promising places because applying tiling achieves a
///      separation of concerns that allows rescheduling to worry less about
///      locality and more about parallelism and distribution (e.g. min-fuse).
///
/// At these levels the risk-reward looks different: on one hand we probably
/// lost a good deal of language/user/library-level annotation; on the other
/// hand we gained parallelism and locality through scheduling and tiling.
/// However we probably want to ensure tiling is compatible with the
/// full-tile-only abstraction used in super-vectorization or suffer the
/// consequences. It is too early to place bets on what will win but we expect
/// super-vectorization to be the right abstraction to allow exploring at all
/// these levels. And again, search is our friend.
///
/// Lastly, we mention it again here:
///   6. as an MLIR-based alternative to VPLAN.
///
/// Lowering, unrolling, pipelining:
/// ================================
/// TODO: point to the proper places.
///
/// Algorithm:
/// ==========
/// The algorithm proceeds in a few steps:
///   1. defining super-vectorization patterns and matching them on the tree
///      of AffineForOp. A super-vectorization pattern is defined as a
///      recursive data structure that matches and captures nested,
///      imperfectly-nested loops that have a. conformable loop annotations
///      attached (e.g. parallel, reduction, vectorizable, ...) as well as
///      b. all contiguous load/store operations along a specified minor
///      dimension (not necessarily the fastest varying);
///   2. analyzing those patterns for profitability (TODO: and interference);
///   3. then, for each pattern in order:
///     a. applying iterative rewriting of the loops and all their nested
///        operations in topological order. Rewriting is implemented by
///        coarsening the loops and converting operations and operands to
///        their vector forms. Processing operations in topological order is
///        relatively simple due to the structured nature of the control-flow
///        representation. This order ensures that all the operands of a given
///        operation have been vectorized before the operation itself in a
///        single traversal, except for operands defined outside of the loop
///        nest. The algorithm can convert the following operations to their
///        vector form:
///          * Affine load and store operations are converted to opaque vector
///            transfer read and write operations.
///          * Scalar constant operations/operands are converted to vector
///            constant operations (splat).
///          * Uniform operands (only induction variables of loops not mapped
///            to a vector dimension, or operands defined outside of the loop
///            nest for now) are broadcast to a vector (see the sketch after
///            this list).
///            TODO: Support more uniform cases.
///          * Affine for operations with 'iter_args' are vectorized by
///            vectorizing their 'iter_args' operands and results.
///            TODO: Support more complex loops with divergent lbs and/or ubs.
///          * The remaining operations in the loop nest are vectorized by
///            widening their scalar types to vector types.
///     b. if everything under the root AffineForOp in the current pattern is
///        vectorized properly, we commit that loop to the IR and remove the
///        scalar loop. Otherwise, we discard the vectorized loop and keep the
///        original scalar loop.
///     c. vectorization is applied on the next pattern in the list. Because
///        pattern interference avoidance is not yet implemented and because
///        we do not support further vectorizing an already vectorized load,
///        we need to re-verify that the pattern is still vectorizable. This
///        is expected to make cost models more difficult to write and is
///        subject to improvement in the future.
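///
/// As a hedged sketch of the uniform-broadcast case in step 3.a (names and
/// shapes are illustrative, not verbatim output of this pass), assume %s is
/// defined outside the loop nest and is thus uniform:
/// ```mlir
///   affine.for %i = 0 to %N {
///     %a = affine.load %A[%i] : memref<?xf32>
///     %m = arith.mulf %a, %s : f32
///     affine.store %m, %B[%i] : memref<?xf32>
///   }
/// ```
/// After 1-D vectorization by 128, %s is broadcast once to a vector:
/// ```mlir
///   %vs = vector.broadcast %s : f32 to vector<128xf32>
///   affine.for %i = 0 to %N step 128 {
///     %va = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///     %vm = arith.mulf %va, %vs : vector<128xf32>
///     vector.transfer_write %vm, %B[%i] : vector<128xf32>, memref<?xf32>
///   }
/// ```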
///
/// Choice of loop transformation to support the algorithm:
/// =======================================================
/// The choice of loop transformation to apply for coarsening vectorized loops
/// is still subject to exploratory tradeoffs. In particular, say we want to
/// vectorize by a factor 128, we want to transform the following input:
/// ```mlir
///   affine.for %i = %M to %N {
///     %a = affine.load %A[%i] : memref<?xf32>
///   }
/// ```
///
/// Traditionally, one would vectorize late (after scheduling, tiling,
/// memory promotion etc), say after stripmining (and potentially unrolling in
/// the case of LLVM's SLP vectorizer):
/// ```mlir
///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128 * %i + 127) {
///       %a = affine.load %A[%ii] : memref<?xf32>
///     }
///   }
/// ```
///
/// Instead, we seek to vectorize early and freeze vector types before
/// scheduling, so we want to generate a pattern that resembles:
/// ```mlir
///   affine.for %i = ? to ? step ? {
///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///   }
/// ```
///
///   i. simply dividing the lower / upper bounds by 128 creates issues
///      when representing expressions such as ii + 1 because now we only
///      have access to original values that have been divided. Additional
///      information is needed to specify accesses at below-128 granularity;
///   ii. another alternative is to coarsen the loop step but this may have
///       consequences on dependence analysis and fusability of loops: fusable
///       loops probably need to have the same step (because we don't want to
///       stripmine/unroll to enable fusion).
/// As a consequence, we choose to represent the coarsening using the loop
/// step for now and reevaluate in the future. Note that we can renormalize
/// loop steps later if/when we have evidence that they are problematic.
///
/// For the simple strawman example above, vectorizing for a 1-D vector
/// abstraction of size 128 returns code similar to:
/// ```mlir
///   affine.for %i = %M to %N step 128 {
///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///   }
/// ```
///
/// Unsupported cases, extensions, and work in progress (help welcome :-) ):
/// ========================================================================
///   1. lowering to concrete vector types for various HW;
///   2. reduction support for n-D vectorization and non-unit steps;
///   3. non-effecting padding during vector.transfer_read and filter during
///      vector.transfer_write;
///   4. misalignment support for vector.transfer_read / vector.transfer_write
///      (hopefully without read-modify-writes);
///   5. control-flow support;
///   6. cost-models, heuristics and search;
///   7. Op implementation, extensions and implication on memref views;
///   8. many TODOs left around.
///
/// Examples:
/// =========
/// Consider the following Function:
/// ```mlir
/// func @vector_add_2d(%M : index, %N : index) -> f32 {
///   %A = alloc (%M, %N) : memref<?x?xf32, 0>
///   %B = alloc (%M, %N) : memref<?x?xf32, 0>
///   %C = alloc (%M, %N) : memref<?x?xf32, 0>
///   %f1 = arith.constant 1.0 : f32
///   %f2 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %M {
///     affine.for %i1 = 0 to %N {
///       // non-scoped %f1
///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
///     }
///   }
///   affine.for %i2 = 0 to %M {
///     affine.for %i3 = 0 to %N {
///       // non-scoped %f2
///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
///     }
///   }
///   affine.for %i4 = 0 to %M {
///     affine.for %i5 = 0 to %N {
///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
///       %s5 = arith.addf %a5, %b5 : f32
///       // non-scoped %f1
///       %s6 = arith.addf %s5, %f1 : f32
///       // non-scoped %f2
///       %s7 = arith.addf %s5, %f2 : f32
///       // diamond dependency.
///       %s8 = arith.addf %s7, %s6 : f32
///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
///   return %res : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=256 test-fastest-varying=0"
/// ```
///
/// produces this standard innermost-loop vectorized code:
/// ```mlir
/// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %cst = arith.constant 1.0 : f32
///   %cst_0 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %arg0 {
///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<256xf32>
///       vector.transfer_write %cst_1, %0[%i0, %i1] :
///         vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i2 = 0 to %arg0 {
///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<256xf32>
///       vector.transfer_write %cst_2, %1[%i2, %i3] :
///         vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i4 = 0 to %arg0 {
///     affine.for %i5 = 0 to %arg1 step 256 {
///       %3 = vector.transfer_read %0[%i4, %i5] :
///         memref<?x?xf32>, vector<256xf32>
///       %4 = vector.transfer_read %1[%i4, %i5] :
///         memref<?x?xf32>, vector<256xf32>
///       %5 = arith.addf %3, %4 : vector<256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<256xf32>
///       %6 = arith.addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<256xf32>
///       %7 = arith.addf %5, %cst_4 : vector<256xf32>
///       %8 = arith.addf %7, %6 : vector<256xf32>
///       vector.transfer_write %8, %2[%i4, %i5] :
///         vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
///   return %9 : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=32,256 \
///                          test-fastest-varying=1,0"
/// ```
///
/// produces this more interesting mixed outer-innermost-loop vectorized code:
/// ```mlir
/// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %cst = arith.constant 1.0 : f32
///   %cst_0 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %arg0 step 32 {
///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<32x256xf32>
///       vector.transfer_write %cst_1, %0[%i0, %i1] :
///         vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i2 = 0 to %arg0 step 32 {
///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<32x256xf32>
///       vector.transfer_write %cst_2, %1[%i2, %i3] :
///         vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i4 = 0 to %arg0 step 32 {
///     affine.for %i5 = 0 to %arg1 step 256 {
///       %3 = vector.transfer_read %0[%i4, %i5] :
///         memref<?x?xf32>, vector<32x256xf32>
///       %4 = vector.transfer_read %1[%i4, %i5] :
///         memref<?x?xf32>, vector<32x256xf32>
///       %5 = arith.addf %3, %4 : vector<32x256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<32x256xf32>
///       %6 = arith.addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<32x256xf32>
///       %7 = arith.addf %5, %cst_4 : vector<32x256xf32>
///       %8 = arith.addf %7, %6 : vector<32x256xf32>
///       vector.transfer_write %8, %2[%i4, %i5] :
///         vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
///   return %9 : f32
/// }
/// ```
///
/// Of course, much more intricate n-D imperfectly-nested patterns can be
/// vectorized too and specified in a fully declarative fashion.
///
/// Reduction:
/// ==========
/// Vectorizing reduction loops along the reduction dimension is supported if:
///   - the reduction kind is supported,
///   - the vectorization is 1-D, and
///   - the step size of the loop is equal to one.
///
/// Compared to the non-vector-dimension case, two additional things are done
/// during vectorization of such loops:
///   - The resulting vector returned from the loop is reduced to a scalar
///     using `vector.reduce`.
///   - In some cases a mask is applied to the vector yielded at the end of
///     the loop to prevent garbage values from being written to the
///     accumulator.
///
/// Reduction vectorization is switched off by default; it can be enabled by
/// passing a map from loops to reductions to utility functions, or by passing
/// `vectorize-reductions=true` to the vectorization pass.
///
/// Consider the following example:
/// ```mlir
/// func @vecred(%in: memref<512xf32>) -> f32 {
///   %cst = arith.constant 0.000000e+00 : f32
///   %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
///     %ld = affine.load %in[%i] : memref<512xf32>
///     %cos = math.cos %ld : f32
///     %add = arith.addf %part_sum, %cos : f32
///     affine.yield %add : f32
///   }
///   return %sum : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
///                          vectorize-reductions=true"
/// ```
/// produces the following output:
/// ```mlir
/// #map = affine_map<(d0) -> (-d0 + 500)>
/// func @vecred(%arg0: memref<512xf32>) -> f32 {
///   %cst = arith.constant 0.000000e+00 : f32
///   %cst_0 = arith.constant dense<0.000000e+00> : vector<128xf32>
///   %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
///       -> (vector<128xf32>) {
///     // %2 is the number of iterations left in the original loop.
///     %2 = affine.apply #map(%arg1)
///     %3 = vector.create_mask %2 : vector<128xi1>
///     %cst_1 = arith.constant 0.000000e+00 : f32
///     %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
///       memref<512xf32>, vector<128xf32>
///     %5 = math.cos %4 : vector<128xf32>
///     %6 = arith.addf %arg2, %5 : vector<128xf32>
///     // We filter out the effect of the last 12 elements using the mask.
///     %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
///     affine.yield %7 : vector<128xf32>
///   }
///   %1 = vector.reduction <add>, %0 : vector<128xf32> into f32
///   return %1 : f32
/// }
/// ```
///
/// Note that because of loop misalignment we needed to apply a mask to
/// prevent the last 12 elements from affecting the final result. The mask is
/// full of ones in every iteration except for the last one, in which it has
/// the form `11...100...0` with 116 ones and 12 zeros.

#define DEBUG_TYPE "early-vect"

using llvm::dbgs;

/// Forward declaration.
static FilterFunctionType
isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
                             int fastestVaryingMemRefDimension);

/// Creates a vectorization pattern from the command line arguments.
/// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// std::nullopt, which conservatively results in no vectorization. For
/// example, `vectorRank = 2` with `fastestVaryingPattern = {1, 0}` matches a
/// 2-deep loop nest whose outer loop accesses vary along memref dimension 1
/// and whose inner loop accesses vary along dimension 0.
static std::optional<NestedPattern>
makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
            ArrayRef<int64_t> fastestVaryingPattern) {
  using affine::matcher::For;
  int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
  int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
  int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
  switch (vectorRank) {
  case 1:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
  case 2:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
               For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
  case 3:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
               For(isVectorizableLoopPtrFactory(parallelLoops, d1),
                   For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
  default: {
    return std::nullopt;
  }
  }
}

static NestedPattern &vectorTransferPattern() {
  static auto pattern = affine::matcher::Op(
      llvm::IsaPred<vector::TransferReadOp, vector::TransferWriteOp>);
  return pattern;
}

namespace {

/// Base state for the vectorize pass.
/// Command line arguments are preempted by non-empty pass arguments.
struct Vectorize : public affine::impl::AffineVectorizeBase<Vectorize> {
  using Base::Base;

  void runOnOperation() override;
};

} // namespace

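/// Maps `loop` to the vector dimension implied by its depth in the matched
/// pattern, skipping loops that fall outside the vector rank. As a worked
/// instance of the arithmetic below (an illustration, not an additional code
/// path): with patternDepth = 3 and two vector sizes, the outermost loop
/// (depthInPattern = 0) is skipped since 3 - 0 > 2, the middle loop maps to
/// vector dimension 2 - (3 - 1) = 0, and the innermost loop maps to
/// 2 - (3 - 2) = 1, i.e. the fastest varying vector dimension.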
static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
                                      unsigned patternDepth,
                                      VectorizationStrategy *strategy) {
  assert(patternDepth > depthInPattern &&
         "patternDepth must be greater than depthInPattern");
  if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
    // Don't vectorize this loop.
    return;
  }
  strategy->loopToVectorDim[loop] =
      strategy->vectorSizes.size() - (patternDepth - depthInPattern);
}

/// Implements a simple strawman strategy for vectorization.
/// Given a matched pattern `matches` of depth `patternDepth`, this strategy
/// greedily assigns the fastest varying dimension **of the vector** to the
/// innermost loop in the pattern.
/// When coupled with a pattern that looks for the fastest varying dimension
/// in load/store MemRefs, this creates a generic vectorization strategy that
/// works for any loop in a hierarchy (outermost, innermost or intermediate).
///
/// TODO: In the future we should additionally increase the power of the
/// profitability analysis along 3 directions:
///   1. account for loop extents (both static and parametric + annotations);
///   2. account for data layout permutations;
///   3. account for impact of vectorization on maximal loop fusion.
/// Then we can quantify the above to build a cost model and search over
/// strategies.
static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
                                          unsigned depthInPattern,
                                          unsigned patternDepth,
                                          VectorizationStrategy *strategy) {
  for (auto m : matches) {
    if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
                                    patternDepth, strategy))) {
      return failure();
    }
    vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
                              patternDepth, strategy);
  }
  return success();
}

///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////

namespace {

struct VectorizationState {

  VectorizationState(MLIRContext *context) : builder(context) {}

  /// Registers the vector replacement of a scalar operation and its result
  /// values. Both operations must have the same number of results.
  ///
  /// This utility is used to register the replacement for the vast majority
  /// of the vectorized operations.
  ///
  /// Example:
  ///   * 'replaced': %0 = arith.addf %1, %2 : f32
  ///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
  void registerOpVectorReplacement(Operation *replaced, Operation *replacement);

  /// Registers the vector replacement of a scalar value. The replacement
  /// operation should have a single result, which replaces the scalar value.
  ///
  /// This utility is used to register the vector replacement of block
  /// arguments and operation results which are not directly vectorized (i.e.,
  /// their scalar version still exists after vectorization), like uniforms.
  ///
  /// Example:
  ///   * 'replaced': block argument or operation outside of the vectorized
  ///     loop.
  ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
  void registerValueVectorReplacement(Value replaced, Operation *replacement);

  /// Registers the vector replacement of a block argument (e.g., iter_args).
  ///
  /// Example:
  ///   * 'replaced': 'iter_arg' block argument.
  ///   * 'replacement': vectorized 'iter_arg' block argument.
  void registerBlockArgVectorReplacement(BlockArgument replaced,
                                         BlockArgument replacement);

  /// Registers the scalar replacement of a scalar value. 'replacement' must
  /// be scalar.
  ///
  /// This utility is used to register the replacement of block arguments
  /// or affine.apply results that are within the loop to be vectorized and
  /// will continue being scalar within the vector loop.
  ///
  /// Example:
  ///   * 'replaced': induction variable of a loop to be vectorized.
  ///   * 'replacement': new induction variable in the new vector loop.
  void registerValueScalarReplacement(Value replaced, Value replacement);

  /// Registers the scalar replacement of a scalar result returned from a
  /// reduction loop. 'replacement' must be scalar.
  ///
  /// This utility is used to register the replacement for scalar results of
  /// vectorized reduction loops with iter_args.
  ///
  /// Example:
  ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
  ///   * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into
  ///     f32
  void registerLoopResultScalarReplacement(Value replaced, Value replacement);

  /// Returns in 'replacedVals' the scalar replacement for values in
  /// 'inputVals'.
  void getScalarValueReplacementsFor(ValueRange inputVals,
                                     SmallVectorImpl<Value> &replacedVals);

  /// Erases the scalar loop nest after its successful vectorization.
  void finishVectorizationPattern(AffineForOp rootLoop);

  // Used to build and insert all the new operations created. The insertion
  // point is preserved and updated along the vectorization process.
  OpBuilder builder;

  // Maps input scalar operations to their vector counterparts.
  DenseMap<Operation *, Operation *> opVectorReplacement;
  // Maps input scalar values to their vector counterparts.
  IRMapping valueVectorReplacement;
  // Maps input scalar values to their new scalar counterparts in the vector
  // loop nest.
  IRMapping valueScalarReplacement;
  // Maps results of reduction loops to their new scalar counterparts.
  DenseMap<Value, Value> loopResultScalarReplacement;

  // Maps the newly created vector loops to their vector dimension.
  DenseMap<Operation *, unsigned> vecLoopToVecDim;

  // Maps the new vectorized loops to the corresponding vector masks, if
  // required.
  DenseMap<Operation *, Value> vecLoopToMask;

  // The strategy drives which loop to vectorize by which amount.
  const VectorizationStrategy *strategy = nullptr;

private:
  /// Internal implementation to map input scalar values to new vector or
  /// scalar values.
  void registerValueVectorReplacementImpl(Value replaced, Value replacement);
};

} // namespace

/// Registers the vector replacement of a scalar operation and its result
/// values. Both operations must have the same number of results.
///
/// This utility is used to register the replacement for the vast majority of
/// the vectorized operations.
///
/// Example:
///   * 'replaced': %0 = arith.addf %1, %2 : f32
///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
void VectorizationState::registerOpVectorReplacement(Operation *replaced,
                                                     Operation *replacement) {
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
  LLVM_DEBUG(dbgs() << *replaced << "\n");
  LLVM_DEBUG(dbgs() << "into\n");
  LLVM_DEBUG(dbgs() << *replacement << "\n");

  assert(replaced->getNumResults() == replacement->getNumResults() &&
         "Unexpected replaced and replacement results");
  assert(opVectorReplacement.count(replaced) == 0 && "already registered");
  opVectorReplacement[replaced] = replacement;

  for (auto resultTuple :
       llvm::zip(replaced->getResults(), replacement->getResults()))
    registerValueVectorReplacementImpl(std::get<0>(resultTuple),
                                       std::get<1>(resultTuple));
}

/// Registers the vector replacement of a scalar value. The replacement
/// operation should have a single result, which replaces the scalar value.
///
/// This utility is used to register the vector replacement of block arguments
/// and operation results which are not directly vectorized (i.e., their
/// scalar version still exists after vectorization), like uniforms.
///
/// Example:
///   * 'replaced': block argument or operation outside of the vectorized loop.
///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
void VectorizationState::registerValueVectorReplacement(
    Value replaced, Operation *replacement) {
  assert(replacement->getNumResults() == 1 &&
         "Expected single-result replacement");
  if (Operation *defOp = replaced.getDefiningOp())
    registerOpVectorReplacement(defOp, replacement);
  else
    registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
}

/// Registers the vector replacement of a block argument (e.g., iter_args).
///
/// Example:
///   * 'replaced': 'iter_arg' block argument.
///   * 'replacement': vectorized 'iter_arg' block argument.
void VectorizationState::registerBlockArgVectorReplacement(
    BlockArgument replaced, BlockArgument replacement) {
  registerValueVectorReplacementImpl(replaced, replacement);
}

void VectorizationState::registerValueVectorReplacementImpl(
    Value replaced, Value replacement) {
  assert(!valueVectorReplacement.contains(replaced) &&
         "Vector replacement already registered");
  assert(isa<VectorType>(replacement.getType()) &&
         "Expected vector type in vector replacement");
  valueVectorReplacement.map(replaced, replacement);
}

/// Registers the scalar replacement of a scalar value. 'replacement' must be
/// scalar.
///
/// This utility is used to register the replacement of block arguments
/// or affine.apply results that are within the loop to be vectorized and will
/// continue being scalar within the vector loop.
///
/// Example:
///   * 'replaced': induction variable of a loop to be vectorized.
///   * 'replacement': new induction variable in the new vector loop.
void VectorizationState::registerValueScalarReplacement(Value replaced,
                                                        Value replacement) {
  assert(!valueScalarReplacement.contains(replaced) &&
         "Scalar value replacement already registered");
  assert(!isa<VectorType>(replacement.getType()) &&
         "Expected scalar type in scalar replacement");
  valueScalarReplacement.map(replaced, replacement);
}

/// Registers the scalar replacement of a scalar result returned from a
/// reduction loop. 'replacement' must be scalar.
///
/// This utility is used to register the replacement for scalar results of
/// vectorized reduction loops with iter_args.
///
/// Example:
///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
///   * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into f32
void VectorizationState::registerLoopResultScalarReplacement(
    Value replaced, Value replacement) {
  assert(isa<AffineForOp>(replaced.getDefiningOp()));
  assert(loopResultScalarReplacement.count(replaced) == 0 &&
         "already registered");
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
                       "with scalar: "
                    << replacement);
  loopResultScalarReplacement[replaced] = replacement;
}

/// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
void VectorizationState::getScalarValueReplacementsFor(
    ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
  for (Value inputVal : inputVals)
    replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
}

/// Erases a loop nest, including all its nested operations.
static void eraseLoopNest(AffineForOp forOp) {
  LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
  forOp.erase();
}

/// Erases the scalar loop nest after its successful vectorization.
void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
  LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
  eraseLoopNest(rootLoop);
}

// Applies 'map' with 'mapOperands', returning the resulting values in
// 'results'. One single-result affine.apply op is emitted per map result.
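// For example (a hypothetical illustration of the per-result splitting, not
// output captured from this code), a map (d0, d1) -> (d0 + d1, d1) applied to
// (%i, %j) materializes two single-result applies:
//   %r0 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%i, %j)
//   %r1 = affine.apply affine_map<(d0, d1) -> (d1)>(%i, %j)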
static void computeMemoryOpIndices(Operation *op, AffineMap map,
                                   ValueRange mapOperands,
                                   VectorizationState &state,
                                   SmallVectorImpl<Value> &results) {
  for (auto resultExpr : map.getResults()) {
    auto singleResMap =
        AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
    auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
                                                    mapOperands);
    results.push_back(afOp);
  }
}

/// Returns a FilterFunctionType that can be used in NestedPattern to match a
/// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
static FilterFunctionType
isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
                             int fastestVaryingMemRefDimension) {
  return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
    auto loop = cast<AffineForOp>(forOp);
    if (!parallelLoops.contains(loop))
      return false;
    int memRefDim = -1;
    auto vectorizableBody =
        isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
    if (!vectorizableBody)
      return false;
    return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
           memRefDim == fastestVaryingMemRefDimension;
  };
}

/// Returns the vector type resulting from applying the provided vectorization
/// strategy on the scalar type.
static VectorType getVectorType(Type scalarTy,
                                const VectorizationStrategy *strategy) {
  assert(!isa<VectorType>(scalarTy) && "Expected scalar type");
  return VectorType::get(strategy->vectorSizes, scalarTy);
}

/// Tries to transform a scalar constant into a vector constant. Returns the
/// vector constant if the scalar type is a valid vector element type. Returns
/// nullptr otherwise.
static arith::ConstantOp vectorizeConstant(arith::ConstantOp constOp,
                                           VectorizationState &state) {
  Type scalarTy = constOp.getType();
  if (!VectorType::isValidElementType(scalarTy))
    return nullptr;

  auto vecTy = getVectorType(scalarTy, state.strategy);
  auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());

  OpBuilder::InsertionGuard guard(state.builder);
  Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
  // Find the innermost vectorized ancestor loop to insert the vector constant.
  while (parentOp && !state.vecLoopToVecDim.count(parentOp))
    parentOp = parentOp->getParentOp();
  assert(parentOp && state.vecLoopToVecDim.count(parentOp) &&
         isa<AffineForOp>(parentOp) && "Expected a vectorized for op");
  auto vecForOp = cast<AffineForOp>(parentOp);
  state.builder.setInsertionPointToStart(vecForOp.getBody());
  auto newConstOp =
      state.builder.create<arith::ConstantOp>(constOp.getLoc(), vecAttr);

  // Register vector replacement for future uses in the scope.
  state.registerOpVectorReplacement(constOp, newConstOp);
  return newConstOp;
}

/// We have no need to vectorize affine.apply. However, we still need to
/// generate it and replace the operands with values in valueScalarReplacement.
static Operation *vectorizeAffineApplyOp(AffineApplyOp applyOp,
                                         VectorizationState &state) {
  SmallVector<Value, 8> updatedOperands;
  for (Value operand : applyOp.getOperands()) {
    if (state.valueVectorReplacement.contains(operand)) {
      LLVM_DEBUG(
          dbgs() << "\n[early-vect]+++++ affine.apply on vector operand\n");
      return nullptr;
    } else {
      Value updatedOperand = state.valueScalarReplacement.lookupOrNull(operand);
      if (!updatedOperand)
        updatedOperand = operand;
      updatedOperands.push_back(updatedOperand);
    }
  }

  auto newApplyOp = state.builder.create<AffineApplyOp>(
      applyOp.getLoc(), applyOp.getAffineMap(), updatedOperands);

  // Register the new affine.apply result.
  state.registerValueScalarReplacement(applyOp.getResult(),
                                       newApplyOp.getResult());
  return newApplyOp;
}

/// Creates a constant vector filled with the neutral elements of the given
/// reduction. The scalar type of the vector elements will be taken from
/// `oldOperand`.
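///
/// For example, for an `addf` reduction over f32 with a 1-D vector size of
/// 128, the neutral element is 0.0 and this builds (a sketch):
///   %init = arith.constant dense<0.000000e+00> : vector<128xf32>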
static arith::ConstantOp createInitialVector(arith::AtomicRMWKind reductionKind,
                                             Value oldOperand,
                                             VectorizationState &state) {
  Type scalarTy = oldOperand.getType();
  if (!VectorType::isValidElementType(scalarTy))
    return nullptr;

  Attribute valueAttr = getIdentityValueAttr(
      reductionKind, scalarTy, state.builder, oldOperand.getLoc());
  auto vecTy = getVectorType(scalarTy, state.strategy);
  auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
  auto newConstOp =
      state.builder.create<arith::ConstantOp>(oldOperand.getLoc(), vecAttr);

  return newConstOp;
}

/// Creates a mask used to filter out garbage elements in the last iteration
/// of unaligned loops. If a mask is not required then `nullptr` is returned.
/// The mask will be a vector of booleans representing meaningful vector
/// elements in the current iteration. It is filled with ones for each
/// iteration except for the last one, where it has the form `11...100...0`
/// with the number of ones equal to the number of meaningful elements (i.e.
/// the number of iterations that would be left in the original loop).
static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
  assert(state.strategy->vectorSizes.size() == 1 &&
         "Creating a mask for non-1-D vectors is not supported.");
  assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
         "Creating a mask for loops with a non-unit original step size is not "
         "supported.");

  // Check if we have already created the mask.
  if (Value mask = state.vecLoopToMask.lookup(vecForOp))
    return mask;

  // If the loop has constant bounds and the original number of iterations is
  // divisible by the vector size then we don't need a mask.
  if (vecForOp.hasConstantBounds()) {
    int64_t originalTripCount =
        vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
    if (originalTripCount % vecForOp.getStepAsInt() == 0)
      return nullptr;
  }

  OpBuilder::InsertionGuard guard(state.builder);
  state.builder.setInsertionPointToStart(vecForOp.getBody());

  // We generate the mask using the `vector.create_mask` operation, which
  // accepts the number of meaningful elements (i.e. the length of the prefix
  // of 1s). To compute the number of meaningful elements we subtract the
  // current value of the iteration variable from the upper bound of the loop.
  // Example:
  //
  //     // 500 is the upper bound of the loop.
  //     #map = affine_map<(d0) -> (500 - d0)>
  //     %elems_left = affine.apply #map(%iv)
  //     %mask = vector.create_mask %elems_left : vector<128xi1>
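  //
  // As a worked instance (matching the reduction example in the file header):
  // for a loop from 0 to 500 with step 128, the last iteration has %iv = 384,
  // so %elems_left = 500 - 384 = 116 and the mask is 116 ones followed by 12
  // zeros.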

  Location loc = vecForOp.getLoc();

  // First we get the upper bound of the loop using `affine.apply` or
  // `affine.min`.
  AffineMap ubMap = vecForOp.getUpperBoundMap();
  Value ub;
  if (ubMap.getNumResults() == 1)
    ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
                                             vecForOp.getUpperBoundOperands());
  else
    ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
                                           vecForOp.getUpperBoundOperands());
  // Then we compute the number of (original) iterations left in the loop.
  AffineExpr subExpr =
      state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
  Value itersLeft =
      makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
                              {ub, vecForOp.getInductionVar()});
  // If the affine maps were successfully composed then `ub` is unneeded.
  if (ub.use_empty())
    ub.getDefiningOp()->erase();
  // Finally we create the mask.
  Type maskTy = VectorType::get(state.strategy->vectorSizes,
                                state.builder.getIntegerType(1));
  Value mask =
      state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);

  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
                    << itersLeft << "\n"
                    << mask << "\n");

  state.vecLoopToMask[vecForOp] = mask;
  return mask;
}

/// Returns true if the provided value is vector uniform given the
/// vectorization strategy.
// TODO: For now, only values that are induction variables of loops not in
// `loopToVectorDim` or invariants to all the loops in the vectorization
// strategy are considered vector uniforms.
static bool isUniformDefinition(Value value,
                                const VectorizationStrategy *strategy) {
  AffineForOp forOp = getForInductionVarOwner(value);
  if (forOp && strategy->loopToVectorDim.count(forOp) == 0)
    return true;

  for (auto loopToDim : strategy->loopToVectorDim) {
    auto loop = cast<AffineForOp>(loopToDim.first);
    if (!loop.isDefinedOutsideOfLoop(value))
      return false;
  }

  if (!value.getType().isIntOrIndexOrFloat())
    return false;

  return true;
}

/// Generates a broadcast op for the provided uniform value using the
/// vectorization strategy in 'state'.
static Operation *vectorizeUniform(Value uniformVal,
                                   VectorizationState &state) {
  OpBuilder::InsertionGuard guard(state.builder);
  Value uniformScalarRepl =
      state.valueScalarReplacement.lookupOrDefault(uniformVal);
  state.builder.setInsertionPointAfterValue(uniformScalarRepl);

  auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
  auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
                                                   vectorTy, uniformScalarRepl);
  state.registerValueVectorReplacement(uniformVal, bcastOp);
  return bcastOp;
}

/// Tries to vectorize a given `operand` by applying the following logic:
///   1. if the defining operation has already been vectorized, `operand` is
///      already in the proper vector form;
///   2. if the `operand` is a constant, returns the vectorized form of the
///      constant;
///   3. if the `operand` is uniform, returns a vector broadcast of the `op`;
///   4. otherwise, the vectorization of `operand` is not supported.
/// Newly created vector operations are registered in `state` as replacements
/// for their scalar counterparts.
/// In particular this logic captures some of the use cases where definitions
/// that are not scoped under the current pattern are needed to vectorize.
/// One such example is top level function constants that need to be splatted.
///
/// Returns an operand that has been vectorized to match `state`'s strategy if
/// vectorization is possible with the above logic. Returns nullptr otherwise.
///
/// TODO: handle more complex cases.
static Value vectorizeOperand(Value operand, VectorizationState &state) {
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
  // If this value is already vectorized, we are done.
  if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
    LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
    return vecRepl;
  }

  // A vector operand that is not in the replacement map should never reach
  // this point. Reaching this point could mean that the code was already
  // vectorized and we shouldn't try to vectorize already vectorized code.
  assert(!isa<VectorType>(operand.getType()) &&
         "Vector op not found in replacement map");

  // Vectorize constant.
  if (auto constOp = operand.getDefiningOp<arith::ConstantOp>()) {
    auto vecConstant = vectorizeConstant(constOp, state);
    LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
    return vecConstant.getResult();
  }

  // Vectorize uniform values.
  if (isUniformDefinition(operand, state.strategy)) {
    Operation *vecUniform = vectorizeUniform(operand, state);
    LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
    return vecUniform->getResult(0);
  }

  // Check for unsupported block argument scenarios. A supported block argument
  // should have been vectorized already.
  if (!operand.getDefiningOp())
    LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
  else
    // Generic unsupported case.
    LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");

  return nullptr;
}

/// Returns true if any vectorized loop IV drives more than one index.
static bool isIVMappedToMultipleIndices(
    ArrayRef<Value> indices,
    const DenseMap<Operation *, unsigned> &loopToVectorDim) {
  for (auto &kvp : loopToVectorDim) {
    AffineForOp forOp = cast<AffineForOp>(kvp.first);
    // Find which indices are invariant w.r.t. this loop IV.
    llvm::DenseSet<Value> invariants =
        affine::getInvariantAccesses(forOp.getInductionVar(), indices);
    // Count how many vary (i.e. are not invariant).
    unsigned nonInvariant = 0;
    for (Value idx : indices) {
      if (invariants.count(idx))
        continue;

      if (++nonInvariant > 1) {
        LLVM_DEBUG(dbgs() << "[early-vect] Bail out: IV "
                          << forOp.getInductionVar() << " drives "
                          << nonInvariant << " indices\n");
        return true;
      }
    }
  }
  return false;
}

/// Vectorizes an affine load with the vectorization strategy in 'state' by
/// generating a 'vector.transfer_read' op with the proper permutation map
/// inferred from the indices of the load. The new 'vector.transfer_read' is
/// registered as replacement of the scalar load. Returns the newly created
/// 'vector.transfer_read' if vectorization was successful. Returns nullptr
/// otherwise.
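///
/// As a hedged sketch (assuming 1-D vectorization by 128 along the %j loop;
/// the %pad value and attribute printing are illustrative):
///   %a = affine.load %A[%i, %j] : memref<?x?xf32>
/// becomes, roughly:
///   %v = vector.transfer_read %A[%i, %j], %pad
///          {permutation_map = affine_map<(d0, d1) -> (d1)>}
///        : memref<?x?xf32>, vector<128xf32>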
static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
                                      VectorizationState &state) {
  MemRefType memRefType = loadOp.getMemRefType();
  Type elementType = memRefType.getElementType();
  auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);

  // Replace map operands with operands from the vector loop nest.
  SmallVector<Value, 8> mapOperands;
  state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);

  // Compute indices for the transfer op. AffineApplyOp's may be generated.
  SmallVector<Value, 8> indices;
  indices.reserve(memRefType.getRank());
  if (loadOp.getAffineMap() !=
      state.builder.getMultiDimIdentityMap(memRefType.getRank())) {
    // Check that no operand of the loadOp affine map comes from an
    // AffineApplyOp.
    for (auto op : mapOperands) {
      if (op.getDefiningOp<AffineApplyOp>())
        return nullptr;
    }
    computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
                           indices);
  } else {
    indices.append(mapOperands.begin(), mapOperands.end());
  }

  if (isIVMappedToMultipleIndices(indices, state.vecLoopToVecDim))
    return nullptr;

  // Compute permutation map using the information of new vector loops.
  auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
                                           indices, state.vecLoopToVecDim);
  if (!permutationMap) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
    return nullptr;
  }
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
  LLVM_DEBUG(permutationMap.print(dbgs()));

  auto transfer = state.builder.create<vector::TransferReadOp>(
      loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);

  // Register replacement for future uses in the scope.
  state.registerOpVectorReplacement(loadOp, transfer);
  return transfer;
}

/// Vectorizes an affine store with the vectorization strategy in 'state' by
/// generating a 'vector.transfer_write' op with the proper permutation map
/// inferred from the indices of the store. The new 'vector.transfer_write' is
/// registered as replacement of the scalar store. Returns the newly created
/// 'vector.transfer_write' if vectorization was successful. Returns nullptr,
/// otherwise.
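///
/// For example (an illustrative sketch, assuming a 1-D strategy of size 128
/// along %j, with %vec the vectorized form of the stored value), a scalar
/// store:
///
///   affine.store %v, %A[%i, %j] : memref<?x?xf32>
///
/// would roughly become:
///
///   vector.transfer_write %vec, %A[%i, %j]
///       {permutation_map = affine_map<(d0, d1) -> (d1)>}
///       : vector<128xf32>, memref<?x?xf32>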
static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
                                       VectorizationState &state) {
  MemRefType memRefType = storeOp.getMemRefType();
  Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
  if (!vectorValue)
    return nullptr;

  // Replace map operands with operands from the vector loop nest.
  SmallVector<Value, 8> mapOperands;
  state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);

  // Compute indices for the transfer op. AffineApplyOp's may be generated.
  SmallVector<Value, 8> indices;
  indices.reserve(memRefType.getRank());
  if (storeOp.getAffineMap() !=
      state.builder.getMultiDimIdentityMap(memRefType.getRank()))
    computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
                           indices);
  else
    indices.append(mapOperands.begin(), mapOperands.end());

  if (isIVMappedToMultipleIndices(indices, state.vecLoopToVecDim))
    return nullptr;

  // Compute permutation map using the information of new vector loops.
  auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
                                           indices, state.vecLoopToVecDim);
  if (!permutationMap)
    return nullptr;
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
  LLVM_DEBUG(permutationMap.print(dbgs()));

  auto transfer = state.builder.create<vector::TransferWriteOp>(
      storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
      permutationMap);
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);

  // Register replacement for future uses in the scope.
  state.registerOpVectorReplacement(storeOp, transfer);
  return transfer;
}

/// Returns true if `value` is a constant equal to the neutral element of the
/// given vectorizable reduction.
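/// For example, for an 'addf' reduction the neutral element is 0.0 and for a
/// 'mulf' reduction it is 1.0; an accumulator initialized with such a constant
/// needs no extra combining with the original initial value after the vector
/// reduction.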
static bool isNeutralElementConst(arith::AtomicRMWKind reductionKind,
                                  Value value, VectorizationState &state) {
  Type scalarTy = value.getType();
  if (!VectorType::isValidElementType(scalarTy))
    return false;
  Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
                                             state.builder, value.getLoc());
  if (auto constOp = dyn_cast_or_null<arith::ConstantOp>(value.getDefiningOp()))
    return constOp.getValue() == valueAttr;
  return false;
}

/// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
/// created and registered as replacement for the scalar loop. The builder's
/// insertion point is set to the new loop's body so that subsequent vectorized
/// operations are inserted into the new loop. If the loop is a vector
/// dimension, the step of the newly created loop will reflect the
/// vectorization factor used to vectorize that dimension.
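///
/// For example (an illustrative sketch, with vectorization factor 128 for this
/// dimension), the scalar loop:
///
///   affine.for %i = 0 to 512 { ... }
///
/// becomes a vector loop stepping by 128:
///
///   affine.for %i = 0 to 512 step 128 { ... }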
static Operation *vectorizeAffineForOp(AffineForOp forOp,
                                       VectorizationState &state) {
  const VectorizationStrategy &strategy = *state.strategy;
  auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
  bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();

  // TODO: Vectorization of reduction loops is not supported for non-unit steps.
  if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
    LLVM_DEBUG(
        dbgs()
        << "\n[early-vect]+++++ unsupported step size for reduction loop: "
        << forOp.getStep() << "\n");
    return nullptr;
  }

  // If we are vectorizing a vector dimension, compute a new step for the new
  // vectorized loop using the vectorization factor for the vector dimension.
  // Otherwise, propagate the step of the scalar loop.
  unsigned newStep;
  if (isLoopVecDim) {
    unsigned vectorDim = loopToVecDimIt->second;
    assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
    int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
    newStep = forOp.getStepAsInt() * forOpVecFactor;
  } else {
    newStep = forOp.getStepAsInt();
  }

  // Get information about reduction kinds.
  ArrayRef<LoopReduction> reductions;
  if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
    auto it = strategy.reductionLoops.find(forOp);
    assert(it != strategy.reductionLoops.end() &&
           "Reduction descriptors not found when vectorizing a reduction loop");
    reductions = it->second;
    assert(reductions.size() == forOp.getNumIterOperands() &&
           "The size of reductions array must match the number of iter_args");
  }

  // Vectorize 'iter_args'.
  SmallVector<Value, 8> vecIterOperands;
  if (!isLoopVecDim) {
    for (auto operand : forOp.getInits())
      vecIterOperands.push_back(vectorizeOperand(operand, state));
  } else {
    // For reduction loops we need to pass a vector of neutral elements as an
    // initial value of the accumulator. We will add the original initial value
    // later.
    for (auto redAndOperand : llvm::zip(reductions, forOp.getInits())) {
      vecIterOperands.push_back(createInitialVector(
          std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
    }
  }

  auto vecForOp = state.builder.create<AffineForOp>(
      forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
      forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
      vecIterOperands,
      /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
        // Make sure we don't create a default terminator in the loop body as
        // the proper terminator will be added during vectorization.
      });

  // Register loop-related replacements:
  //   1) The new vectorized loop is registered as vector replacement of the
  //      scalar loop.
  //   2) The new iv of the vectorized loop is registered as scalar replacement
  //      since a scalar copy of the iv will prevail in the vectorized loop.
  //      TODO: A vector replacement will also be added in the future when
  //      vectorization of linear ops is supported.
  //   3) The new 'iter_args' region arguments are registered as vector
  //      replacements since they have been vectorized.
  //   4) If the loop performs a reduction along the vector dimension, a
  //      `vector.reduction` or similar op is inserted for each resulting value
  //      of the loop and its scalar value replaces the corresponding scalar
  //      result of the loop.
  state.registerOpVectorReplacement(forOp, vecForOp);
  state.registerValueScalarReplacement(forOp.getInductionVar(),
                                       vecForOp.getInductionVar());
  for (auto iterTuple :
       llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
    state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
                                            std::get<1>(iterTuple));

  if (isLoopVecDim) {
    for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
      // First, we reduce the vector returned from the loop into a scalar.
      Value reducedRes =
          getVectorReductionOp(reductions[i].kind, state.builder,
                               vecForOp.getLoc(), vecForOp.getResult(i));
      LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
                        << reducedRes);
      // Then we combine it with the original (scalar) initial value unless it
      // is equal to the neutral element of the reduction.
      Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
      Value finalRes = reducedRes;
      if (!isNeutralElementConst(reductions[i].kind, origInit, state))
        finalRes =
            arith::getReductionOp(reductions[i].kind, state.builder,
                                  reducedRes.getLoc(), reducedRes, origInit);
      state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
    }
  }

  if (isLoopVecDim)
    state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;

  // Change insertion point so that upcoming vectorized instructions are
  // inserted into the vectorized loop's body.
  state.builder.setInsertionPointToStart(vecForOp.getBody());

  // If this is a reduction loop then we may need to create a mask to filter
  // out garbage in the last iteration.
  if (isLoopVecDim && forOp.getNumIterOperands() > 0)
    createMask(vecForOp, state);

  return vecForOp;
}

/// Vectorizes an arbitrary operation by plain widening. We apply generic type
/// widening of all its results and retrieve the vector counterparts for all
/// its operands.
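///
/// For example (an illustrative sketch, assuming a 1-D strategy of size 128
/// and already-vectorized operands), the scalar op:
///
///   %2 = arith.addf %0, %1 : f32
///
/// is widened to:
///
///   %2 = arith.addf %0, %1 : vector<128xf32>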
static Operation *widenOp(Operation *op, VectorizationState &state) {
  SmallVector<Type, 8> vectorTypes;
  for (Value result : op->getResults())
    vectorTypes.push_back(
        VectorType::get(state.strategy->vectorSizes, result.getType()));

  SmallVector<Value, 8> vectorOperands;
  for (Value operand : op->getOperands()) {
    Value vecOperand = vectorizeOperand(operand, state);
    if (!vecOperand) {
      LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
      return nullptr;
    }
    vectorOperands.push_back(vecOperand);
  }

  // Create a clone of the op with the proper operands and return types.
  // TODO: The following assumes there is always an op with a fixed
  // name that works both in scalar mode and vector mode.
  // TODO: Is it worth considering an Operation.clone operation which
  // changes the type so we can promote an Operation with less boilerplate?
  Operation *vecOp =
      state.builder.create(op->getLoc(), op->getName().getIdentifier(),
                           vectorOperands, vectorTypes, op->getAttrs());
  state.registerOpVectorReplacement(op, vecOp);
  return vecOp;
}

/// Vectorizes a yield operation by widening its types. The builder's insertion
/// point is set after the vectorized parent op to continue vectorizing the
/// operations after the parent op. When vectorizing a reduction loop a mask may
/// be used to prevent adding garbage values to the accumulator.
static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
                                         VectorizationState &state) {
  Operation *newYieldOp = widenOp(yieldOp, state);
  Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();

  // If there is a mask for this loop then we must prevent garbage values from
  // being added to the accumulator by inserting `select` operations, for
  // example:
  //
  //   %val_masked = select %mask, %val, %neutralCst : vector<128xi1>,
  //                 vector<128xf32>
  //   %res = arith.addf %acc, %val_masked : vector<128xf32>
  //   affine.yield %res : vector<128xf32>
  //
  if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
    state.builder.setInsertionPoint(newYieldOp);
    for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
      SmallVector<Operation *> combinerOps;
      Value reducedVal = matchReduction(
          cast<AffineForOp>(newParentOp).getRegionIterArgs(), i, combinerOps);
      assert(reducedVal && "expect non-null value for parallel reduction loop");
      assert(combinerOps.size() == 1 && "expect only one combiner op");
      // IterOperands are neutral element vectors.
      Value neutralVal = cast<AffineForOp>(newParentOp).getInits()[i];
      state.builder.setInsertionPoint(combinerOps.back());
      Value maskedReducedVal = state.builder.create<arith::SelectOp>(
          reducedVal.getLoc(), mask, reducedVal, neutralVal);
      LLVM_DEBUG(
          dbgs() << "\n[early-vect]+++++ masking an input to a binary op that "
                    "produces value for a yield Op: "
                 << maskedReducedVal);
      combinerOps.back()->replaceUsesOfWith(reducedVal, maskedReducedVal);
    }
  }

  state.builder.setInsertionPointAfter(newParentOp);
  return newYieldOp;
}

/// Encodes Operation-specific behavior for vectorization. In general we
/// assume that all operands of an op must be vectorized but this is not
/// always true. In the future, it would be nice to have a trait that
/// describes how a particular operation vectorizes. For now we implement the
/// case distinction here. Returns a vectorized form of an operation or
/// nullptr if vectorization fails.
// TODO: consider adding a trait to Op to describe how it gets vectorized.
// Maybe some Ops are not vectorizable or require some tricky logic, we cannot
// do one-off logic here; ideally it would be TableGen'd.
static Operation *vectorizeOneOperation(Operation *op,
                                        VectorizationState &state) {
  // Sanity checks.
  assert(!isa<vector::TransferReadOp>(op) &&
         "vector.transfer_read cannot be further vectorized");
  assert(!isa<vector::TransferWriteOp>(op) &&
         "vector.transfer_write cannot be further vectorized");

  if (auto loadOp = dyn_cast<AffineLoadOp>(op))
    return vectorizeAffineLoad(loadOp, state);
  if (auto storeOp = dyn_cast<AffineStoreOp>(op))
    return vectorizeAffineStore(storeOp, state);
  if (auto forOp = dyn_cast<AffineForOp>(op))
    return vectorizeAffineForOp(forOp, state);
  if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
    return vectorizeAffineYieldOp(yieldOp, state);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return vectorizeConstant(constant, state);
  if (auto applyOp = dyn_cast<AffineApplyOp>(op))
    return vectorizeAffineApplyOp(applyOp, state);

  // Other ops with regions are not supported.
  if (op->getNumRegions() != 0)
    return nullptr;

  return widenOp(op, state);
}

/// Recursive implementation to convert all the nested loops in 'match' to a 2D
/// vector container that preserves the relative nesting level of each loop with
/// respect to the others in 'match'. 'currentLevel' is the nesting level that
/// will be assigned to the loop in the current 'match'.
static void
getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
                         std::vector<SmallVector<AffineForOp, 2>> &loops) {
  // Add a new empty level to the output if it doesn't exist already.
  assert(currentLevel <= loops.size() && "Unexpected currentLevel");
  if (currentLevel == loops.size())
    loops.emplace_back();

  // Add current match and recursively visit its children.
  loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
  for (auto childMatch : match.getMatchedChildren()) {
    getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
  }
}

/// Converts all the nested loops in 'match' to a 2D vector container that
/// preserves the relative nesting level of each loop with respect to the others
/// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
/// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
/// 'loops[i+1]'.
static void
getMatchedAffineLoops(NestedMatch match,
                      std::vector<SmallVector<AffineForOp, 2>> &loops) {
  getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
}

/// Internal implementation to vectorize affine loops from a single loop nest
/// using an n-D vectorization strategy.
static LogicalResult
vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
                  const VectorizationStrategy &strategy) {
  assert(loops[0].size() == 1 && "Expected single root loop");
  AffineForOp rootLoop = loops[0][0];
  VectorizationState state(rootLoop.getContext());
  state.builder.setInsertionPointAfter(rootLoop);
  state.strategy = &strategy;

  // Since patterns are recursive, they can very well intersect.
  // Since we do not want a fully greedy strategy in general, we decouple
  // pattern matching, from profitability analysis, from application.
  // As a consequence we must check that each root pattern is still
  // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
  // TODO: implement a non-greedy profitability analysis that keeps only
  // non-intersecting patterns.
  if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
    return failure();
  }

  //////////////////////////////////////////////////////////////////////////////
  // Vectorize the scalar loop nest following a topological order. A new vector
  // loop nest with the vectorized operations is created along the process. If
  // vectorization succeeds, the scalar loop nest is erased. If vectorization
  // fails, the vector loop nest is erased and the scalar loop nest is not
  // modified.
  //////////////////////////////////////////////////////////////////////////////

  auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
    LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
    Operation *vectorOp = vectorizeOneOperation(op, state);
    if (!vectorOp) {
      LLVM_DEBUG(
          dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
                 << *op << "\n");
      return WalkResult::interrupt();
    }

    return WalkResult::advance();
  });

  if (opVecResult.wasInterrupted()) {
    LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
                      << rootLoop << "\n");
    // Erase vector loop nest if it was created.
    auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
    if (vecRootLoopIt != state.opVectorReplacement.end())
      eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));

    return failure();
  }

  // Replace results of reduction loops with the scalar values computed using
  // `vector.reduce` or similar ops.
  for (auto resPair : state.loopResultScalarReplacement)
    resPair.first.replaceAllUsesWith(resPair.second);

  assert(state.opVectorReplacement.count(rootLoop) == 1 &&
         "Expected vector replacement for loop nest");
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
                    << *state.opVectorReplacement[rootLoop]);

  // Finish this vectorization pattern.
  state.finishVectorizationPattern(rootLoop);
  return success();
}

/// Extracts the matched loops and vectorizes them following a topological
/// order. A new vector loop nest will be created if vectorization succeeds. The
/// original loop nest won't be modified in any case.
static LogicalResult vectorizeRootMatch(NestedMatch m,
                                        const VectorizationStrategy &strategy) {
  std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
  getMatchedAffineLoops(m, loopsToVectorize);
  return vectorizeLoopNest(loopsToVectorize, strategy);
}

/// Traverses all the loop matches and classifies them into intersection
/// buckets. Two matches intersect if any of them encloses the other one. A
/// match intersects with a bucket if the match intersects with the root
/// (outermost) loop in that bucket.
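///
/// For example (illustrative), given matches rooted at loops %a, %b and %c,
/// where %a encloses %b and %c is disjoint from both, two buckets result: one
/// rooted at %a holding the matches of %a and %b, and one rooted at %c.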
static void computeIntersectionBuckets(
    ArrayRef<NestedMatch> matches,
    std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
  assert(intersectionBuckets.empty() && "Expected empty output");
  // Keeps track of the root (outermost) loop of each bucket.
  SmallVector<AffineForOp, 8> bucketRoots;

  for (const NestedMatch &match : matches) {
    AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
    bool intersects = false;
    for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
      AffineForOp bucketRoot = bucketRoots[i];
      // Add match to the bucket if the bucket root encloses the match root.
      if (bucketRoot->isAncestor(matchRoot)) {
        intersectionBuckets[i].push_back(match);
        intersects = true;
        break;
      }
      // Add match to the bucket if the match root encloses the bucket root.
      // The match root becomes the new bucket root.
      if (matchRoot->isAncestor(bucketRoot)) {
        bucketRoots[i] = matchRoot;
        intersectionBuckets[i].push_back(match);
        intersects = true;
        break;
      }
    }

    // Match doesn't intersect with any existing bucket. Create a new bucket
    // for it.
    if (!intersects) {
      bucketRoots.push_back(matchRoot);
      intersectionBuckets.emplace_back();
      intersectionBuckets.back().push_back(match);
    }
  }
}

/// Internal implementation to vectorize affine loops in 'loops' using the n-D
/// vectorization factors in 'vectorSizes'. By default, each vectorization
/// factor is applied inner-to-outer to the loops of each loop nest.
/// 'fastestVaryingPattern' can be optionally used to provide a different loop
/// vectorization order. `reductionLoops` can be provided to specify loops which
/// can be vectorized along the reduction dimension.
static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
                           ArrayRef<int64_t> vectorSizes,
                           ArrayRef<int64_t> fastestVaryingPattern,
                           const ReductionLoopMap &reductionLoops) {
  assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
         "Vectorizing reductions is supported only for 1-D vectors");

  // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
  std::optional<NestedPattern> pattern =
      makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
  if (!pattern) {
    LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
    return;
  }

  LLVM_DEBUG(dbgs() << "\n******************************************");
  LLVM_DEBUG(dbgs() << "\n******************************************");
  LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
  LLVM_DEBUG(dbgs() << *parentOp << "\n");

  unsigned patternDepth = pattern->getDepth();

  // Compute all the pattern matches and classify them into buckets of
  // intersecting matches.
  SmallVector<NestedMatch, 32> allMatches;
  pattern->match(parentOp, &allMatches);
  std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
  computeIntersectionBuckets(allMatches, intersectionBuckets);

  // Iterate over all buckets and vectorize the matches eagerly. We can only
  // vectorize one match from each bucket since all the matches within a bucket
  // intersect.
  for (auto &intersectingMatches : intersectionBuckets) {
    for (NestedMatch &match : intersectingMatches) {
      VectorizationStrategy strategy;
      // TODO: depending on profitability, elect to reduce the vector size.
      strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
      strategy.reductionLoops = reductionLoops;
      if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
                                      patternDepth, &strategy))) {
        continue;
      }
      vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
                                &strategy);
      // Vectorize match. Skip the rest of intersecting matches in the bucket
      // if vectorization succeeded.
      // TODO: if pattern does not apply, report it; alter the cost/benefit.
      // TODO: some diagnostics if failure to vectorize occurs.
      if (succeeded(vectorizeRootMatch(match, strategy)))
        break;
    }
  }

  LLVM_DEBUG(dbgs() << "\n");
}

/// Applies vectorization to the current function by searching over a bunch of
/// predetermined patterns.
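///
/// For example (an illustrative invocation; option spellings assumed from the
/// pass definition in Passes.td), vectorizing parallel loops with 128-wide
/// virtual vectors:
///
///   mlir-opt -affine-super-vectorize="virtual-vector-size=128" input.mlir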
void Vectorize::runOnOperation() {
  func::FuncOp f = getOperation();
  if (!fastestVaryingPattern.empty() &&
      fastestVaryingPattern.size() != vectorSizes.size()) {
    f.emitRemark("Fastest varying pattern specified with different size than "
                 "the vector size.");
    return signalPassFailure();
  }

  if (vectorizeReductions && vectorSizes.size() != 1) {
    f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
    return signalPassFailure();
  }

  if (llvm::any_of(vectorSizes, [](int64_t size) { return size <= 0; })) {
    f.emitError("Vectorization factor must be greater than zero.");
    return signalPassFailure();
  }

  DenseSet<Operation *> parallelLoops;
  ReductionLoopMap reductionLoops;

  // If 'vectorize-reductions=true' is provided, we also populate the
  // `reductionLoops` map.
  if (vectorizeReductions) {
    f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
      SmallVector<LoopReduction, 2> reductions;
      if (isLoopParallel(loop, &reductions)) {
        parallelLoops.insert(loop);
        // If it's not a reduction loop, adding it to the map is not necessary.
        if (!reductions.empty())
          reductionLoops[loop] = reductions;
      }
    });
  } else {
    f.walk([&parallelLoops](AffineForOp loop) {
      if (isLoopParallel(loop))
        parallelLoops.insert(loop);
    });
  }

  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
                 reductionLoops);
}

/// Verify that affine loops in 'loops' meet the nesting criteria expected by
/// SuperVectorizer:
///   * There must be at least one loop.
///   * There must be a single root loop (nesting level 0).
///   * Each loop at a given nesting level must be nested in a loop from a
///     previous nesting level.
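///
/// For example (illustrative), loops = {{%i0}, {%i1, %i2}} is valid when both
/// %i1 and %i2 are nested inside %i0, whereas loops = {{%i0, %i1}, {%i2}} is
/// rejected because it has two root loops.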
static LogicalResult
verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
  // Expected at least one loop.
  if (loops.empty())
    return failure();

  // Expected only one root loop.
  if (loops[0].size() != 1)
    return failure();

  // Traverse loops outer-to-inner to check some invariants.
  for (int i = 1, end = loops.size(); i < end; ++i) {
    for (AffineForOp loop : loops[i]) {
      // Check that each loop at this level is nested in one of the loops from
      // the previous level.
      if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
            return maybeParent->isProperAncestor(loop);
          }))
        return failure();

      // Check that each loop at this level is not nested in another loop from
      // this level.
      for (AffineForOp sibling : loops[i]) {
        if (sibling->isProperAncestor(loop))
          return failure();
      }
    }
  }

  return success();
}

/// External utility to vectorize affine loops in 'loops' using the n-D
/// vectorization factors in 'vectorSizes'. By default, each vectorization
/// factor is applied inner-to-outer to the loops of each loop nest.
/// 'fastestVaryingPattern' can be optionally used to provide a different loop
/// vectorization order.
/// If `reductionLoops` is not empty, the given reduction loops may be
/// vectorized along the reduction dimension.
/// TODO: Vectorizing reductions is supported only for 1-D vectorization.
void mlir::affine::vectorizeAffineLoops(
    Operation *parentOp, DenseSet<Operation *> &loops,
    ArrayRef<int64_t> vectorSizes, ArrayRef<int64_t> fastestVaryingPattern,
    const ReductionLoopMap &reductionLoops) {
  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
                 reductionLoops);
}

/// External utility to vectorize affine loops from a single loop nest using an
/// n-D vectorization strategy (see doc in VectorizationStrategy definition).
/// Loops are provided in a 2D vector container. The first dimension represents
/// the nesting level relative to the loops to be vectorized. The second
/// dimension contains the loops. This means that:
///   a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
///   b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
///
/// For example, for the following loop nest:
///
///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
///               %out0: memref<64x128x512xf32>,
///               %out1: memref<64x128x128xf32>) {
///     affine.for %i0 = 0 to 64 {
///       affine.for %i1 = 0 to 128 {
///         affine.for %i2 = 0 to 512 {
///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
///         }
///         affine.for %i3 = 0 to 128 {
///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
///         }
///       }
///     }
///     return
///   }
///
/// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
/// innermost loops;
/// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
/// loops;
/// loops = {{%i2}}, to vectorize only the first innermost loop;
/// loops = {{%i3}}, to vectorize only the second innermost loop;
/// loops = {{%i1}}, to vectorize only the middle loop.
LogicalResult mlir::affine::vectorizeAffineLoopNest(
    std::vector<SmallVector<AffineForOp, 2>> &loops,
    const VectorizationStrategy &strategy) {
  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  if (failed(verifyLoopNesting(loops)))
    return failure();
  return vectorizeLoopNest(loops, strategy);
}