| 1 | //===- LegalizeVectorStorage.cpp - Ensures SVE loads/stores are legal -----===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #include "mlir/Dialect/ArmSVE/IR/ArmSVEDialect.h" |
| 10 | #include "mlir/Dialect/ArmSVE/Transforms/Passes.h" |
| 11 | #include "mlir/Dialect/Func/IR/FuncOps.h" |
| 12 | #include "mlir/Dialect/MemRef/IR/MemRef.h" |
| 13 | #include "mlir/Dialect/Vector/IR/VectorOps.h" |
| 14 | #include "mlir/Transforms/GreedyPatternRewriteDriver.h" |
| 15 | |
| 16 | namespace mlir::arm_sve { |
| 17 | #define GEN_PASS_DEF_LEGALIZEVECTORSTORAGE |
| 18 | #include "mlir/Dialect/ArmSVE/Transforms/Passes.h.inc" |
| 19 | } // namespace mlir::arm_sve |
| 20 | |
| 21 | using namespace mlir; |
| 22 | using namespace mlir::arm_sve; |
| 23 | |
| 24 | // A tag to mark unrealized_conversions produced by this pass. This is used to |
| 25 | // detect IR this pass failed to completely legalize, and report an error. |
| 26 | // If everything was successfully legalized, no tagged ops will remain after |
| 27 | // this pass. |
| 28 | constexpr StringLiteral kSVELegalizerTag("__arm_sve_legalize_vector_storage__" ); |
| 29 | |
| 30 | /// Definitions: |
| 31 | /// |
| 32 | /// [1] svbool = vector<...x[16]xi1>, which maps to some multiple of full SVE |
| 33 | /// predicate registers. A full predicate is the smallest quantity that can be |
| 34 | /// loaded/stored. |
| 35 | /// |
| 36 | /// [2] SVE mask = hardware-sized SVE predicate mask, i.e. its trailing |
| 37 | /// dimension matches the size of a legal SVE vector size (such as |
| 38 | /// vector<[4]xi1>), but is too small to be stored to memory (i.e smaller than |
| 39 | /// a svbool). |
| 40 | |
| 41 | namespace { |
| 42 | |
| 43 | /// Checks if a vector type is a SVE mask [2]. |
| 44 | bool isSVEMaskType(VectorType type) { |
| 45 | return type.getRank() > 0 && type.getElementType().isInteger(width: 1) && |
| 46 | type.getScalableDims().back() && type.getShape().back() < 16 && |
| 47 | llvm::isPowerOf2_32(Value: type.getShape().back()) && |
| 48 | !llvm::is_contained(Range: type.getScalableDims().drop_back(), Element: true); |
| 49 | } |
| 50 | |
| 51 | VectorType widenScalableMaskTypeToSvbool(VectorType type) { |
| 52 | assert(isSVEMaskType(type)); |
| 53 | return VectorType::Builder(type).setDim(pos: type.getRank() - 1, val: 16); |
| 54 | } |
| 55 | |
| 56 | /// A helper for cloning an op and replacing it will a new version, updated by a |
| 57 | /// callback. |
| 58 | template <typename TOp, typename TLegalizerCallback> |
| 59 | void replaceOpWithLegalizedOp(PatternRewriter &rewriter, TOp op, |
| 60 | TLegalizerCallback callback) { |
| 61 | // Clone the previous op to preserve any properties/attributes. |
| 62 | auto newOp = op.clone(); |
| 63 | rewriter.insert(op: newOp); |
| 64 | rewriter.replaceOp(op, callback(newOp)); |
| 65 | } |
| 66 | |
| 67 | /// A helper for cloning an op and replacing it with a new version, updated by a |
| 68 | /// callback, and an unrealized conversion back to the type of the replaced op. |
| 69 | template <typename TOp, typename TLegalizerCallback> |
| 70 | void replaceOpWithUnrealizedConversion(PatternRewriter &rewriter, TOp op, |
| 71 | TLegalizerCallback callback) { |
| 72 | replaceOpWithLegalizedOp(rewriter, op, [&](TOp newOp) { |
| 73 | // Mark our `unrealized_conversion_casts` with a pass label. |
| 74 | return rewriter.create<UnrealizedConversionCastOp>( |
| 75 | op.getLoc(), TypeRange{op.getResult().getType()}, |
| 76 | ValueRange{callback(newOp)}, |
| 77 | NamedAttribute(rewriter.getStringAttr(bytes: kSVELegalizerTag), |
| 78 | rewriter.getUnitAttr())); |
| 79 | }); |
| 80 | } |
| 81 | |
| 82 | /// Extracts the widened SVE memref value (that's legal to store/load) from the |
| 83 | /// `unrealized_conversion_cast`s added by this pass. |
| 84 | static FailureOr<Value> getSVELegalizedMemref(Value illegalMemref) { |
| 85 | Operation *definingOp = illegalMemref.getDefiningOp(); |
| 86 | if (!definingOp || !definingOp->hasAttr(name: kSVELegalizerTag)) |
| 87 | return failure(); |
| 88 | auto unrealizedConversion = |
| 89 | llvm::cast<UnrealizedConversionCastOp>(Val: definingOp); |
| 90 | return unrealizedConversion.getOperand(i: 0); |
| 91 | } |
| 92 | |
| 93 | /// The default alignment of an alloca in LLVM may request overaligned sizes for |
| 94 | /// SVE types, which will fail during stack frame allocation. This rewrite |
| 95 | /// explicitly adds a reasonable alignment to allocas of scalable types. |
| 96 | struct RelaxScalableVectorAllocaAlignment |
| 97 | : public OpRewritePattern<memref::AllocaOp> { |
| 98 | using OpRewritePattern::OpRewritePattern; |
| 99 | |
| 100 | LogicalResult matchAndRewrite(memref::AllocaOp allocaOp, |
| 101 | PatternRewriter &rewriter) const override { |
| 102 | auto memrefElementType = allocaOp.getType().getElementType(); |
| 103 | auto vectorType = llvm::dyn_cast<VectorType>(Val&: memrefElementType); |
| 104 | if (!vectorType || !vectorType.isScalable() || allocaOp.getAlignment()) |
| 105 | return failure(); |
| 106 | |
| 107 | // Set alignment based on the defaults for SVE vectors and predicates. |
| 108 | unsigned aligment = vectorType.getElementType().isInteger(width: 1) ? 2 : 16; |
| 109 | rewriter.modifyOpInPlace(root: allocaOp, |
| 110 | callable: [&] { allocaOp.setAlignment(aligment); }); |
| 111 | |
| 112 | return success(); |
| 113 | } |
| 114 | }; |
| 115 | |
| 116 | /// Replaces allocations of SVE predicates smaller than an svbool [1] (_illegal_ |
| 117 | /// to load/store) with a wider allocation of svbool (_legal_ to load/store) |
| 118 | /// followed by a tagged unrealized conversion to the original type. |
| 119 | /// |
| 120 | /// Example |
| 121 | /// ``` |
| 122 | /// %alloca = memref.alloca() : memref<vector<[4]xi1>> |
| 123 | /// ``` |
| 124 | /// is rewritten into: |
| 125 | /// ``` |
| 126 | /// %widened = memref.alloca() {alignment = 1 : i64} : memref<vector<[16]xi1>> |
| 127 | /// %alloca = builtin.unrealized_conversion_cast %widened |
| 128 | /// : memref<vector<[16]xi1>> to memref<vector<[4]xi1>> |
| 129 | /// {__arm_sve_legalize_vector_storage__} |
| 130 | /// ``` |
| 131 | template <typename AllocLikeOp> |
| 132 | struct LegalizeSVEMaskAllocation : public OpRewritePattern<AllocLikeOp> { |
| 133 | using OpRewritePattern<AllocLikeOp>::OpRewritePattern; |
| 134 | |
| 135 | LogicalResult matchAndRewrite(AllocLikeOp allocLikeOp, |
| 136 | PatternRewriter &rewriter) const override { |
| 137 | auto vectorType = |
| 138 | llvm::dyn_cast<VectorType>(allocLikeOp.getType().getElementType()); |
| 139 | |
| 140 | if (!vectorType || !isSVEMaskType(vectorType)) |
| 141 | return failure(); |
| 142 | |
| 143 | // Replace this alloc-like op of an SVE mask [2] with one of a (storable) |
| 144 | // svbool mask [1]. A temporary unrealized_conversion_cast is added to the |
| 145 | // old type to allow local rewrites. |
| 146 | replaceOpWithUnrealizedConversion( |
| 147 | rewriter, allocLikeOp, [&](AllocLikeOp newAllocLikeOp) { |
| 148 | newAllocLikeOp.getResult().setType( |
| 149 | llvm::cast<MemRefType>(newAllocLikeOp.getType().cloneWith( |
| 150 | {}, widenScalableMaskTypeToSvbool(vectorType)))); |
| 151 | return newAllocLikeOp; |
| 152 | }); |
| 153 | |
| 154 | return success(); |
| 155 | } |
| 156 | }; |
| 157 | |
/// Replaces vector.type_casts of unrealized conversions to SVE predicate memref
/// types that are _illegal_ to load/store from (!= svbool [1]), with type casts
/// of memref types that are _legal_ to load/store, followed by unrealized
/// conversions.
///
/// Example:
/// ```
/// %alloca = builtin.unrealized_conversion_cast %widened
///   : memref<vector<3x[16]xi1>> to memref<vector<3x[8]xi1>>
///     {__arm_sve_legalize_vector_storage__}
/// %cast = vector.type_cast %alloca
///   : memref<vector<3x[8]xi1>> to memref<3xvector<[8]xi1>>
/// ```
/// is rewritten into:
/// ```
/// %widened_cast = vector.type_cast %widened
///   : memref<vector<3x[16]xi1>> to memref<3xvector<[16]xi1>>
/// %cast = builtin.unrealized_conversion_cast %widened_cast
///   : memref<3xvector<[16]xi1>> to memref<3xvector<[8]xi1>>
///     {__arm_sve_legalize_vector_storage__}
/// ```
struct LegalizeSVEMaskTypeCastConversion
    : public OpRewritePattern<vector::TypeCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TypeCastOp typeCastOp,
                                PatternRewriter &rewriter) const override {
    auto resultType = typeCastOp.getResultMemRefType();
    auto vectorType = llvm::dyn_cast<VectorType>(Val: resultType.getElementType());

    // Only casts whose result element type is an SVE mask [2] need widening.
    if (!vectorType || !isSVEMaskType(type: vectorType))
      return failure();

    // Requires the source memref to already be legalized (tagged cast).
    auto legalMemref = getSVELegalizedMemref(illegalMemref: typeCastOp.getMemref());
    if (failed(Result: legalMemref))
      return failure();

    // Replace this vector.type_cast with one of a (storable) svbool mask [1].
    replaceOpWithUnrealizedConversion(
        rewriter, op: typeCastOp, callback: [&](vector::TypeCastOp newTypeCast) {
          newTypeCast.setOperand(*legalMemref);
          newTypeCast.getResult().setType(
              llvm::cast<MemRefType>(Val: newTypeCast.getType().cloneWith(
                  shape: {}, elementType: widenScalableMaskTypeToSvbool(type: vectorType))));
          return newTypeCast;
        });

    return success();
  }
};
| 208 | |
| 209 | /// Replaces stores to unrealized conversions to SVE predicate memref types that |
| 210 | /// are _illegal_ to load/store from (!= svbool [1]), with |
| 211 | /// `arm_sve.convert_to_svbool`s followed by (legal) wider stores. |
| 212 | /// |
| 213 | /// Example: |
| 214 | /// ``` |
| 215 | /// memref.store %mask, %alloca[] : memref<vector<[8]xi1>> |
| 216 | /// ``` |
| 217 | /// is rewritten into: |
| 218 | /// ``` |
| 219 | /// %svbool = arm_sve.convert_to_svbool %mask : vector<[8]xi1> |
| 220 | /// memref.store %svbool, %widened[] : memref<vector<[16]xi1>> |
| 221 | /// ``` |
| 222 | struct LegalizeSVEMaskStoreConversion |
| 223 | : public OpRewritePattern<memref::StoreOp> { |
| 224 | using OpRewritePattern::OpRewritePattern; |
| 225 | |
| 226 | LogicalResult matchAndRewrite(memref::StoreOp storeOp, |
| 227 | PatternRewriter &rewriter) const override { |
| 228 | auto loc = storeOp.getLoc(); |
| 229 | |
| 230 | Value valueToStore = storeOp.getValueToStore(); |
| 231 | auto vectorType = llvm::dyn_cast<VectorType>(Val: valueToStore.getType()); |
| 232 | |
| 233 | if (!vectorType || !isSVEMaskType(type: vectorType)) |
| 234 | return failure(); |
| 235 | |
| 236 | auto legalMemref = getSVELegalizedMemref(illegalMemref: storeOp.getMemref()); |
| 237 | if (failed(Result: legalMemref)) |
| 238 | return failure(); |
| 239 | |
| 240 | auto legalMaskType = widenScalableMaskTypeToSvbool( |
| 241 | type: llvm::cast<VectorType>(Val: valueToStore.getType())); |
| 242 | auto convertToSvbool = rewriter.create<arm_sve::ConvertToSvboolOp>( |
| 243 | location: loc, args&: legalMaskType, args&: valueToStore); |
| 244 | // Replace this store with a conversion to a storable svbool mask [1], |
| 245 | // followed by a wider store. |
| 246 | replaceOpWithLegalizedOp(rewriter, op: storeOp, |
| 247 | callback: [&](memref::StoreOp newStoreOp) { |
| 248 | newStoreOp.setOperand(i: 0, value: convertToSvbool); |
| 249 | newStoreOp.setOperand(i: 1, value: *legalMemref); |
| 250 | return newStoreOp; |
| 251 | }); |
| 252 | |
| 253 | return success(); |
| 254 | } |
| 255 | }; |
| 256 | |
/// Replaces loads from unrealized conversions to SVE predicate memref types
/// that are _illegal_ to load/store from (!= svbool [1]) with (legal) wider
/// loads, followed by `arm_sve.convert_from_svbool`s.
///
/// Example:
/// ```
/// %reload = memref.load %alloca[] : memref<vector<[4]xi1>>
/// ```
/// is rewritten into:
/// ```
/// %svbool = memref.load %widened[] : memref<vector<[16]xi1>>
/// %reload = arm_sve.convert_from_svbool %svbool : vector<[4]xi1>
/// ```
struct LegalizeSVEMaskLoadConversion : public OpRewritePattern<memref::LoadOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(memref::LoadOp loadOp,
                                PatternRewriter &rewriter) const override {
    auto loc = loadOp.getLoc();

    Value loadedMask = loadOp.getResult();
    auto vectorType = llvm::dyn_cast<VectorType>(Val: loadedMask.getType());

    // Only loads of SVE masks [2] need widening.
    if (!vectorType || !isSVEMaskType(type: vectorType))
      return failure();

    // Requires the source memref to already be legalized (tagged cast).
    auto legalMemref = getSVELegalizedMemref(illegalMemref: loadOp.getMemref());
    if (failed(Result: legalMemref))
      return failure();

    auto legalMaskType = widenScalableMaskTypeToSvbool(type: vectorType);
    // Replace this load with a legal load of an svbool type, followed by a
    // conversion back to the original type.
    replaceOpWithLegalizedOp(rewriter, op: loadOp, callback: [&](memref::LoadOp newLoadOp) {
      newLoadOp.setMemRef(*legalMemref);
      newLoadOp.getResult().setType(legalMaskType);
      return rewriter.create<arm_sve::ConvertFromSvboolOp>(
          location: loc, args: loadedMask.getType(), args&: newLoadOp);
    });

    return success();
  }
};
| 300 | |
| 301 | /// Transforms a `transfer_read` operation so it reads vector of a type that |
| 302 | /// can be mapped to an LLVM type ("LLVM-legal" type). This is done by |
| 303 | /// collapsing trailing dimensions so we obtain a vector type with a single |
| 304 | /// scalable dimension in the rightmost position. |
| 305 | /// |
| 306 | /// Example: |
| 307 | /// ``` |
| 308 | /// %v = vector.transfer_read %M[%i, %j, %c0, %c0], %c0_i8 |
| 309 | /// {in_bounds = [false, true, true, true]} |
| 310 | /// : memref<?x?x2x8xi8>, vector<2x[4]x2x8xi8> |
| 311 | /// ``` |
| 312 | /// is rewritten to |
| 313 | /// ``` |
| 314 | /// %collapse_shape = memref.collapse_shape %M [[0], [1, 2, 3]] |
| 315 | /// : memref<?x?x2x8xi8> into memref<?x?xi8> |
| 316 | /// %0 = vector.transfer_read %collapse_shape[%i, %j], %c0_i8 |
| 317 | /// {in_bounds = [false, true]} |
| 318 | /// : memref<?x?xi8>, vector<2x[64]xi8> |
| 319 | /// %1 = vector.shape_cast %0 : vector<2x[64]xi8> to vector<2x[4]x2x8xi8> |
| 320 | /// ``` |
struct LegalizeTransferRead : public OpRewritePattern<vector::TransferReadOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
                                PatternRewriter &rewriter) const override {

    // Do not try to transform masked reads. For example, if we have a transfer
    // to a `vector<[4]x4xi8>` we could have a mask like
    //    1 1 1 0
    //    1 1 1 0
    //    1 1 1 0
    //    0 0 0 0
    // Flattening this mask would look like
    //    1 1 1 0 1 1 1 0 1 1 1 0 0 0 0 0
    // and we have not yet figured out an efficient way to build such a mask,
    // neither from the mask operand, nor from the original `vector.create_mask`
    // operation (if visible at all).
    if (readOp.isMasked() || readOp.getMask())
      return rewriter.notifyMatchFailure(arg&: readOp,
                                         msg: "masked transfers not-supported" );

    // General permutation maps are not supported. The issue is with transpose,
    // broadcast, and other forms of non-identity mapping in the minor
    // dimensions which is impossible to represent after collapsing (at least
    // because the resulting "collapsed" maps would have smaller number of
    // dimension indices).
    // TODO: We have not had yet the need for it, but some forms of permutation
    // maps with identity in the minor dimensions would be supported, for
    // example `(i, j, k, p) -> (j, i, k, p)` where we need to collapse only `k`
    // and `p`.
    if (!readOp.getPermutationMap().isMinorIdentity())
      return rewriter.notifyMatchFailure(arg&: readOp, msg: "non-identity permutation" );

    // We handle transfers of vectors with rank >= 2 and a single scalable
    // dimension. This transformation aims to transform an LLVM-illegal type
    // into an LLVM-legal type and one dimensional vectors are already
    // LLVM-legal, even if scalable. A value of a vector type with more than one
    // scalable dimension is impossible to represent using a vector type with no
    // scalable dimensions or a single one. For example a `vector<[4]x[4]xi8>`
    // would have `4 * 4 * vscale * vscale` elements and this quantity is
    // impossible to represent as `N` or `N * vscale` (where `N` is a constant).
    VectorType origVT = readOp.getVectorType();
    ArrayRef<bool> origScalableDims = origVT.getScalableDims();
    const int64_t origVRank = origVT.getRank();
    if (origVRank < 2 || origVT.getNumScalableDims() != 1)
      return rewriter.notifyMatchFailure(arg&: readOp, msg: "wrong dimensions" );

    // Number of trailing dimensions to collapse, including the scalable
    // dimension. Nothing to do if the single scalable dimension is already the
    // last one.
    const int64_t numCollapseDims = std::distance(
        first: llvm::find(Range&: origScalableDims, Val: true), last: origScalableDims.end());
    if (numCollapseDims < 2)
      return rewriter.notifyMatchFailure(arg&: readOp,
                                         msg: "scalable dimension is trailing" );

    // We want a simple memref (not a tensor) with contiguous elements for at
    // least all the trailing dimensions up to and including the scalable one.
    auto memTy = dyn_cast<MemRefType>(Val: readOp.getBase().getType());
    if (!(memTy && memTy.areTrailingDimsContiguous(n: numCollapseDims)))
      return rewriter.notifyMatchFailure(
          arg&: readOp, msg: "non-contiguous memref dimensions to collapse" );

    // The dimensions to collapse (excluding the scalable one) of the vector and
    // the memref must match. A dynamic memref dimension is considered
    // non-matching. The transfers from the dimensions to collapse must be
    // in-bounds (it follows the corresponding indices would be zero). This
    // guarantees that the operation transfers a contiguous block
    // and no padding is necessary.
    if (!llvm::equal(LRange: memTy.getShape().take_back(N: numCollapseDims - 1),
                     RRange: origVT.getShape().take_back(N: numCollapseDims - 1)))
      return rewriter.notifyMatchFailure(
          arg&: readOp, msg: "memref and vector dimensions do not match" );

    SmallVector<bool> origInBounds = readOp.getInBoundsValues();
    if (!llvm::all_of(
            Range: ArrayRef<bool>(origInBounds).take_back(N: numCollapseDims - 1),
            P: [](bool v) { return v; }))
      return rewriter.notifyMatchFailure(
          arg&: readOp, msg: "out-of-bounds transfer from a dimension to collapse" );

    // Collapse the trailing dimensions of the memref: each leading dimension
    // keeps its own reassociation group; the last group gathers all the
    // dimensions being collapsed.
    SmallVector<ReassociationIndices> reassoc;
    for (int64_t i = 0; i < memTy.getRank() - numCollapseDims + 1; ++i)
      reassoc.push_back(Elt: {i});
    for (int64_t i = memTy.getRank() - numCollapseDims + 1; i < memTy.getRank();
         ++i)
      reassoc.back().push_back(Elt: i);
    if (!memref::CollapseShapeOp::isGuaranteedCollapsible(srcType: memTy, reassociation: reassoc))
      return failure();
    Value collapsedMem = rewriter.create<memref::CollapseShapeOp>(
        location: readOp.getLoc(), args: readOp.getBase(), args&: reassoc);

    // Get a vector type with collapsed trailing dimensions: fold the trailing
    // sizes into the (scalable) dimension that starts the collapsed group.
    SmallVector<int64_t> shape(origVT.getShape());
    for (int64_t i = origVRank - numCollapseDims + 1; i < origVRank; ++i)
      shape[origVRank - numCollapseDims] *= shape[i];
    shape.pop_back_n(NumItems: numCollapseDims - 1);
    auto collapsedVT =
        VectorType::get(shape, elementType: origVT.getElementType(),
                        scalableDims: origScalableDims.drop_back(N: numCollapseDims - 1));

    // Drop the extra (zero) indices.
    auto indices = readOp.getIndices().drop_back(n: numCollapseDims - 1);

    // Create the new `transfer_read`.
    auto newReadOp = rewriter.create<vector::TransferReadOp>(
        location: readOp.getLoc(), args&: collapsedVT, args&: collapsedMem, args&: indices,
        args: readOp.getPadding(),
        args: ArrayRef<bool>(origInBounds).drop_back(N: numCollapseDims - 1));

    // Cast back to the original vector type.
    auto toOrigShape = rewriter.create<vector::ShapeCastOp>(location: readOp.getLoc(),
                                                            args&: origVT, args&: newReadOp);

    rewriter.replaceOp(op: readOp, newOp: toOrigShape);
    return success();
  }
};
| 440 | |
| 441 | } // namespace |
| 442 | |
| 443 | void mlir::arm_sve::populateLegalizeVectorStoragePatterns( |
| 444 | RewritePatternSet &patterns) { |
| 445 | patterns |
| 446 | .add<RelaxScalableVectorAllocaAlignment, |
| 447 | LegalizeSVEMaskAllocation<memref::AllocaOp>, |
| 448 | LegalizeSVEMaskAllocation<memref::AllocOp>, |
| 449 | LegalizeSVEMaskTypeCastConversion, LegalizeSVEMaskStoreConversion, |
| 450 | LegalizeSVEMaskLoadConversion, LegalizeTransferRead>( |
| 451 | arg: patterns.getContext()); |
| 452 | } |
| 453 | |
| 454 | namespace { |
| 455 | struct LegalizeVectorStorage |
| 456 | : public arm_sve::impl::LegalizeVectorStorageBase<LegalizeVectorStorage> { |
| 457 | |
| 458 | void runOnOperation() override { |
| 459 | RewritePatternSet patterns(&getContext()); |
| 460 | populateLegalizeVectorStoragePatterns(patterns); |
| 461 | if (failed(Result: applyPatternsGreedily(op: getOperation(), patterns: std::move(patterns)))) { |
| 462 | signalPassFailure(); |
| 463 | } |
| 464 | ConversionTarget target(getContext()); |
| 465 | target.addDynamicallyLegalOp<UnrealizedConversionCastOp>( |
| 466 | callback: [](UnrealizedConversionCastOp unrealizedConversion) { |
| 467 | return !unrealizedConversion->hasAttr(name: kSVELegalizerTag); |
| 468 | }); |
| 469 | // This detects if we failed to completely legalize the IR. |
| 470 | if (failed(Result: applyPartialConversion(op: getOperation(), target, patterns: {}))) |
| 471 | signalPassFailure(); |
| 472 | } |
| 473 | }; |
| 474 | |
| 475 | } // namespace |
| 476 | |
| 477 | std::unique_ptr<Pass> mlir::arm_sve::createLegalizeVectorStoragePass() { |
| 478 | return std::make_unique<LegalizeVectorStorage>(); |
| 479 | } |
| 480 | |