//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"

#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arith/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
                                          const SparsifierOptions &options) {
  // Rewrite named linalg ops into generic ops.
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizeNamedOpsPass());
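  // (The sparsification rewriting matches linalg.generic ops only, so named
  // ops such as linalg.matmul must be generalized before they can be
  // sparsified.)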

  // Sparsification and bufferization mini-pipeline.
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(
          options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.createSparseDeallocs,
      options.enableRuntimeLibrary, options.enableBufferInitialization,
      options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices,
      options.enableGPULibgen));
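  // This single pass combines sparsification with one-shot bufferization, so
  // that annotated sparse tensors and the surrounding dense tensors are
  // lowered to buffers in one consistent step.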

  // Bail out early for the analysis-only test setup.
  if (options.testBufferizationAnalysisOnly)
    return;

  // Storage specifier lowering and bufferization wrap-up.
  pm.addPass(createStorageSpecifierToLLVMPass());
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
  pm.addNestedPass<func::FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());
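  // (Finalizing bufferization cleans up any remaining
  // bufferization.to_tensor / bufferization.to_memref materializations.)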

  // GPU code generation.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToNVVMOps());
  }
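  // SparseGPUCodegen outlines eligible sparse loops into gpu.module kernels;
  // the nested passes above lower those kernels to the NVVM dialect, while
  // the host-side code follows the CPU lowering path below.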

  // Progressively lower to LLVM. Note that the convert-vector-to-llvm
  // pass is repeated on purpose.
  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(memref::createExpandReallocPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());

  // Finalize GPU code generation.
  if (gpuCodegen) {
    GpuNVVMAttachTargetOptions nvvmTargetOptions;
    nvvmTargetOptions.triple = options.gpuTriple;
    nvvmTargetOptions.chip = options.gpuChip;
    nvvmTargetOptions.features = options.gpuFeatures;
    pm.addPass(createGpuNVVMAttachTarget(nvvmTargetOptions));
    pm.addPass(createGpuToLLVMConversionPass());
    GpuModuleToBinaryPassOptions gpuModuleToBinaryPassOptions;
    gpuModuleToBinaryPassOptions.compilationTarget = options.gpuFormat;
    pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
  }
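  // The attach-target / module-to-binary pair serializes the device code
  // into the format selected by options.gpuFormat and embeds it in the host
  // module as gpu.binary ops.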

  // Ensure all casts are realized.
  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//

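// Registers the "sparsifier" pipeline with the global pass registry so that
// it can be invoked from tools such as mlir-opt, e.g.
//
//   mlir-opt input.mlir --sparsifier="enable-runtime-library=false"
//
// (a sketch; the enable-runtime-library spelling assumes the flag name
// declared in SparsifierOptions). The pipeline can also be built
// programmatically:
//
//   PassManager pm(module->getContext());
//   mlir::sparse_tensor::buildSparsifier(pm, SparsifierOptions());
//   if (failed(pm.run(module)))
//     ... // handle the error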
void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparsifierOptions>(
      "sparsifier",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparsifier);
}