1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2018-2019 HiSilicon Limited. */ |
3 | #include <linux/acpi.h> |
4 | #include <linux/bitops.h> |
5 | #include <linux/debugfs.h> |
6 | #include <linux/init.h> |
7 | #include <linux/io.h> |
8 | #include <linux/kernel.h> |
9 | #include <linux/module.h> |
10 | #include <linux/pci.h> |
11 | #include <linux/pm_runtime.h> |
12 | #include <linux/topology.h> |
13 | #include <linux/uacce.h> |
14 | #include "hpre.h" |
15 | |
/*
 * HPRE MMIO register offsets and bit definitions.  Offsets are relative to
 * the device's I/O base (qm->io_base) unless stated otherwise.
 */
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_COMM_CNT_CLR_CE 0x0
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
#define HPRE_VFG_AXQOS 0x30100c
#define HPRE_VFG_AXCACHE 0x301010
#define HPRE_RDCHN_INI_CFG 0x301014
#define HPRE_AWUSR_FP_CFG 0x301018
#define HPRE_BD_ENDIAN 0x301020
#define HPRE_ECC_BYPASS 0x301024
#define HPRE_RAS_WIDTH_CFG 0x301028
#define HPRE_POISON_BYPASS 0x30102c
#define HPRE_BD_ARUSR_CFG 0x301030
#define HPRE_BD_AWUSR_CFG 0x301034
#define HPRE_TYPES_ENB 0x301038
#define HPRE_RSA_ENB BIT(0)
#define HPRE_ECC_ENB BIT(1)
#define HPRE_DATA_RUSER_CFG 0x30103c
#define HPRE_DATA_WUSER_CFG 0x301040
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
/* NOTE: HPRE_HAC_INT_MSK aliases HPRE_INT_MASK (same offset 0x301400). */
#define HPRE_HAC_INT_MSK 0x301400
#define HPRE_HAC_RAS_CE_ENB 0x301410
#define HPRE_HAC_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_FE_ENB 0x301418
#define HPRE_HAC_INT_SET 0x301500
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
/* Per-cluster register offsets, relative to the cluster's own base. */
#define HPRE_CORE_EN_OFFSET 0x04
#define HPRE_CORE_INI_CFG_OFFSET 0x20
#define HPRE_CORE_INI_STATUS_OFFSET 0x80
#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c
#define HPRE_CORE_IS_SCHD_OFFSET 0x90

/* NOTE: these alias HPRE_HAC_RAS_*_ENB above (same offsets). */
#define HPRE_RAS_CE_ENB 0x301410
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0

#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT 0x301a04
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_SOURCE_INT 0x301600
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
#define HPRE_TIMEOUT_ABNML_BIT 6
#define HPRE_PASID_EN_BIT 9
/* Register read-poll interval and timeout, in microseconds. */
#define HPRE_REG_RD_INTVRL_US 10
#define HPRE_REG_RD_TMOUT_US 1000
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define PCI_DEVICE_ID_HUAWEI_HPRE_PF 0xa258
#define HPRE_QM_USR_CFG_MASK GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
#define HPRE_BD_USR_MASK GENMASK(1, 0)
#define HPRE_PREFETCH_CFG 0x301130
#define HPRE_SVA_PREFTCH_DFX 0x30115C
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))

/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
#define HPRE_PEH_CFG_AUTO_GATE 0x301a2c
#define HPRE_CLUSTER_DYN_CTL 0x302010
#define HPRE_CORE_SHB_CFG 0x302088
#define HPRE_CLKGATE_CTL_EN BIT(0)
#define HPRE_PEH_CFG_AUTO_GATE_EN BIT(0)
#define HPRE_CLUSTER_DYN_CTL_EN BIT(0)
#define HPRE_CORE_GATE_EN (BIT(30) | BIT(31))

#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
#define HPRE_WR_MSI_PORT BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)

/* Hardware-triggered FLR sources in QM_PEH_AXUSER_CFG. */
#define HPRE_QM_BME_FLR BIT(7)
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)

#define HPRE_SHAPER_TYPE_RATE 640
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
#define HPRE_CTX_Q_NUM_DEF 1

/* DFX (debug) register regions dumped via debugfs, with region lengths. */
#define HPRE_DFX_BASE 0x301000
#define HPRE_DFX_COMMON1 0x301400
#define HPRE_DFX_COMMON2 0x301A00
#define HPRE_DFX_CORE 0x302000
#define HPRE_DFX_BASE_LEN 0x55
#define HPRE_DFX_COMMON1_LEN 0x41
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
120 | |
/* Driver name used for registration and debugfs directory naming. */
static const char hpre_name[] = "hisi_hpre";
/* Root dentry of the module's debugfs hierarchy. */
static struct dentry *hpre_debugfs_root;
/* PCI IDs of the supported HPRE physical and virtual functions. */
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);
130 | |
/* Maps a hardware error interrupt bit (or bit range) to a log message. */
struct hpre_hw_error {
	u32 int_msk;		/* bit(s) in the HPRE interrupt status register */
	const char *msg;	/* human-readable description of the error */
};
135 | |
/*
 * Algorithm names matched against the device's algorithm-capability bitmap.
 * The names are newline-terminated; presumably they are concatenated into a
 * single list string by the QM framework -- confirm against qm_dev_alg users.
 */
static const struct qm_dev_alg hpre_dev_algs[] = {
	{
		.alg_msk = BIT(0),
		.alg = "rsa\n"
	}, {
		.alg_msk = BIT(1),
		.alg = "dh\n"
	}, {
		.alg_msk = BIT(2),
		.alg = "ecdh\n"
	}, {
		.alg_msk = BIT(3),
		.alg = "ecdsa\n"
	}, {
		.alg_msk = BIT(4),
		.alg = "sm2\n"
	}, {
		.alg_msk = BIT(5),
		.alg = "x25519\n"
	}, {
		.alg_msk = BIT(6),
		.alg = "x448\n"
	}, {
		/* sentinel */
	}
};
162 | |
/* List of all probed HPRE devices plus the crypto (un)registration hooks. */
static struct hisi_qm_list hpre_devices = {
	.register_to_crypto = hpre_algs_register,
	.unregister_from_crypto = hpre_algs_unregister,
};
167 | |
/* debugfs file names, indexed by enum hpre_ctrl_dbgfs_file type. */
static const char * const hpre_debug_file_name[] = {
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
172 | |
/* Indexes into hpre_basic_info[]; one entry per queryable capability. */
enum hpre_cap_type {
	HPRE_QM_NFE_MASK_CAP,
	HPRE_QM_RESET_MASK_CAP,
	HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
	HPRE_QM_CE_MASK_CAP,
	HPRE_NFE_MASK_CAP,
	HPRE_RESET_MASK_CAP,
	HPRE_OOO_SHUTDOWN_MASK_CAP,
	HPRE_CE_MASK_CAP,
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_TYPE_NUM_CAP,
	HPRE_CORE_NUM_CAP,
	HPRE_CLUSTER_CORE_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
	HPRE_CORE1_ALG_BITMAP_CAP,
	HPRE_CORE2_ALG_BITMAP_CAP,
	HPRE_CORE3_ALG_BITMAP_CAP,
	HPRE_CORE4_ALG_BITMAP_CAP,
	HPRE_CORE5_ALG_BITMAP_CAP,
	HPRE_CORE6_ALG_BITMAP_CAP,
	HPRE_CORE7_ALG_BITMAP_CAP,
	HPRE_CORE8_ALG_BITMAP_CAP,
	HPRE_CORE9_ALG_BITMAP_CAP,
	HPRE_CORE10_ALG_BITMAP_CAP
};
200 | |
/*
 * Capability table.  Each row holds the capability type, its register
 * offset, bit shift and mask, followed by three per-hardware-revision
 * fallback values.  NOTE(review): field order follows struct
 * hisi_qm_cap_info (declared elsewhere) -- confirm against the QM header.
 */
static const struct hisi_qm_cap_info hpre_basic_info[] = {
	{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
	{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
	{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
	{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
	{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
	{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
	{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
	{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
	{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
	{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
	{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
	{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
	{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
228 | |
/*
 * Indexes into qm->cap_tables.dev_cap_table for capabilities read once at
 * probe time.  Must stay in sync with hpre_pre_store_caps[] below.
 */
enum hpre_pre_store_cap_idx {
	HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
	HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
	HPRE_DRV_ALG_BITMAP_CAP_IDX,
	HPRE_DEV_ALG_BITMAP_CAP_IDX,
};
235 | |
/* Capabilities pre-stored at probe; order matches enum hpre_pre_store_cap_idx. */
static const u32 hpre_pre_store_caps[] = {
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
};
242 | |
/* Hardware error descriptors: interrupt status bit(s) -> log message. */
static const struct hpre_hw_error hpre_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "core_ecc_1bit_err_int_set"
	}, {
		.int_msk = BIT(1),
		.msg = "core_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(2),
		.msg = "dat_wb_poison_int_set"
	}, {
		.int_msk = BIT(3),
		.msg = "dat_rd_poison_int_set"
	}, {
		.int_msk = BIT(4),
		.msg = "bd_rd_poison_int_set"
	}, {
		.int_msk = BIT(5),
		.msg = "ooo_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(6),
		.msg = "cluster1_shb_timeout_int_set"
	}, {
		.int_msk = BIT(7),
		.msg = "cluster2_shb_timeout_int_set"
	}, {
		.int_msk = BIT(8),
		.msg = "cluster3_shb_timeout_int_set"
	}, {
		.int_msk = BIT(9),
		.msg = "cluster4_shb_timeout_int_set"
	}, {
		.int_msk = GENMASK(15, 10),
		.msg = "ooo_rdrsp_err_int_set"
	}, {
		.int_msk = GENMASK(21, 16),
		.msg = "ooo_wrrsp_err_int_set"
	}, {
		.int_msk = BIT(22),
		.msg = "pt_rng_timeout_int_set"
	}, {
		.int_msk = BIT(23),
		.msg = "sva_fsm_timeout_int_set"
	}, {
		.int_msk = BIT(24),
		.msg = "sva_int_set"
	}, {
		/* sentinel */
	}
};
293 | |
/* MMIO base offset of each cluster's register block. */
static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};
304 | |
/*
 * Per-cluster debug registers dumped via debugfs.  Names are padded with
 * spaces so the dump output columns line up; keep the padding as-is.
 */
static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS          ", HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG            ", HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS         ", HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN          ", HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD            ", HPRE_CORE_IS_SCHD_OFFSET},
};
312 | |
/* Common (non-cluster) debug registers dumped via debugfs; names padded. */
static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN              ", HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS                    ", HPRE_VFG_AXQOS},
	{"AWUSR_CFG                ", HPRE_AWUSR_FP_CFG},
	{"BD_ENDIAN                ", HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL           ", HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH            ", HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS            ", HPRE_POISON_BYPASS},
	{"BD_ARUSER                ", HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER                ", HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER              ", HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER              ", HPRE_DATA_WUSER_CFG},
	{"INT_STATUS               ", HPRE_INT_STATUS},
	{"INT_MASK                 ", HPRE_HAC_INT_MSK},
	{"RAS_CE_ENB               ", HPRE_HAC_RAS_CE_ENB},
	{"RAS_NFE_ENB              ", HPRE_HAC_RAS_NFE_ENB},
	{"RAS_FE_ENB               ", HPRE_HAC_RAS_FE_ENB},
	{"INT_SET                  ", HPRE_HAC_INT_SET},
	{"RNG_TIMEOUT_NUM          ", HPRE_RNG_TIMEOUT_NUM},
};
333 | |
/* debugfs file names for the software DFX counters, one per hpre_dfx entry. */
static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt"
};
343 | |
/* define the HPRE's dfx regs region and region length */
static struct dfx_diff_registers hpre_diff_regs[] = {
	{
		.reg_offset = HPRE_DFX_BASE,
		.reg_len = HPRE_DFX_BASE_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON1,
		.reg_len = HPRE_DFX_COMMON1_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON2,
		.reg_len = HPRE_DFX_COMMON2_LEN,
	}, {
		.reg_offset = HPRE_DFX_CORE,
		.reg_len = HPRE_DFX_CORE_LEN,
	},
};
360 | |
361 | bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) |
362 | { |
363 | u32 cap_val; |
364 | |
365 | cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val; |
366 | if (alg & cap_val) |
367 | return true; |
368 | |
369 | return false; |
370 | } |
371 | |
372 | static int hpre_diff_regs_show(struct seq_file *s, void *unused) |
373 | { |
374 | struct hisi_qm *qm = s->private; |
375 | |
376 | hisi_qm_acc_diff_regs_dump(qm, s, dregs: qm->debug.acc_diff_regs, |
377 | ARRAY_SIZE(hpre_diff_regs)); |
378 | |
379 | return 0; |
380 | } |
381 | |
382 | DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs); |
383 | |
384 | static int hpre_com_regs_show(struct seq_file *s, void *unused) |
385 | { |
386 | hisi_qm_regs_dump(s, regset: s->private); |
387 | |
388 | return 0; |
389 | } |
390 | |
391 | DEFINE_SHOW_ATTRIBUTE(hpre_com_regs); |
392 | |
393 | static int hpre_cluster_regs_show(struct seq_file *s, void *unused) |
394 | { |
395 | hisi_qm_regs_dump(s, regset: s->private); |
396 | |
397 | return 0; |
398 | } |
399 | |
400 | DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs); |
401 | |
/* Parameter ops for "uacce_mode": validated set, plain integer get. */
static const struct kernel_param_ops hpre_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};
406 | |
407 | /* |
408 | * uacce_mode = 0 means hpre only register to crypto, |
409 | * uacce_mode = 1 means hpre both register to crypto and uacce. |
410 | */ |
411 | static u32 uacce_mode = UACCE_MODE_NOUACCE; |
412 | module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444); |
413 | MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); |
414 | |
/* Set when the user explicitly passed pf_q_num on the command line. */
static bool pf_q_num_flag;
/*
 * Set handler for "pf_q_num": record that a value was supplied, then
 * delegate validation and assignment to the common q_num_set() helper.
 */
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
422 | |
/* Parameter ops for "pf_q_num": custom set (records user override). */
static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

/* Number of queues assigned to the PF; default from the common header. */
static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");
431 | |
/* Parameter ops for "vfs_num": validated by the common vfs_num_set(). */
static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

/* Number of SR-IOV virtual functions to enable at probe (0 = none). */
static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
440 | |
441 | struct hisi_qp *hpre_create_qp(u8 type) |
442 | { |
443 | int node = cpu_to_node(raw_smp_processor_id()); |
444 | struct hisi_qp *qp = NULL; |
445 | int ret; |
446 | |
447 | if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE) |
448 | return NULL; |
449 | |
450 | /* |
451 | * type: 0 - RSA/DH. algorithm supported in V2, |
452 | * 1 - ECC algorithm in V3. |
453 | */ |
454 | ret = hisi_qm_alloc_qps_node(qm_list: &hpre_devices, qp_num: 1, alg_type: type, node, qps: &qp); |
455 | if (!ret) |
456 | return qp; |
457 | |
458 | return NULL; |
459 | } |
460 | |
461 | static void hpre_config_pasid(struct hisi_qm *qm) |
462 | { |
463 | u32 val1, val2; |
464 | |
465 | if (qm->ver >= QM_HW_V3) |
466 | return; |
467 | |
468 | val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); |
469 | val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); |
470 | if (qm->use_sva) { |
471 | val1 |= BIT(HPRE_PASID_EN_BIT); |
472 | val2 |= BIT(HPRE_PASID_EN_BIT); |
473 | } else { |
474 | val1 &= ~BIT(HPRE_PASID_EN_BIT); |
475 | val2 &= ~BIT(HPRE_PASID_EN_BIT); |
476 | } |
477 | writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG); |
478 | writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG); |
479 | } |
480 | |
481 | static int hpre_cfg_by_dsm(struct hisi_qm *qm) |
482 | { |
483 | struct device *dev = &qm->pdev->dev; |
484 | union acpi_object *obj; |
485 | guid_t guid; |
486 | |
487 | if (guid_parse(uuid: "b06b81ab-0134-4a45-9b0c-483447b95fa7" , u: &guid)) { |
488 | dev_err(dev, "Hpre GUID failed\n" ); |
489 | return -EINVAL; |
490 | } |
491 | |
492 | /* Switch over to MSI handling due to non-standard PCI implementation */ |
493 | obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid: &guid, |
494 | rev: 0, HPRE_VIA_MSI_DSM, NULL); |
495 | if (!obj) { |
496 | dev_err(dev, "ACPI handle failed!\n" ); |
497 | return -EIO; |
498 | } |
499 | |
500 | ACPI_FREE(obj); |
501 | |
502 | return 0; |
503 | } |
504 | |
505 | static int hpre_set_cluster(struct hisi_qm *qm) |
506 | { |
507 | struct device *dev = &qm->pdev->dev; |
508 | unsigned long offset; |
509 | u32 cluster_core_mask; |
510 | u8 clusters_num; |
511 | u32 val = 0; |
512 | int ret, i; |
513 | |
514 | cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val; |
515 | clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; |
516 | for (i = 0; i < clusters_num; i++) { |
517 | offset = i * HPRE_CLSTR_ADDR_INTRVL; |
518 | |
519 | /* clusters initiating */ |
520 | writel(val: cluster_core_mask, |
521 | addr: qm->io_base + offset + HPRE_CORE_ENB); |
522 | writel(val: 0x1, addr: qm->io_base + offset + HPRE_CORE_INI_CFG); |
523 | ret = readl_relaxed_poll_timeout(qm->io_base + offset + |
524 | HPRE_CORE_INI_STATUS, val, |
525 | ((val & cluster_core_mask) == |
526 | cluster_core_mask), |
527 | HPRE_REG_RD_INTVRL_US, |
528 | HPRE_REG_RD_TMOUT_US); |
529 | if (ret) { |
530 | dev_err(dev, |
531 | "cluster %d int st status timeout!\n" , i); |
532 | return -ETIMEDOUT; |
533 | } |
534 | } |
535 | |
536 | return 0; |
537 | } |
538 | |
539 | /* |
540 | * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV). |
541 | * Or it may stay in D3 state when we bind and unbind hpre quickly, |
542 | * as it does FLR triggered by hardware. |
543 | */ |
544 | static void disable_flr_of_bme(struct hisi_qm *qm) |
545 | { |
546 | u32 val; |
547 | |
548 | val = readl(addr: qm->io_base + QM_PEH_AXUSER_CFG); |
549 | val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR); |
550 | val |= HPRE_QM_PM_FLR; |
551 | writel(val, addr: qm->io_base + QM_PEH_AXUSER_CFG); |
552 | writel(PEH_AXUSER_CFG_ENABLE, addr: qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); |
553 | } |
554 | |
555 | static void hpre_open_sva_prefetch(struct hisi_qm *qm) |
556 | { |
557 | u32 val; |
558 | int ret; |
559 | |
560 | if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) |
561 | return; |
562 | |
563 | /* Enable prefetch */ |
564 | val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); |
565 | val &= HPRE_PREFETCH_ENABLE; |
566 | writel(val, addr: qm->io_base + HPRE_PREFETCH_CFG); |
567 | |
568 | ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG, |
569 | val, !(val & HPRE_PREFETCH_DISABLE), |
570 | HPRE_REG_RD_INTVRL_US, |
571 | HPRE_REG_RD_TMOUT_US); |
572 | if (ret) |
573 | pci_err(qm->pdev, "failed to open sva prefetch\n" ); |
574 | } |
575 | |
576 | static void hpre_close_sva_prefetch(struct hisi_qm *qm) |
577 | { |
578 | u32 val; |
579 | int ret; |
580 | |
581 | if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) |
582 | return; |
583 | |
584 | val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); |
585 | val |= HPRE_PREFETCH_DISABLE; |
586 | writel(val, addr: qm->io_base + HPRE_PREFETCH_CFG); |
587 | |
588 | ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX, |
589 | val, !(val & HPRE_SVA_DISABLE_READY), |
590 | HPRE_REG_RD_INTVRL_US, |
591 | HPRE_REG_RD_TMOUT_US); |
592 | if (ret) |
593 | pci_err(qm->pdev, "failed to close sva prefetch\n" ); |
594 | } |
595 | |
596 | static void hpre_enable_clock_gate(struct hisi_qm *qm) |
597 | { |
598 | u32 val; |
599 | |
600 | if (qm->ver < QM_HW_V3) |
601 | return; |
602 | |
603 | val = readl(addr: qm->io_base + HPRE_CLKGATE_CTL); |
604 | val |= HPRE_CLKGATE_CTL_EN; |
605 | writel(val, addr: qm->io_base + HPRE_CLKGATE_CTL); |
606 | |
607 | val = readl(addr: qm->io_base + HPRE_PEH_CFG_AUTO_GATE); |
608 | val |= HPRE_PEH_CFG_AUTO_GATE_EN; |
609 | writel(val, addr: qm->io_base + HPRE_PEH_CFG_AUTO_GATE); |
610 | |
611 | val = readl(addr: qm->io_base + HPRE_CLUSTER_DYN_CTL); |
612 | val |= HPRE_CLUSTER_DYN_CTL_EN; |
613 | writel(val, addr: qm->io_base + HPRE_CLUSTER_DYN_CTL); |
614 | |
615 | val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG); |
616 | val |= HPRE_CORE_GATE_EN; |
617 | writel(val, addr: qm->io_base + HPRE_CORE_SHB_CFG); |
618 | } |
619 | |
620 | static void hpre_disable_clock_gate(struct hisi_qm *qm) |
621 | { |
622 | u32 val; |
623 | |
624 | if (qm->ver < QM_HW_V3) |
625 | return; |
626 | |
627 | val = readl(addr: qm->io_base + HPRE_CLKGATE_CTL); |
628 | val &= ~HPRE_CLKGATE_CTL_EN; |
629 | writel(val, addr: qm->io_base + HPRE_CLKGATE_CTL); |
630 | |
631 | val = readl(addr: qm->io_base + HPRE_PEH_CFG_AUTO_GATE); |
632 | val &= ~HPRE_PEH_CFG_AUTO_GATE_EN; |
633 | writel(val, addr: qm->io_base + HPRE_PEH_CFG_AUTO_GATE); |
634 | |
635 | val = readl(addr: qm->io_base + HPRE_CLUSTER_DYN_CTL); |
636 | val &= ~HPRE_CLUSTER_DYN_CTL_EN; |
637 | writel(val, addr: qm->io_base + HPRE_CLUSTER_DYN_CTL); |
638 | |
639 | val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG); |
640 | val &= ~HPRE_CORE_GATE_EN; |
641 | writel(val, addr: qm->io_base + HPRE_CORE_SHB_CFG); |
642 | } |
643 | |
644 | static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) |
645 | { |
646 | struct device *dev = &qm->pdev->dev; |
647 | u32 val; |
648 | int ret; |
649 | |
650 | /* disabel dynamic clock gate before sram init */ |
651 | hpre_disable_clock_gate(qm); |
652 | |
653 | writel(HPRE_QM_USR_CFG_MASK, addr: qm->io_base + QM_ARUSER_M_CFG_ENABLE); |
654 | writel(HPRE_QM_USR_CFG_MASK, addr: qm->io_base + QM_AWUSER_M_CFG_ENABLE); |
655 | writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG); |
656 | |
657 | /* HPRE need more time, we close this interrupt */ |
658 | val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); |
659 | val |= BIT(HPRE_TIMEOUT_ABNML_BIT); |
660 | writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); |
661 | |
662 | if (qm->ver >= QM_HW_V3) |
663 | writel(HPRE_RSA_ENB | HPRE_ECC_ENB, |
664 | addr: qm->io_base + HPRE_TYPES_ENB); |
665 | else |
666 | writel(HPRE_RSA_ENB, addr: qm->io_base + HPRE_TYPES_ENB); |
667 | |
668 | writel(HPRE_QM_VFG_AX_MASK, addr: qm->io_base + HPRE_VFG_AXCACHE); |
669 | writel(val: 0x0, addr: qm->io_base + HPRE_BD_ENDIAN); |
670 | writel(val: 0x0, addr: qm->io_base + HPRE_INT_MASK); |
671 | writel(val: 0x0, addr: qm->io_base + HPRE_POISON_BYPASS); |
672 | writel(val: 0x0, addr: qm->io_base + HPRE_COMM_CNT_CLR_CE); |
673 | writel(val: 0x0, addr: qm->io_base + HPRE_ECC_BYPASS); |
674 | |
675 | writel(HPRE_BD_USR_MASK, addr: qm->io_base + HPRE_BD_ARUSR_CFG); |
676 | writel(HPRE_BD_USR_MASK, addr: qm->io_base + HPRE_BD_AWUSR_CFG); |
677 | writel(val: 0x1, addr: qm->io_base + HPRE_RDCHN_INI_CFG); |
678 | ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val, |
679 | val & BIT(0), |
680 | HPRE_REG_RD_INTVRL_US, |
681 | HPRE_REG_RD_TMOUT_US); |
682 | if (ret) { |
683 | dev_err(dev, "read rd channel timeout fail!\n" ); |
684 | return -ETIMEDOUT; |
685 | } |
686 | |
687 | ret = hpre_set_cluster(qm); |
688 | if (ret) |
689 | return -ETIMEDOUT; |
690 | |
691 | /* This setting is only needed by Kunpeng 920. */ |
692 | if (qm->ver == QM_HW_V2) { |
693 | ret = hpre_cfg_by_dsm(qm); |
694 | if (ret) |
695 | return ret; |
696 | |
697 | disable_flr_of_bme(qm); |
698 | } |
699 | |
700 | /* Config data buffer pasid needed by Kunpeng 920 */ |
701 | hpre_config_pasid(qm); |
702 | |
703 | hpre_enable_clock_gate(qm); |
704 | |
705 | return ret; |
706 | } |
707 | |
708 | static void hpre_cnt_regs_clear(struct hisi_qm *qm) |
709 | { |
710 | unsigned long offset; |
711 | u8 clusters_num; |
712 | int i; |
713 | |
714 | /* clear clusterX/cluster_ctrl */ |
715 | clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; |
716 | for (i = 0; i < clusters_num; i++) { |
717 | offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; |
718 | writel(val: 0x0, addr: qm->io_base + offset + HPRE_CLUSTER_INQURY); |
719 | } |
720 | |
721 | /* clear rdclr_en */ |
722 | writel(val: 0x0, addr: qm->io_base + HPRE_CTRL_CNT_CLR_CE); |
723 | |
724 | hisi_qm_debug_regs_clear(qm); |
725 | } |
726 | |
727 | static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) |
728 | { |
729 | u32 val1, val2; |
730 | |
731 | val1 = readl(addr: qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); |
732 | if (enable) { |
733 | val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE; |
734 | val2 = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, |
735 | index: HPRE_OOO_SHUTDOWN_MASK_CAP, is_read: qm->cap_ver); |
736 | } else { |
737 | val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE; |
738 | val2 = 0x0; |
739 | } |
740 | |
741 | if (qm->ver > QM_HW_V2) |
742 | writel(val: val2, addr: qm->io_base + HPRE_OOO_SHUTDOWN_SEL); |
743 | |
744 | writel(val: val1, addr: qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); |
745 | } |
746 | |
747 | static void hpre_hw_error_disable(struct hisi_qm *qm) |
748 | { |
749 | u32 ce, nfe; |
750 | |
751 | ce = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, index: HPRE_CE_MASK_CAP, is_read: qm->cap_ver); |
752 | nfe = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, index: HPRE_NFE_MASK_CAP, is_read: qm->cap_ver); |
753 | |
754 | /* disable hpre hw error interrupts */ |
755 | writel(val: ce | nfe | HPRE_HAC_RAS_FE_ENABLE, addr: qm->io_base + HPRE_INT_MASK); |
756 | /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */ |
757 | hpre_master_ooo_ctrl(qm, enable: false); |
758 | } |
759 | |
760 | static void hpre_hw_error_enable(struct hisi_qm *qm) |
761 | { |
762 | u32 ce, nfe; |
763 | |
764 | ce = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, index: HPRE_CE_MASK_CAP, is_read: qm->cap_ver); |
765 | nfe = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, index: HPRE_NFE_MASK_CAP, is_read: qm->cap_ver); |
766 | |
767 | /* clear HPRE hw error source if having */ |
768 | writel(val: ce | nfe | HPRE_HAC_RAS_FE_ENABLE, addr: qm->io_base + HPRE_HAC_SOURCE_INT); |
769 | |
770 | /* configure error type */ |
771 | writel(val: ce, addr: qm->io_base + HPRE_RAS_CE_ENB); |
772 | writel(val: nfe, addr: qm->io_base + HPRE_RAS_NFE_ENB); |
773 | writel(HPRE_HAC_RAS_FE_ENABLE, addr: qm->io_base + HPRE_RAS_FE_ENB); |
774 | |
775 | /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */ |
776 | hpre_master_ooo_ctrl(qm, enable: true); |
777 | |
778 | /* enable hpre hw error interrupts */ |
779 | writel(HPRE_CORE_INT_ENABLE, addr: qm->io_base + HPRE_INT_MASK); |
780 | } |
781 | |
782 | static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) |
783 | { |
784 | struct hpre *hpre = container_of(file->debug, struct hpre, debug); |
785 | |
786 | return &hpre->qm; |
787 | } |
788 | |
789 | static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file) |
790 | { |
791 | struct hisi_qm *qm = hpre_file_to_qm(file); |
792 | |
793 | return readl(addr: qm->io_base + HPRE_CTRL_CNT_CLR_CE) & |
794 | HPRE_CTRL_CNT_CLR_CE_BIT; |
795 | } |
796 | |
797 | static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val) |
798 | { |
799 | struct hisi_qm *qm = hpre_file_to_qm(file); |
800 | u32 tmp; |
801 | |
802 | if (val != 1 && val != 0) |
803 | return -EINVAL; |
804 | |
805 | tmp = (readl(addr: qm->io_base + HPRE_CTRL_CNT_CLR_CE) & |
806 | ~HPRE_CTRL_CNT_CLR_CE_BIT) | val; |
807 | writel(val: tmp, addr: qm->io_base + HPRE_CTRL_CNT_CLR_CE); |
808 | |
809 | return 0; |
810 | } |
811 | |
812 | static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) |
813 | { |
814 | struct hisi_qm *qm = hpre_file_to_qm(file); |
815 | int cluster_index = file->index - HPRE_CLUSTER_CTRL; |
816 | unsigned long offset = HPRE_CLSTR_BASE + |
817 | cluster_index * HPRE_CLSTR_ADDR_INTRVL; |
818 | |
819 | return readl(addr: qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); |
820 | } |
821 | |
822 | static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) |
823 | { |
824 | struct hisi_qm *qm = hpre_file_to_qm(file); |
825 | int cluster_index = file->index - HPRE_CLUSTER_CTRL; |
826 | unsigned long offset = HPRE_CLSTR_BASE + cluster_index * |
827 | HPRE_CLSTR_ADDR_INTRVL; |
828 | |
829 | writel(val, addr: qm->io_base + offset + HPRE_CLUSTER_INQURY); |
830 | } |
831 | |
832 | static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, |
833 | size_t count, loff_t *pos) |
834 | { |
835 | struct hpre_debugfs_file *file = filp->private_data; |
836 | struct hisi_qm *qm = hpre_file_to_qm(file); |
837 | char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; |
838 | u32 val; |
839 | int ret; |
840 | |
841 | ret = hisi_qm_get_dfx_access(qm); |
842 | if (ret) |
843 | return ret; |
844 | |
845 | spin_lock_irq(lock: &file->lock); |
846 | switch (file->type) { |
847 | case HPRE_CLEAR_ENABLE: |
848 | val = hpre_clear_enable_read(file); |
849 | break; |
850 | case HPRE_CLUSTER_CTRL: |
851 | val = hpre_cluster_inqry_read(file); |
852 | break; |
853 | default: |
854 | goto err_input; |
855 | } |
856 | spin_unlock_irq(lock: &file->lock); |
857 | |
858 | hisi_qm_put_dfx_access(qm); |
859 | ret = snprintf(buf: tbuf, HPRE_DBGFS_VAL_MAX_LEN, fmt: "%u\n" , val); |
860 | return simple_read_from_buffer(to: buf, count, ppos: pos, from: tbuf, available: ret); |
861 | |
862 | err_input: |
863 | spin_unlock_irq(lock: &file->lock); |
864 | hisi_qm_put_dfx_access(qm); |
865 | return -EINVAL; |
866 | } |
867 | |
868 | static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, |
869 | size_t count, loff_t *pos) |
870 | { |
871 | struct hpre_debugfs_file *file = filp->private_data; |
872 | struct hisi_qm *qm = hpre_file_to_qm(file); |
873 | char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; |
874 | unsigned long val; |
875 | int len, ret; |
876 | |
877 | if (*pos != 0) |
878 | return 0; |
879 | |
880 | if (count >= HPRE_DBGFS_VAL_MAX_LEN) |
881 | return -ENOSPC; |
882 | |
883 | len = simple_write_to_buffer(to: tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1, |
884 | ppos: pos, from: buf, count); |
885 | if (len < 0) |
886 | return len; |
887 | |
888 | tbuf[len] = '\0'; |
889 | if (kstrtoul(s: tbuf, base: 0, res: &val)) |
890 | return -EFAULT; |
891 | |
892 | ret = hisi_qm_get_dfx_access(qm); |
893 | if (ret) |
894 | return ret; |
895 | |
896 | spin_lock_irq(lock: &file->lock); |
897 | switch (file->type) { |
898 | case HPRE_CLEAR_ENABLE: |
899 | ret = hpre_clear_enable_write(file, val); |
900 | if (ret) |
901 | goto err_input; |
902 | break; |
903 | case HPRE_CLUSTER_CTRL: |
904 | hpre_cluster_inqry_write(file, val); |
905 | break; |
906 | default: |
907 | ret = -EINVAL; |
908 | goto err_input; |
909 | } |
910 | |
911 | ret = count; |
912 | |
913 | err_input: |
914 | spin_unlock_irq(lock: &file->lock); |
915 | hisi_qm_put_dfx_access(qm); |
916 | return ret; |
917 | } |
918 | |
/* debugfs file_operations for the per-control HPRE debug files. */
static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};
925 | |
926 | static int hpre_debugfs_atomic64_get(void *data, u64 *val) |
927 | { |
928 | struct hpre_dfx *dfx_item = data; |
929 | |
930 | *val = atomic64_read(v: &dfx_item->value); |
931 | |
932 | return 0; |
933 | } |
934 | |
935 | static int hpre_debugfs_atomic64_set(void *data, u64 val) |
936 | { |
937 | struct hpre_dfx *dfx_item = data; |
938 | struct hpre_dfx *hpre_dfx = NULL; |
939 | |
940 | if (dfx_item->type == HPRE_OVERTIME_THRHLD) { |
941 | hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; |
942 | atomic64_set(v: &hpre_dfx[HPRE_OVER_THRHLD_CNT].value, i: 0); |
943 | } else if (val) { |
944 | return -EINVAL; |
945 | } |
946 | |
947 | atomic64_set(v: &dfx_item->value, i: val); |
948 | |
949 | return 0; |
950 | } |
951 | |
/* Attribute ops binding the DFX counter get/set handlers to debugfs. */
DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n" );
954 | |
955 | static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, |
956 | enum hpre_ctrl_dbgfs_file type, int indx) |
957 | { |
958 | struct hpre *hpre = container_of(qm, struct hpre, qm); |
959 | struct hpre_debug *dbg = &hpre->debug; |
960 | struct dentry *file_dir; |
961 | |
962 | if (dir) |
963 | file_dir = dir; |
964 | else |
965 | file_dir = qm->debug.debug_root; |
966 | |
967 | if (type >= HPRE_DEBUG_FILE_NUM) |
968 | return -EINVAL; |
969 | |
970 | spin_lock_init(&dbg->files[indx].lock); |
971 | dbg->files[indx].debug = dbg; |
972 | dbg->files[indx].type = type; |
973 | dbg->files[indx].index = indx; |
974 | debugfs_create_file(name: hpre_debug_file_name[type], mode: 0600, parent: file_dir, |
975 | data: dbg->files + indx, fops: &hpre_ctrl_debug_fops); |
976 | |
977 | return 0; |
978 | } |
979 | |
980 | static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) |
981 | { |
982 | struct device *dev = &qm->pdev->dev; |
983 | struct debugfs_regset32 *regset; |
984 | |
985 | regset = devm_kzalloc(dev, size: sizeof(*regset), GFP_KERNEL); |
986 | if (!regset) |
987 | return -ENOMEM; |
988 | |
989 | regset->regs = hpre_com_dfx_regs; |
990 | regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs); |
991 | regset->base = qm->io_base; |
992 | regset->dev = dev; |
993 | |
994 | debugfs_create_file(name: "regs" , mode: 0444, parent: qm->debug.debug_root, |
995 | data: regset, fops: &hpre_com_regs_fops); |
996 | |
997 | return 0; |
998 | } |
999 | |
1000 | static int hpre_cluster_debugfs_init(struct hisi_qm *qm) |
1001 | { |
1002 | struct device *dev = &qm->pdev->dev; |
1003 | char buf[HPRE_DBGFS_VAL_MAX_LEN]; |
1004 | struct debugfs_regset32 *regset; |
1005 | struct dentry *tmp_d; |
1006 | u8 clusters_num; |
1007 | int i, ret; |
1008 | |
1009 | clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; |
1010 | for (i = 0; i < clusters_num; i++) { |
1011 | ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, fmt: "cluster%d" , i); |
1012 | if (ret >= HPRE_DBGFS_VAL_MAX_LEN) |
1013 | return -EINVAL; |
1014 | tmp_d = debugfs_create_dir(name: buf, parent: qm->debug.debug_root); |
1015 | |
1016 | regset = devm_kzalloc(dev, size: sizeof(*regset), GFP_KERNEL); |
1017 | if (!regset) |
1018 | return -ENOMEM; |
1019 | |
1020 | regset->regs = hpre_cluster_dfx_regs; |
1021 | regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs); |
1022 | regset->base = qm->io_base + hpre_cluster_offsets[i]; |
1023 | regset->dev = dev; |
1024 | |
1025 | debugfs_create_file(name: "regs" , mode: 0444, parent: tmp_d, data: regset, |
1026 | fops: &hpre_cluster_regs_fops); |
1027 | ret = hpre_create_debugfs_file(qm, dir: tmp_d, type: HPRE_CLUSTER_CTRL, |
1028 | indx: i + HPRE_CLUSTER_CTRL); |
1029 | if (ret) |
1030 | return ret; |
1031 | } |
1032 | |
1033 | return 0; |
1034 | } |
1035 | |
1036 | static int hpre_ctrl_debug_init(struct hisi_qm *qm) |
1037 | { |
1038 | int ret; |
1039 | |
1040 | ret = hpre_create_debugfs_file(qm, NULL, type: HPRE_CLEAR_ENABLE, |
1041 | indx: HPRE_CLEAR_ENABLE); |
1042 | if (ret) |
1043 | return ret; |
1044 | |
1045 | ret = hpre_pf_comm_regs_debugfs_init(qm); |
1046 | if (ret) |
1047 | return ret; |
1048 | |
1049 | return hpre_cluster_debugfs_init(qm); |
1050 | } |
1051 | |
1052 | static void hpre_dfx_debug_init(struct hisi_qm *qm) |
1053 | { |
1054 | struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs; |
1055 | struct hpre *hpre = container_of(qm, struct hpre, qm); |
1056 | struct hpre_dfx *dfx = hpre->debug.dfx; |
1057 | struct dentry *parent; |
1058 | int i; |
1059 | |
1060 | parent = debugfs_create_dir(name: "hpre_dfx" , parent: qm->debug.debug_root); |
1061 | for (i = 0; i < HPRE_DFX_FILE_NUM; i++) { |
1062 | dfx[i].type = i; |
1063 | debugfs_create_file(name: hpre_dfx_files[i], mode: 0644, parent, data: &dfx[i], |
1064 | fops: &hpre_atomic64_ops); |
1065 | } |
1066 | |
1067 | if (qm->fun_type == QM_HW_PF && hpre_regs) |
1068 | debugfs_create_file(name: "diff_regs" , mode: 0444, parent, |
1069 | data: qm, fops: &hpre_diff_regs_fops); |
1070 | } |
1071 | |
1072 | static int hpre_debugfs_init(struct hisi_qm *qm) |
1073 | { |
1074 | struct device *dev = &qm->pdev->dev; |
1075 | int ret; |
1076 | |
1077 | qm->debug.debug_root = debugfs_create_dir(name: dev_name(dev), |
1078 | parent: hpre_debugfs_root); |
1079 | |
1080 | qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; |
1081 | qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; |
1082 | ret = hisi_qm_regs_debugfs_init(qm, dregs: hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs)); |
1083 | if (ret) { |
1084 | dev_warn(dev, "Failed to init HPRE diff regs!\n" ); |
1085 | goto debugfs_remove; |
1086 | } |
1087 | |
1088 | hisi_qm_debug_init(qm); |
1089 | |
1090 | if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { |
1091 | ret = hpre_ctrl_debug_init(qm); |
1092 | if (ret) |
1093 | goto failed_to_create; |
1094 | } |
1095 | |
1096 | hpre_dfx_debug_init(qm); |
1097 | |
1098 | return 0; |
1099 | |
1100 | failed_to_create: |
1101 | hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); |
1102 | debugfs_remove: |
1103 | debugfs_remove_recursive(dentry: qm->debug.debug_root); |
1104 | return ret; |
1105 | } |
1106 | |
1107 | static void hpre_debugfs_exit(struct hisi_qm *qm) |
1108 | { |
1109 | hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); |
1110 | |
1111 | debugfs_remove_recursive(dentry: qm->debug.debug_root); |
1112 | } |
1113 | |
1114 | static int hpre_pre_store_cap_reg(struct hisi_qm *qm) |
1115 | { |
1116 | struct hisi_qm_cap_record *hpre_cap; |
1117 | struct device *dev = &qm->pdev->dev; |
1118 | size_t i, size; |
1119 | |
1120 | size = ARRAY_SIZE(hpre_pre_store_caps); |
1121 | hpre_cap = devm_kzalloc(dev, size: sizeof(*hpre_cap) * size, GFP_KERNEL); |
1122 | if (!hpre_cap) |
1123 | return -ENOMEM; |
1124 | |
1125 | for (i = 0; i < size; i++) { |
1126 | hpre_cap[i].type = hpre_pre_store_caps[i]; |
1127 | hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, |
1128 | index: hpre_pre_store_caps[i], is_read: qm->cap_ver); |
1129 | } |
1130 | |
1131 | if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) { |
1132 | dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n" , |
1133 | hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX); |
1134 | return -EINVAL; |
1135 | } |
1136 | |
1137 | qm->cap_tables.dev_cap_table = hpre_cap; |
1138 | |
1139 | return 0; |
1140 | } |
1141 | |
1142 | static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) |
1143 | { |
1144 | u64 alg_msk; |
1145 | int ret; |
1146 | |
1147 | if (pdev->revision == QM_HW_V1) { |
1148 | pci_warn(pdev, "HPRE version 1 is not supported!\n" ); |
1149 | return -EINVAL; |
1150 | } |
1151 | |
1152 | qm->mode = uacce_mode; |
1153 | qm->pdev = pdev; |
1154 | qm->ver = pdev->revision; |
1155 | qm->sqe_size = HPRE_SQE_SIZE; |
1156 | qm->dev_name = hpre_name; |
1157 | |
1158 | qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ? |
1159 | QM_HW_PF : QM_HW_VF; |
1160 | if (qm->fun_type == QM_HW_PF) { |
1161 | qm->qp_base = HPRE_PF_DEF_Q_BASE; |
1162 | qm->qp_num = pf_q_num; |
1163 | qm->debug.curr_qm_qp_num = pf_q_num; |
1164 | qm->qm_list = &hpre_devices; |
1165 | if (pf_q_num_flag) |
1166 | set_bit(nr: QM_MODULE_PARAM, addr: &qm->misc_ctl); |
1167 | } |
1168 | |
1169 | ret = hisi_qm_init(qm); |
1170 | if (ret) { |
1171 | pci_err(pdev, "Failed to init hpre qm configures!\n" ); |
1172 | return ret; |
1173 | } |
1174 | |
1175 | /* Fetch and save the value of capability registers */ |
1176 | ret = hpre_pre_store_cap_reg(qm); |
1177 | if (ret) { |
1178 | pci_err(pdev, "Failed to pre-store capability registers!\n" ); |
1179 | hisi_qm_uninit(qm); |
1180 | return ret; |
1181 | } |
1182 | |
1183 | alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val; |
1184 | ret = hisi_qm_set_algs(qm, alg_msk, dev_algs: hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs)); |
1185 | if (ret) { |
1186 | pci_err(pdev, "Failed to set hpre algs!\n" ); |
1187 | hisi_qm_uninit(qm); |
1188 | } |
1189 | |
1190 | return ret; |
1191 | } |
1192 | |
1193 | static int hpre_show_last_regs_init(struct hisi_qm *qm) |
1194 | { |
1195 | int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); |
1196 | int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); |
1197 | struct qm_debug *debug = &qm->debug; |
1198 | void __iomem *io_base; |
1199 | u8 clusters_num; |
1200 | int i, j, idx; |
1201 | |
1202 | clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; |
1203 | debug->last_words = kcalloc(n: cluster_dfx_regs_num * clusters_num + |
1204 | com_dfx_regs_num, size: sizeof(unsigned int), GFP_KERNEL); |
1205 | if (!debug->last_words) |
1206 | return -ENOMEM; |
1207 | |
1208 | for (i = 0; i < com_dfx_regs_num; i++) |
1209 | debug->last_words[i] = readl_relaxed(qm->io_base + |
1210 | hpre_com_dfx_regs[i].offset); |
1211 | |
1212 | for (i = 0; i < clusters_num; i++) { |
1213 | io_base = qm->io_base + hpre_cluster_offsets[i]; |
1214 | for (j = 0; j < cluster_dfx_regs_num; j++) { |
1215 | idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; |
1216 | debug->last_words[idx] = readl_relaxed( |
1217 | io_base + hpre_cluster_dfx_regs[j].offset); |
1218 | } |
1219 | } |
1220 | |
1221 | return 0; |
1222 | } |
1223 | |
1224 | static void hpre_show_last_regs_uninit(struct hisi_qm *qm) |
1225 | { |
1226 | struct qm_debug *debug = &qm->debug; |
1227 | |
1228 | if (qm->fun_type == QM_HW_VF || !debug->last_words) |
1229 | return; |
1230 | |
1231 | kfree(objp: debug->last_words); |
1232 | debug->last_words = NULL; |
1233 | } |
1234 | |
1235 | static void hpre_show_last_dfx_regs(struct hisi_qm *qm) |
1236 | { |
1237 | int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); |
1238 | int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); |
1239 | struct qm_debug *debug = &qm->debug; |
1240 | struct pci_dev *pdev = qm->pdev; |
1241 | void __iomem *io_base; |
1242 | u8 clusters_num; |
1243 | int i, j, idx; |
1244 | u32 val; |
1245 | |
1246 | if (qm->fun_type == QM_HW_VF || !debug->last_words) |
1247 | return; |
1248 | |
1249 | /* dumps last word of the debugging registers during controller reset */ |
1250 | for (i = 0; i < com_dfx_regs_num; i++) { |
1251 | val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset); |
1252 | if (debug->last_words[i] != val) |
1253 | pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n" , |
1254 | hpre_com_dfx_regs[i].name, debug->last_words[i], val); |
1255 | } |
1256 | |
1257 | clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; |
1258 | for (i = 0; i < clusters_num; i++) { |
1259 | io_base = qm->io_base + hpre_cluster_offsets[i]; |
1260 | for (j = 0; j < cluster_dfx_regs_num; j++) { |
1261 | val = readl_relaxed(io_base + |
1262 | hpre_cluster_dfx_regs[j].offset); |
1263 | idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; |
1264 | if (debug->last_words[idx] != val) |
1265 | pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n" , |
1266 | i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val); |
1267 | } |
1268 | } |
1269 | } |
1270 | |
1271 | static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) |
1272 | { |
1273 | const struct hpre_hw_error *err = hpre_hw_errors; |
1274 | struct device *dev = &qm->pdev->dev; |
1275 | |
1276 | while (err->msg) { |
1277 | if (err->int_msk & err_sts) |
1278 | dev_warn(dev, "%s [error status=0x%x] found\n" , |
1279 | err->msg, err->int_msk); |
1280 | err++; |
1281 | } |
1282 | } |
1283 | |
1284 | static u32 hpre_get_hw_err_status(struct hisi_qm *qm) |
1285 | { |
1286 | return readl(addr: qm->io_base + HPRE_INT_STATUS); |
1287 | } |
1288 | |
1289 | static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) |
1290 | { |
1291 | u32 nfe; |
1292 | |
1293 | writel(val: err_sts, addr: qm->io_base + HPRE_HAC_SOURCE_INT); |
1294 | nfe = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, index: HPRE_NFE_MASK_CAP, is_read: qm->cap_ver); |
1295 | writel(val: nfe, addr: qm->io_base + HPRE_RAS_NFE_ENB); |
1296 | } |
1297 | |
1298 | static void hpre_open_axi_master_ooo(struct hisi_qm *qm) |
1299 | { |
1300 | u32 value; |
1301 | |
1302 | value = readl(addr: qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); |
1303 | writel(val: value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE, |
1304 | addr: qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); |
1305 | writel(val: value | HPRE_AM_OOO_SHUTDOWN_ENABLE, |
1306 | addr: qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); |
1307 | } |
1308 | |
1309 | static void hpre_err_info_init(struct hisi_qm *qm) |
1310 | { |
1311 | struct hisi_qm_err_info *err_info = &qm->err_info; |
1312 | |
1313 | err_info->fe = HPRE_HAC_RAS_FE_ENABLE; |
1314 | err_info->ce = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, index: HPRE_QM_CE_MASK_CAP, is_read: qm->cap_ver); |
1315 | err_info->nfe = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, index: HPRE_QM_NFE_MASK_CAP, is_read: qm->cap_ver); |
1316 | err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR; |
1317 | err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, |
1318 | index: HPRE_OOO_SHUTDOWN_MASK_CAP, is_read: qm->cap_ver); |
1319 | err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, |
1320 | index: HPRE_QM_OOO_SHUTDOWN_MASK_CAP, is_read: qm->cap_ver); |
1321 | err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, |
1322 | index: HPRE_QM_RESET_MASK_CAP, is_read: qm->cap_ver); |
1323 | err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, info_table: hpre_basic_info, |
1324 | index: HPRE_RESET_MASK_CAP, is_read: qm->cap_ver); |
1325 | err_info->msi_wr_port = HPRE_WR_MSI_PORT; |
1326 | err_info->acpi_rst = "HRST" ; |
1327 | } |
1328 | |
/* Error-handling callbacks registered with the QM core. */
static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init = hpre_set_user_domain_and_cache,
	.hw_err_enable = hpre_hw_error_enable,
	.hw_err_disable = hpre_hw_error_disable,
	.get_dev_hw_err_status = hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err = hpre_log_hw_error,
	.open_axi_master_ooo = hpre_open_axi_master_ooo,
	.open_sva_prefetch = hpre_open_sva_prefetch,
	.close_sva_prefetch = hpre_close_sva_prefetch,
	.show_last_dfx_regs = hpre_show_last_dfx_regs,
	.err_info_init = hpre_err_info_init,
};
1342 | |
1343 | static int hpre_pf_probe_init(struct hpre *hpre) |
1344 | { |
1345 | struct hisi_qm *qm = &hpre->qm; |
1346 | int ret; |
1347 | |
1348 | ret = hpre_set_user_domain_and_cache(qm); |
1349 | if (ret) |
1350 | return ret; |
1351 | |
1352 | hpre_open_sva_prefetch(qm); |
1353 | |
1354 | qm->err_ini = &hpre_err_ini; |
1355 | qm->err_ini->err_info_init(qm); |
1356 | hisi_qm_dev_err_init(qm); |
1357 | ret = hpre_show_last_regs_init(qm); |
1358 | if (ret) |
1359 | pci_err(qm->pdev, "Failed to init last word regs!\n" ); |
1360 | |
1361 | return ret; |
1362 | } |
1363 | |
1364 | static int hpre_probe_init(struct hpre *hpre) |
1365 | { |
1366 | u32 type_rate = HPRE_SHAPER_TYPE_RATE; |
1367 | struct hisi_qm *qm = &hpre->qm; |
1368 | int ret; |
1369 | |
1370 | if (qm->fun_type == QM_HW_PF) { |
1371 | ret = hpre_pf_probe_init(hpre); |
1372 | if (ret) |
1373 | return ret; |
1374 | /* Enable shaper type 0 */ |
1375 | if (qm->ver >= QM_HW_V3) { |
1376 | type_rate |= QM_SHAPER_ENABLE; |
1377 | qm->type_rate = type_rate; |
1378 | } |
1379 | } |
1380 | |
1381 | return 0; |
1382 | } |
1383 | |
1384 | static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
1385 | { |
1386 | struct hisi_qm *qm; |
1387 | struct hpre *hpre; |
1388 | int ret; |
1389 | |
1390 | hpre = devm_kzalloc(dev: &pdev->dev, size: sizeof(*hpre), GFP_KERNEL); |
1391 | if (!hpre) |
1392 | return -ENOMEM; |
1393 | |
1394 | qm = &hpre->qm; |
1395 | ret = hpre_qm_init(qm, pdev); |
1396 | if (ret) { |
1397 | pci_err(pdev, "Failed to init HPRE QM (%d)!\n" , ret); |
1398 | return ret; |
1399 | } |
1400 | |
1401 | ret = hpre_probe_init(hpre); |
1402 | if (ret) { |
1403 | pci_err(pdev, "Failed to probe (%d)!\n" , ret); |
1404 | goto err_with_qm_init; |
1405 | } |
1406 | |
1407 | ret = hisi_qm_start(qm); |
1408 | if (ret) |
1409 | goto err_with_err_init; |
1410 | |
1411 | ret = hpre_debugfs_init(qm); |
1412 | if (ret) |
1413 | dev_warn(&pdev->dev, "init debugfs fail!\n" ); |
1414 | |
1415 | hisi_qm_add_list(qm, qm_list: &hpre_devices); |
1416 | ret = hisi_qm_alg_register(qm, qm_list: &hpre_devices, HPRE_CTX_Q_NUM_DEF); |
1417 | if (ret < 0) { |
1418 | pci_err(pdev, "fail to register algs to crypto!\n" ); |
1419 | goto err_qm_del_list; |
1420 | } |
1421 | |
1422 | if (qm->uacce) { |
1423 | ret = uacce_register(uacce: qm->uacce); |
1424 | if (ret) { |
1425 | pci_err(pdev, "failed to register uacce (%d)!\n" , ret); |
1426 | goto err_with_alg_register; |
1427 | } |
1428 | } |
1429 | |
1430 | if (qm->fun_type == QM_HW_PF && vfs_num) { |
1431 | ret = hisi_qm_sriov_enable(pdev, max_vfs: vfs_num); |
1432 | if (ret < 0) |
1433 | goto err_with_alg_register; |
1434 | } |
1435 | |
1436 | hisi_qm_pm_init(qm); |
1437 | |
1438 | return 0; |
1439 | |
1440 | err_with_alg_register: |
1441 | hisi_qm_alg_unregister(qm, qm_list: &hpre_devices, HPRE_CTX_Q_NUM_DEF); |
1442 | |
1443 | err_qm_del_list: |
1444 | hisi_qm_del_list(qm, qm_list: &hpre_devices); |
1445 | hpre_debugfs_exit(qm); |
1446 | hisi_qm_stop(qm, r: QM_NORMAL); |
1447 | |
1448 | err_with_err_init: |
1449 | hpre_show_last_regs_uninit(qm); |
1450 | hisi_qm_dev_err_uninit(qm); |
1451 | |
1452 | err_with_qm_init: |
1453 | hisi_qm_uninit(qm); |
1454 | |
1455 | return ret; |
1456 | } |
1457 | |
1458 | static void hpre_remove(struct pci_dev *pdev) |
1459 | { |
1460 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
1461 | |
1462 | hisi_qm_pm_uninit(qm); |
1463 | hisi_qm_wait_task_finish(qm, qm_list: &hpre_devices); |
1464 | hisi_qm_alg_unregister(qm, qm_list: &hpre_devices, HPRE_CTX_Q_NUM_DEF); |
1465 | hisi_qm_del_list(qm, qm_list: &hpre_devices); |
1466 | if (qm->fun_type == QM_HW_PF && qm->vfs_num) |
1467 | hisi_qm_sriov_disable(pdev, is_frozen: true); |
1468 | |
1469 | hpre_debugfs_exit(qm); |
1470 | hisi_qm_stop(qm, r: QM_NORMAL); |
1471 | |
1472 | if (qm->fun_type == QM_HW_PF) { |
1473 | hpre_cnt_regs_clear(qm); |
1474 | qm->debug.curr_qm_qp_num = 0; |
1475 | hpre_show_last_regs_uninit(qm); |
1476 | hisi_qm_dev_err_uninit(qm); |
1477 | } |
1478 | |
1479 | hisi_qm_uninit(qm); |
1480 | } |
1481 | |
/* Runtime PM hooks are delegated to the shared QM suspend/resume code. */
static const struct dev_pm_ops hpre_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};
1485 | |
/* AER/reset callbacks, all delegated to the shared QM implementations. */
static const struct pci_error_handlers hpre_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};
1492 | |
/* PCI driver descriptor; SR-IOV configure is only wired when PCI_IOV is on. */
static struct pci_driver hpre_pci_driver = {
	.name = hpre_name,
	.id_table = hpre_dev_ids,
	.probe = hpre_probe,
	.remove = hpre_remove,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
			   hisi_qm_sriov_configure : NULL,
	.err_handler = &hpre_err_handler,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &hpre_pm_ops,
};
1504 | |
/* Accessor returning the HPRE PF pci_driver for users of this module. */
struct pci_driver *hisi_hpre_get_pf_driver(void)
{
	return &hpre_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);
1510 | |
1511 | static void hpre_register_debugfs(void) |
1512 | { |
1513 | if (!debugfs_initialized()) |
1514 | return; |
1515 | |
1516 | hpre_debugfs_root = debugfs_create_dir(name: hpre_name, NULL); |
1517 | } |
1518 | |
1519 | static void hpre_unregister_debugfs(void) |
1520 | { |
1521 | debugfs_remove_recursive(dentry: hpre_debugfs_root); |
1522 | } |
1523 | |
1524 | static int __init hpre_init(void) |
1525 | { |
1526 | int ret; |
1527 | |
1528 | hisi_qm_init_list(qm_list: &hpre_devices); |
1529 | hpre_register_debugfs(); |
1530 | |
1531 | ret = pci_register_driver(&hpre_pci_driver); |
1532 | if (ret) { |
1533 | hpre_unregister_debugfs(); |
1534 | pr_err("hpre: can't register hisi hpre driver.\n" ); |
1535 | } |
1536 | |
1537 | return ret; |
1538 | } |
1539 | |
1540 | static void __exit hpre_exit(void) |
1541 | { |
1542 | pci_unregister_driver(dev: &hpre_pci_driver); |
1543 | hpre_unregister_debugfs(); |
1544 | } |
1545 | |
/* Module entry points and metadata. */
module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2" );
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>" );
MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>" );
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator" );
1553 | |