1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
2 | /* QLogic qed NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation |
4 | * Copyright (c) 2019-2020 Marvell International Ltd. |
5 | */ |
6 | |
7 | #include <linux/stddef.h> |
8 | #include <linux/pci.h> |
9 | #include <linux/kernel.h> |
10 | #include <linux/slab.h> |
11 | #include <linux/delay.h> |
12 | #include <asm/byteorder.h> |
13 | #include <linux/dma-mapping.h> |
14 | #include <linux/string.h> |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> |
17 | #include <linux/workqueue.h> |
18 | #include <linux/ethtool.h> |
19 | #include <linux/etherdevice.h> |
20 | #include <linux/vmalloc.h> |
21 | #include <linux/crash_dump.h> |
22 | #include <linux/crc32.h> |
23 | #include <linux/qed/qed_if.h> |
24 | #include <linux/qed/qed_ll2_if.h> |
25 | #include <net/devlink.h> |
26 | #include <linux/phylink.h> |
27 | |
28 | #include "qed.h" |
29 | #include "qed_sriov.h" |
30 | #include "qed_sp.h" |
31 | #include "qed_dev_api.h" |
32 | #include "qed_ll2.h" |
33 | #include "qed_fcoe.h" |
34 | #include "qed_iscsi.h" |
35 | |
36 | #include "qed_mcp.h" |
37 | #include "qed_reg_addr.h" |
38 | #include "qed_hw.h" |
39 | #include "qed_selftest.h" |
40 | #include "qed_debug.h" |
41 | #include "qed_devlink.h" |
42 | |
43 | #define QED_ROCE_QPS (8192) |
44 | #define QED_ROCE_DPIS (8) |
45 | #define QED_RDMA_SRQS QED_ROCE_QPS |
46 | #define QED_NVM_CFG_GET_FLAGS 0xA |
47 | #define QED_NVM_CFG_GET_PF_FLAGS 0x1A |
48 | #define QED_NVM_CFG_MAX_ATTRS 50 |
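/* Flag values and the per-request attribute cap used when reading NVM config
 * options from the management FW (the PF-scoped flag targets per-PF options);
 * these mirror the MFW NVM-config interface and are consumed by the flash
 * config helpers elsewhere in this file.
 */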
49 | |
static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
55 | |
56 | #define FW_FILE_VERSION \ |
57 | __stringify(FW_MAJOR_VERSION) "." \ |
58 | __stringify(FW_MINOR_VERSION) "." \ |
59 | __stringify(FW_REVISION_VERSION) "." \ |
60 | __stringify(FW_ENGINEERING_VERSION) |
61 | |
62 | #define QED_FW_FILE_NAME \ |
63 | "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" |
64 | |
65 | MODULE_FIRMWARE(QED_FW_FILE_NAME); |
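/* The firmware file name embeds the FW_*_VERSION macros above, so the module
 * always requests an init-values blob that matches the FW headers it was
 * built against.
 */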
66 | |
67 | /* MFW speed capabilities maps */ |
68 | |
69 | struct qed_mfw_speed_map { |
70 | u32 mfw_val; |
71 | __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); |
72 | |
73 | const u32 *cap_arr; |
74 | u32 arr_size; |
75 | }; |
76 | |
77 | #define QED_MFW_SPEED_MAP(type, arr) \ |
78 | { \ |
79 | .mfw_val = (type), \ |
80 | .cap_arr = (arr), \ |
81 | .arr_size = ARRAY_SIZE(arr), \ |
82 | } |
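/* Each map begins as a pointer to an __initconst array of ethtool link-mode
 * bits; qed_mfw_speed_maps_init() folds the array into the 'caps' linkmode
 * bitmap at module init and then drops the pointer, so the init-only arrays
 * can be discarded once the module has loaded.
 */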
83 | |
84 | static const u32 qed_mfw_ext_1g[] __initconst = { |
85 | ETHTOOL_LINK_MODE_1000baseT_Full_BIT, |
86 | ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, |
87 | ETHTOOL_LINK_MODE_1000baseX_Full_BIT, |
88 | }; |
89 | |
90 | static const u32 qed_mfw_ext_10g[] __initconst = { |
91 | ETHTOOL_LINK_MODE_10000baseT_Full_BIT, |
92 | ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, |
93 | ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, |
94 | ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, |
95 | ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, |
96 | ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, |
97 | ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, |
98 | ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, |
99 | }; |
100 | |
101 | static const u32 qed_mfw_ext_25g[] __initconst = { |
102 | ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, |
103 | ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, |
104 | ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, |
105 | }; |
106 | |
107 | static const u32 qed_mfw_ext_40g[] __initconst = { |
108 | ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, |
109 | ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, |
110 | ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, |
111 | ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, |
112 | }; |
113 | |
114 | static const u32 qed_mfw_ext_50g_base_r[] __initconst = { |
115 | ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, |
116 | ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, |
117 | ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, |
118 | ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, |
119 | ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, |
120 | }; |
121 | |
122 | static const u32 qed_mfw_ext_50g_base_r2[] __initconst = { |
123 | ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, |
124 | ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, |
125 | ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, |
126 | }; |
127 | |
128 | static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { |
129 | ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, |
130 | ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, |
131 | ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, |
132 | ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, |
133 | ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, |
134 | }; |
135 | |
136 | static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { |
137 | ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, |
138 | ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, |
139 | ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, |
140 | ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, |
141 | }; |
142 | |
143 | static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { |
144 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), |
145 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), |
146 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), |
147 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), |
148 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, |
149 | qed_mfw_ext_50g_base_r), |
150 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, |
151 | qed_mfw_ext_50g_base_r2), |
152 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, |
153 | qed_mfw_ext_100g_base_r2), |
154 | QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, |
155 | qed_mfw_ext_100g_base_r4), |
156 | }; |
157 | |
158 | static const u32 qed_mfw_legacy_1g[] __initconst = { |
159 | ETHTOOL_LINK_MODE_1000baseT_Full_BIT, |
160 | ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, |
161 | ETHTOOL_LINK_MODE_1000baseX_Full_BIT, |
162 | }; |
163 | |
164 | static const u32 qed_mfw_legacy_10g[] __initconst = { |
165 | ETHTOOL_LINK_MODE_10000baseT_Full_BIT, |
166 | ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, |
167 | ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, |
168 | ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, |
169 | ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, |
170 | ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, |
171 | ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, |
172 | ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, |
173 | }; |
174 | |
175 | static const u32 qed_mfw_legacy_20g[] __initconst = { |
176 | ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, |
177 | }; |
178 | |
179 | static const u32 qed_mfw_legacy_25g[] __initconst = { |
180 | ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, |
181 | ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, |
182 | ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, |
183 | }; |
184 | |
185 | static const u32 qed_mfw_legacy_40g[] __initconst = { |
186 | ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, |
187 | ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, |
188 | ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, |
189 | ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, |
190 | }; |
191 | |
192 | static const u32 qed_mfw_legacy_50g[] __initconst = { |
193 | ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, |
194 | ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, |
195 | ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, |
196 | }; |
197 | |
198 | static const u32 qed_mfw_legacy_bb_100g[] __initconst = { |
199 | ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, |
200 | ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, |
201 | ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, |
202 | ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, |
203 | }; |
204 | |
205 | static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = { |
206 | QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G, |
207 | qed_mfw_legacy_1g), |
208 | QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G, |
209 | qed_mfw_legacy_10g), |
210 | QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G, |
211 | qed_mfw_legacy_20g), |
212 | QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G, |
213 | qed_mfw_legacy_25g), |
214 | QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G, |
215 | qed_mfw_legacy_40g), |
216 | QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G, |
217 | qed_mfw_legacy_50g), |
218 | QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G, |
219 | qed_mfw_legacy_bb_100g), |
220 | }; |
221 | |
222 | static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map) |
223 | { |
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
225 | |
226 | map->cap_arr = NULL; |
227 | map->arr_size = 0; |
228 | } |
229 | |
230 | static void __init qed_mfw_speed_maps_init(void) |
231 | { |
232 | u32 i; |
233 | |
234 | for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) |
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);
236 | |
237 | for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) |
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
239 | } |
240 | |
241 | static int __init qed_init(void) |
242 | { |
243 | pr_info("%s" , version); |
244 | |
245 | qed_mfw_speed_maps_init(); |
246 | |
247 | return 0; |
248 | } |
249 | module_init(qed_init); |
250 | |
251 | static void __exit qed_exit(void) |
252 | { |
253 | /* To prevent marking this module as "permanent" */ |
254 | } |
255 | module_exit(qed_exit); |
256 | |
257 | static void qed_free_pci(struct qed_dev *cdev) |
258 | { |
259 | struct pci_dev *pdev = cdev->pdev; |
260 | |
	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
269 | } |
270 | |
271 | #define PCI_REVISION_ID_ERROR_VAL 0xff |
272 | |
/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
276 | static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) |
277 | { |
278 | u8 rev_id; |
279 | int rc; |
280 | |
281 | cdev->pdev = pdev; |
282 | |
	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		DP_NOTICE(cdev, "Can't request DMA addresses\n");
		rc = -EIO;
		goto err2;
	}
336 | |
337 | cdev->pci_params.mem_start = pci_resource_start(pdev, 0); |
338 | cdev->pci_params.mem_end = pci_resource_end(pdev, 0); |
339 | cdev->pci_params.irq = pdev->irq; |
340 | |
	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}
365 | |
366 | return 0; |
367 | |
368 | err2: |
369 | pci_release_regions(pdev); |
370 | err1: |
	pci_disable_device(pdev);
372 | err0: |
373 | return rc; |
374 | } |
375 | |
376 | int qed_fill_dev_info(struct qed_dev *cdev, |
377 | struct qed_dev_info *dev_info) |
378 | { |
379 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
380 | struct qed_hw_info *hw_info = &p_hwfn->hw_info; |
381 | struct qed_tunnel_info *tun = &cdev->tunnel; |
382 | struct qed_ptt *ptt; |
383 | |
384 | memset(dev_info, 0, sizeof(struct qed_dev_info)); |
385 | |
386 | if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && |
387 | tun->vxlan.b_mode_enabled) |
388 | dev_info->vxlan_enable = true; |
389 | |
390 | if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && |
391 | tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && |
392 | tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) |
393 | dev_info->gre_enable = true; |
394 | |
395 | if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && |
396 | tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && |
397 | tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) |
398 | dev_info->geneve_enable = true; |
399 | |
400 | dev_info->num_hwfns = cdev->num_hwfns; |
401 | dev_info->pci_mem_start = cdev->pci_params.mem_start; |
402 | dev_info->pci_mem_end = cdev->pci_params.mem_end; |
403 | dev_info->pci_irq = cdev->pci_params.irq; |
404 | dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); |
405 | dev_info->dev_type = cdev->type; |
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
407 | |
408 | if (IS_PF(cdev)) { |
409 | dev_info->fw_major = FW_MAJOR_VERSION; |
410 | dev_info->fw_minor = FW_MINOR_VERSION; |
411 | dev_info->fw_rev = FW_REVISION_VERSION; |
412 | dev_info->fw_eng = FW_ENGINEERING_VERSION; |
413 | dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, |
414 | &cdev->mf_bits); |
415 | if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) |
416 | dev_info->b_arfs_capable = true; |
417 | dev_info->tx_switching = true; |
418 | |
419 | if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) |
420 | dev_info->wol_support = true; |
421 | |
422 | dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); |
423 | dev_info->esl = qed_mcp_is_esl_supported(p_hwfn); |
424 | dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; |
425 | } else { |
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}
449 | |
450 | dev_info->mtu = hw_info->mtu; |
451 | cdev->common_dev_info = *dev_info; |
452 | |
453 | return 0; |
454 | } |
455 | |
456 | static void qed_free_cdev(struct qed_dev *cdev) |
457 | { |
	kfree((void *)cdev);
459 | } |
460 | |
461 | static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) |
462 | { |
463 | struct qed_dev *cdev; |
464 | |
	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
466 | if (!cdev) |
467 | return cdev; |
468 | |
469 | qed_init_struct(cdev); |
470 | |
471 | return cdev; |
472 | } |
473 | |
474 | /* Sets the requested power state */ |
475 | static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) |
476 | { |
477 | if (!cdev) |
478 | return -ENODEV; |
479 | |
	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
481 | return 0; |
482 | } |
483 | |
484 | /* probing */ |
485 | static struct qed_dev *qed_probe(struct pci_dev *pdev, |
486 | struct qed_probe_params *params) |
487 | { |
488 | struct qed_dev *cdev; |
489 | int rc; |
490 | |
491 | cdev = qed_alloc_cdev(pdev); |
492 | if (!cdev) |
493 | goto err0; |
494 | |
495 | cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; |
496 | cdev->protocol = params->protocol; |
497 | |
498 | if (params->is_vf) |
499 | cdev->b_is_vf = true; |
500 | |
	qed_init_dp(cdev, params->dp_module, params->dp_level);
502 | |
503 | cdev->recov_in_prog = params->recov_in_prog; |
504 | |
505 | rc = qed_init_pci(cdev, pdev); |
506 | if (rc) { |
507 | DP_ERR(cdev, "init pci failed\n" ); |
508 | goto err1; |
509 | } |
510 | DP_INFO(cdev, "PCI init completed successfully\n" ); |
511 | |
512 | rc = qed_hw_prepare(cdev, personality: QED_PCI_DEFAULT); |
513 | if (rc) { |
514 | DP_ERR(cdev, "hw prepare failed\n" ); |
515 | goto err2; |
516 | } |
517 | |
518 | DP_INFO(cdev, "%s completed successfully\n" , __func__); |
519 | |
520 | return cdev; |
521 | |
522 | err2: |
523 | qed_free_pci(cdev); |
524 | err1: |
525 | qed_free_cdev(cdev); |
526 | err0: |
527 | return NULL; |
528 | } |
529 | |
530 | static void qed_remove(struct qed_dev *cdev) |
531 | { |
532 | if (!cdev) |
533 | return; |
534 | |
535 | qed_hw_remove(cdev); |
536 | |
537 | qed_free_pci(cdev); |
538 | |
539 | qed_set_power_state(cdev, PCI_D3hot); |
540 | |
541 | qed_free_cdev(cdev); |
542 | } |
543 | |
544 | static void qed_disable_msix(struct qed_dev *cdev) |
545 | { |
546 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
551 | } |
552 | |
553 | memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); |
554 | } |
555 | |
556 | static int qed_enable_msix(struct qed_dev *cdev, |
557 | struct qed_int_params *int_params) |
558 | { |
559 | int i, rc, cnt; |
560 | |
561 | cnt = int_params->in.num_vectors; |
562 | |
563 | for (i = 0; i < cnt; i++) |
564 | int_params->msix_table[i].entry = i; |
565 | |
	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);
571 | |
572 | /* If fastpath is initialized, we need at least one interrupt |
573 | * per hwfn [and the slow path interrupts]. New requested number |
574 | * should be a multiple of the number of hwfns. |
575 | */ |
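		/* e.g. on a two-hwfn CMT device, a partial grant of 9 vectors
		 * is trimmed to 8 so that each hwfn keeps an equal share.
		 */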
576 | cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; |
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
582 | if (!rc) |
583 | rc = cnt; |
584 | } |
585 | |
586 | /* For VFs, we should return with an error in case we didn't get the |
587 | * exact number of msix vectors as we requested. |
588 | * Not doing that will lead to a crash when starting queues for |
589 | * this VF. |
590 | */ |
591 | if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { |
		/* MSI-X configuration was achieved */
593 | int_params->out.int_mode = QED_INT_MODE_MSIX; |
594 | int_params->out.num_vectors = rc; |
595 | rc = 0; |
596 | } else { |
597 | DP_NOTICE(cdev, |
598 | "Failed to enable MSI-X [Requested %d vectors][rc %d]\n" , |
599 | cnt, rc); |
600 | } |
601 | |
602 | return rc; |
603 | } |
604 | |
605 | /* This function outputs the int mode and the number of enabled msix vector */ |
606 | static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) |
607 | { |
608 | struct qed_int_params *int_params = &cdev->int_params; |
609 | struct msix_entry *tbl; |
610 | int rc = 0, cnt; |
611 | |
612 | switch (int_params->in.int_mode) { |
613 | case QED_INT_MODE_MSIX: |
614 | /* Allocate MSIX table */ |
615 | cnt = int_params->in.num_vectors; |
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
617 | if (!int_params->msix_table) { |
618 | rc = -ENOMEM; |
619 | goto out; |
620 | } |
621 | |
622 | /* Enable MSIX */ |
623 | rc = qed_enable_msix(cdev, int_params); |
624 | if (!rc) |
625 | goto out; |
626 | |
627 | DP_NOTICE(cdev, "Failed to enable MSI-X\n" ); |
628 | kfree(objp: int_params->msix_table); |
629 | if (force_mode) |
630 | goto out; |
631 | fallthrough; |
632 | |
633 | case QED_INT_MODE_MSI: |
634 | if (cdev->num_hwfns == 1) { |
			rc = pci_enable_msi(cdev->pdev);
636 | if (!rc) { |
637 | int_params->out.int_mode = QED_INT_MODE_MSI; |
638 | goto out; |
639 | } |
640 | |
641 | DP_NOTICE(cdev, "Failed to enable MSI\n" ); |
642 | if (force_mode) |
643 | goto out; |
644 | } |
645 | fallthrough; |
646 | |
647 | case QED_INT_MODE_INTA: |
648 | int_params->out.int_mode = QED_INT_MODE_INTA; |
649 | rc = 0; |
650 | goto out; |
651 | default: |
652 | DP_NOTICE(cdev, "Unknown int_mode value %d\n" , |
653 | int_params->in.int_mode); |
654 | rc = -EINVAL; |
655 | } |
656 | |
657 | out: |
658 | if (!rc) |
659 | DP_INFO(cdev, "Using %s interrupts\n" , |
660 | int_params->out.int_mode == QED_INT_MODE_INTA ? |
661 | "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? |
662 | "MSI" : "MSIX" ); |
663 | cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; |
664 | |
665 | return rc; |
666 | } |
667 | |
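/* Fastpath status-block indices are interleaved across hwfns on CMT (dual
 * hwfn) devices: index % num_hwfns selects the hwfn and index / num_hwfns the
 * handler slot within it, matching the SB mapping used by qed_sb_init().
 */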
668 | static void qed_simd_handler_config(struct qed_dev *cdev, void *token, |
669 | int index, void(*handler)(void *)) |
670 | { |
671 | struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; |
672 | int relative_idx = index / cdev->num_hwfns; |
673 | |
674 | hwfn->simd_proto_handler[relative_idx].func = handler; |
675 | hwfn->simd_proto_handler[relative_idx].token = token; |
676 | } |
677 | |
678 | static void qed_simd_handler_clean(struct qed_dev *cdev, int index) |
679 | { |
680 | struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; |
681 | int relative_idx = index / cdev->num_hwfns; |
682 | |
683 | memset(&hwfn->simd_proto_handler[relative_idx], 0, |
684 | sizeof(struct qed_simd_fp_handler)); |
685 | } |
686 | |
687 | static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) |
688 | { |
	tasklet_schedule((struct tasklet_struct *)tasklet);
690 | return IRQ_HANDLED; |
691 | } |
692 | |
693 | static irqreturn_t qed_single_int(int irq, void *dev_instance) |
694 | { |
695 | struct qed_dev *cdev = (struct qed_dev *)dev_instance; |
696 | struct qed_hwfn *hwfn; |
697 | irqreturn_t rc = IRQ_NONE; |
698 | u64 status; |
699 | int i, j; |
700 | |
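	/* Each hwfn exposes a single ISR status word: bit 0 signals the
	 * slowpath DPC, while every higher bit maps to one registered
	 * fastpath handler.
	 */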
701 | for (i = 0; i < cdev->num_hwfns; i++) { |
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
703 | |
704 | if (!status) |
705 | continue; |
706 | |
707 | hwfn = &cdev->hwfns[i]; |
708 | |
709 | /* Slowpath interrupt */ |
710 | if (unlikely(status & 0x1)) { |
			tasklet_schedule(&hwfn->sp_dpc);
712 | status &= ~0x1; |
713 | rc = IRQ_HANDLED; |
714 | } |
715 | |
716 | /* Fastpath interrupts */ |
717 | for (j = 0; j < 64; j++) { |
718 | if ((0x2ULL << j) & status) { |
719 | struct qed_simd_fp_handler *p_handler = |
720 | &hwfn->simd_proto_handler[j]; |
721 | |
722 | if (p_handler->func) |
723 | p_handler->func(p_handler->token); |
724 | else |
725 | DP_NOTICE(hwfn, |
726 | "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n" , |
727 | j, status); |
728 | |
729 | status &= ~(0x2ULL << j); |
730 | rc = IRQ_HANDLED; |
731 | } |
732 | } |
733 | |
734 | if (unlikely(status)) |
735 | DP_VERBOSE(hwfn, NETIF_MSG_INTR, |
736 | "got an unknown interrupt status 0x%llx\n" , |
737 | status); |
738 | } |
739 | |
740 | return rc; |
741 | } |
742 | |
743 | int qed_slowpath_irq_req(struct qed_hwfn *hwfn) |
744 | { |
745 | struct qed_dev *cdev = hwfn->cdev; |
746 | u32 int_mode; |
747 | int rc = 0; |
748 | u8 id; |
749 | |
750 | int_mode = cdev->int_params.out.int_mode; |
751 | if (int_mode == QED_INT_MODE_MSIX) { |
752 | id = hwfn->my_id; |
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
758 | } else { |
759 | unsigned long flags = 0; |
760 | |
		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
770 | } |
771 | |
772 | if (rc) |
773 | DP_NOTICE(cdev, "request_irq failed, rc = %d\n" , rc); |
774 | else |
775 | DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), |
776 | "Requested slowpath %s\n" , |
777 | (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ" ); |
778 | |
779 | return rc; |
780 | } |
781 | |
782 | static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) |
783 | { |
784 | /* Calling the disable function will make sure that any |
785 | * currently-running function is completed. The following call to the |
786 | * enable function makes this sequence a flush-like operation. |
787 | */ |
788 | if (p_hwfn->b_sp_dpc_enabled) { |
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
791 | } |
792 | } |
793 | |
794 | void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) |
795 | { |
796 | struct qed_dev *cdev = p_hwfn->cdev; |
797 | u8 id = p_hwfn->my_id; |
798 | u32 int_mode; |
799 | |
800 | int_mode = cdev->int_params.out.int_mode; |
801 | if (int_mode == QED_INT_MODE_MSIX) |
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);
805 | |
806 | qed_slowpath_tasklet_flush(p_hwfn); |
807 | } |
808 | |
809 | static void qed_slowpath_irq_free(struct qed_dev *cdev) |
810 | { |
811 | int i; |
812 | |
813 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
814 | for_each_hwfn(cdev, i) { |
815 | if (!cdev->hwfns[i].b_int_requested) |
816 | break; |
817 | free_irq(cdev->int_params.msix_table[i].vector, |
818 | &cdev->hwfns[i].sp_dpc); |
819 | } |
820 | } else { |
821 | if (QED_LEADING_HWFN(cdev)->b_int_requested) |
822 | free_irq(cdev->pdev->irq, cdev); |
823 | } |
824 | qed_int_disable_post_isr_release(cdev); |
825 | } |
826 | |
827 | static int qed_nic_stop(struct qed_dev *cdev) |
828 | { |
829 | int i, rc; |
830 | |
831 | rc = qed_hw_stop(cdev); |
832 | |
833 | for (i = 0; i < cdev->num_hwfns; i++) { |
834 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
835 | |
836 | if (p_hwfn->b_sp_dpc_enabled) { |
			tasklet_disable(&p_hwfn->sp_dpc);
838 | p_hwfn->b_sp_dpc_enabled = false; |
839 | DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, |
840 | "Disabled sp tasklet [hwfn %d] at %p\n" , |
841 | i, &p_hwfn->sp_dpc); |
842 | } |
843 | } |
844 | |
845 | qed_dbg_pf_exit(cdev); |
846 | |
847 | return rc; |
848 | } |
849 | |
850 | static int qed_nic_setup(struct qed_dev *cdev) |
851 | { |
852 | int rc, i; |
853 | |
854 | /* Determine if interface is going to require LL2 */ |
855 | if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { |
856 | for (i = 0; i < cdev->num_hwfns; i++) { |
857 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
858 | |
859 | p_hwfn->using_ll2 = true; |
860 | } |
861 | } |
862 | |
863 | rc = qed_resc_alloc(cdev); |
864 | if (rc) |
865 | return rc; |
866 | |
867 | DP_INFO(cdev, "Allocated qed resources\n" ); |
868 | |
869 | qed_resc_setup(cdev); |
870 | |
871 | return rc; |
872 | } |
873 | |
874 | static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) |
875 | { |
876 | int limit = 0; |
877 | |
878 | /* Mark the fastpath as free/used */ |
879 | cdev->int_params.fp_initialized = cnt ? true : false; |
880 | |
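	/* Without MSI-X the shared status word leaves at most 63 fastpath
	 * bits per hwfn (bit 0 is reserved for the slowpath DPC), hence the
	 * num_hwfns * 63 ceiling below.
	 */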
881 | if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) |
882 | limit = cdev->num_hwfns * 63; |
883 | else if (cdev->int_params.fp_msix_cnt) |
884 | limit = cdev->int_params.fp_msix_cnt; |
885 | |
886 | if (!limit) |
887 | return -ENOMEM; |
888 | |
889 | return min_t(int, cnt, limit); |
890 | } |
891 | |
892 | static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) |
893 | { |
894 | memset(info, 0, sizeof(struct qed_int_info)); |
895 | |
896 | if (!cdev->int_params.fp_initialized) { |
897 | DP_INFO(cdev, |
898 | "Protocol driver requested interrupt information, but its support is not yet configured\n" ); |
899 | return -EINVAL; |
900 | } |
901 | |
902 | /* Need to expose only MSI-X information; Single IRQ is handled solely |
903 | * by qed. |
904 | */ |
905 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
906 | int msix_base = cdev->int_params.fp_msix_base; |
907 | |
908 | info->msix_cnt = cdev->int_params.fp_msix_cnt; |
909 | info->msix = &cdev->int_params.msix_table[msix_base]; |
910 | } |
911 | |
912 | return 0; |
913 | } |
914 | |
915 | static int qed_slowpath_setup_int(struct qed_dev *cdev, |
916 | enum qed_int_mode int_mode) |
917 | { |
918 | struct qed_sb_cnt_info sb_cnt_info; |
919 | int num_l2_queues = 0; |
920 | int rc; |
921 | int i; |
922 | |
923 | if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { |
924 | DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n" ); |
925 | return -EINVAL; |
926 | } |
927 | |
928 | memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); |
929 | cdev->int_params.in.int_mode = int_mode; |
930 | for_each_hwfn(cdev, i) { |
931 | memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); |
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
933 | cdev->int_params.in.num_vectors += sb_cnt_info.cnt; |
934 | cdev->int_params.in.num_vectors++; /* slowpath */ |
935 | } |
936 | |
937 | /* We want a minimum of one slowpath and one fastpath vector per hwfn */ |
938 | cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; |
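	/* e.g. a dual-hwfn (CMT) adapter needs at least four vectors here */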
939 | |
940 | if (is_kdump_kernel()) { |
941 | DP_INFO(cdev, |
942 | "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n" , |
943 | cdev->int_params.in.min_msix_cnt); |
944 | cdev->int_params.in.num_vectors = |
945 | cdev->int_params.in.min_msix_cnt; |
946 | } |
947 | |
	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "%s ERR\n", __func__);
951 | return rc; |
952 | } |
953 | |
954 | cdev->int_params.fp_msix_base = cdev->num_hwfns; |
955 | cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - |
956 | cdev->num_hwfns; |
957 | |
958 | if (!IS_ENABLED(CONFIG_QED_RDMA) || |
959 | !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) |
960 | return 0; |
961 | |
962 | for_each_hwfn(cdev, i) |
963 | num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); |
964 | |
965 | DP_VERBOSE(cdev, QED_MSG_RDMA, |
966 | "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n" , |
967 | cdev->int_params.fp_msix_cnt, num_l2_queues); |
968 | |
969 | if (cdev->int_params.fp_msix_cnt > num_l2_queues) { |
970 | cdev->int_params.rdma_msix_cnt = |
971 | (cdev->int_params.fp_msix_cnt - num_l2_queues) |
972 | / cdev->num_hwfns; |
973 | cdev->int_params.rdma_msix_base = |
974 | cdev->int_params.fp_msix_base + num_l2_queues; |
975 | cdev->int_params.fp_msix_cnt = num_l2_queues; |
976 | } else { |
977 | cdev->int_params.rdma_msix_cnt = 0; |
978 | } |
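	/* For illustration: with fp_msix_cnt = 96, 64 L2 queues and two hwfns,
	 * rdma_msix_cnt becomes (96 - 64) / 2 = 16, rdma_msix_base sits right
	 * after the L2 range and fp_msix_cnt is trimmed back to 64.
	 */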
979 | |
980 | DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n" , |
981 | cdev->int_params.rdma_msix_cnt, |
982 | cdev->int_params.rdma_msix_base); |
983 | |
984 | return 0; |
985 | } |
986 | |
987 | static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) |
988 | { |
989 | int rc; |
990 | |
991 | memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); |
992 | cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; |
993 | |
	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
1000 | cdev->int_params.in.num_vectors += vectors; |
1001 | } |
1002 | |
1003 | /* We want a minimum of one fastpath vector per vf hwfn */ |
1004 | cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; |
1005 | |
	rc = qed_set_int_mode(cdev, true);
1007 | if (rc) |
1008 | return rc; |
1009 | |
1010 | cdev->int_params.fp_msix_base = 0; |
1011 | cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; |
1012 | |
1013 | return 0; |
1014 | } |
1015 | |
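/* Inflate the zlib-compressed firmware blob into unzip_buf; returns the
 * number of unzipped dwords, or 0 on any zlib error.
 */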
1016 | u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, |
1017 | u8 *input_buf, u32 max_size, u8 *unzip_buf) |
1018 | { |
1019 | int rc; |
1020 | |
1021 | p_hwfn->stream->next_in = input_buf; |
1022 | p_hwfn->stream->avail_in = input_len; |
1023 | p_hwfn->stream->next_out = unzip_buf; |
1024 | p_hwfn->stream->avail_out = max_size; |
1025 | |
	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
1027 | |
1028 | if (rc != Z_OK) { |
1029 | DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n" , |
1030 | rc); |
1031 | return 0; |
1032 | } |
1033 | |
	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);
1036 | |
1037 | if (rc != Z_OK && rc != Z_STREAM_END) { |
1038 | DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n" , |
1039 | p_hwfn->stream->msg, rc); |
1040 | return 0; |
1041 | } |
1042 | |
1043 | return p_hwfn->stream->total_out / 4; |
1044 | } |
1045 | |
1046 | static int qed_alloc_stream_mem(struct qed_dev *cdev) |
1047 | { |
1048 | int i; |
1049 | void *workspace; |
1050 | |
1051 | for_each_hwfn(cdev, i) { |
1052 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
1053 | |
		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
1055 | if (!p_hwfn->stream) |
1056 | return -ENOMEM; |
1057 | |
		workspace = vzalloc(zlib_inflate_workspacesize());
1059 | if (!workspace) |
1060 | return -ENOMEM; |
1061 | p_hwfn->stream->workspace = workspace; |
1062 | } |
1063 | |
1064 | return 0; |
1065 | } |
1066 | |
1067 | static void qed_free_stream_mem(struct qed_dev *cdev) |
1068 | { |
1069 | int i; |
1070 | |
1071 | for_each_hwfn(cdev, i) { |
1072 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
1073 | |
1074 | if (!p_hwfn->stream) |
1075 | return; |
1076 | |
		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
1079 | } |
1080 | } |
1081 | |
1082 | static void qed_update_pf_params(struct qed_dev *cdev, |
1083 | struct qed_pf_params *params) |
1084 | { |
1085 | int i; |
1086 | |
1087 | if (IS_ENABLED(CONFIG_QED_RDMA)) { |
1088 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; |
1089 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; |
1090 | params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; |
1091 | /* divide by 3 the MRs to avoid MF ILT overflow */ |
1092 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; |
1093 | } |
1094 | |
1095 | if (cdev->num_hwfns > 1 || IS_VF(cdev)) |
1096 | params->eth_pf_params.num_arfs_filters = 0; |
1097 | |
1098 | /* In case we might support RDMA, don't allow qede to be greedy |
1099 | * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] |
1100 | * per hwfn. |
1101 | */ |
1102 | if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { |
1103 | u16 *num_cons; |
1104 | |
1105 | num_cons = ¶ms->eth_pf_params.num_cons; |
1106 | *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); |
1107 | } |
1108 | |
1109 | for (i = 0; i < cdev->num_hwfns; i++) { |
1110 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
1111 | |
1112 | p_hwfn->pf_params = *params; |
1113 | } |
1114 | } |
1115 | |
1116 | #define QED_PERIODIC_DB_REC_COUNT 10 |
1117 | #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 |
1118 | #define QED_PERIODIC_DB_REC_INTERVAL \ |
1119 | msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) |
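/* Once kicked, periodic doorbell recovery reruns every 100 ms for up to
 * QED_PERIODIC_DB_REC_COUNT iterations, i.e. roughly one second per trigger.
 */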
1120 | |
1121 | static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, |
1122 | enum qed_slowpath_wq_flag wq_flag, |
1123 | unsigned long delay) |
1124 | { |
1125 | if (!hwfn->slowpath_wq_active) |
1126 | return -EINVAL; |
1127 | |
1128 | /* Memory barrier for setting atomic bit */ |
1129 | smp_mb__before_atomic(); |
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
1131 | /* Memory barrier after setting atomic bit */ |
1132 | smp_mb__after_atomic(); |
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
1134 | |
1135 | return 0; |
1136 | } |
1137 | |
1138 | void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) |
1139 | { |
1140 | /* Reset periodic Doorbell Recovery counter */ |
1141 | p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; |
1142 | |
1143 | /* Don't schedule periodic Doorbell Recovery if already scheduled */ |
1144 | if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, |
1145 | &p_hwfn->slowpath_task_flags)) |
1146 | return; |
1147 | |
	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
1150 | } |
1151 | |
1152 | static void qed_slowpath_wq_stop(struct qed_dev *cdev) |
1153 | { |
1154 | int i; |
1155 | |
1156 | if (IS_VF(cdev)) |
1157 | return; |
1158 | |
1159 | for_each_hwfn(cdev, i) { |
1160 | if (!cdev->hwfns[i].slowpath_wq) |
1161 | continue; |
1162 | |
1163 | /* Stop queuing new delayed works */ |
1164 | cdev->hwfns[i].slowpath_wq_active = false; |
1165 | |
		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
1168 | } |
1169 | } |
1170 | |
1171 | static void qed_slowpath_task(struct work_struct *work) |
1172 | { |
1173 | struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, |
1174 | slowpath_task.work); |
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1176 | |
1177 | if (!ptt) { |
1178 | if (hwfn->slowpath_wq_active) |
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);
1181 | |
1182 | return; |
1183 | } |
1184 | |
	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);
1188 | |
	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
1191 | /* skip qed_db_rec_handler during recovery/unload */ |
1192 | if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) |
1193 | goto out; |
1194 | |
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
1200 | } |
1201 | |
1202 | out: |
	qed_ptt_release(hwfn, ptt);
1204 | } |
1205 | |
1206 | static int qed_slowpath_wq_start(struct qed_dev *cdev) |
1207 | { |
1208 | struct qed_hwfn *hwfn; |
1209 | char name[NAME_SIZE]; |
1210 | int i; |
1211 | |
1212 | if (IS_VF(cdev)) |
1213 | return 0; |
1214 | |
1215 | for_each_hwfn(cdev, i) { |
1216 | hwfn = &cdev->hwfns[i]; |
1217 | |
		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
1223 | if (!hwfn->slowpath_wq) { |
1224 | DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n" ); |
1225 | return -ENOMEM; |
1226 | } |
1227 | |
1228 | INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); |
1229 | hwfn->slowpath_wq_active = true; |
1230 | } |
1231 | |
1232 | return 0; |
1233 | } |
1234 | |
1235 | static int qed_slowpath_start(struct qed_dev *cdev, |
1236 | struct qed_slowpath_params *params) |
1237 | { |
1238 | struct qed_drv_load_params drv_load_params; |
1239 | struct qed_hw_init_params hw_init_params; |
1240 | struct qed_mcp_drv_version drv_version; |
1241 | struct qed_tunnel_info tunn_info; |
1242 | const u8 *data = NULL; |
1243 | struct qed_hwfn *hwfn; |
1244 | struct qed_ptt *p_ptt; |
1245 | int rc = -EINVAL; |
1246 | |
1247 | if (qed_iov_wq_start(cdev)) |
1248 | goto err; |
1249 | |
1250 | if (qed_slowpath_wq_start(cdev)) |
1251 | goto err; |
1252 | |
1253 | if (IS_PF(cdev)) { |
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
1260 | goto err; |
1261 | } |
1262 | |
1263 | if (cdev->num_hwfns == 1) { |
1264 | p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); |
1265 | if (p_ptt) { |
1266 | QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; |
1267 | } else { |
1268 | DP_NOTICE(cdev, |
1269 | "Failed to acquire PTT for aRFS\n" ); |
1270 | rc = -EINVAL; |
1271 | goto err; |
1272 | } |
1273 | } |
1274 | } |
1275 | |
1276 | cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; |
1277 | rc = qed_nic_setup(cdev); |
1278 | if (rc) |
1279 | goto err; |
1280 | |
1281 | if (IS_PF(cdev)) |
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
1283 | else |
1284 | rc = qed_slowpath_vf_setup_int(cdev); |
1285 | if (rc) |
1286 | goto err1; |
1287 | |
1288 | if (IS_PF(cdev)) { |
1289 | /* Allocate stream for unzipping */ |
1290 | rc = qed_alloc_stream_mem(cdev); |
1291 | if (rc) |
1292 | goto err2; |
1293 | |
1294 | /* First Dword used to differentiate between various sources */ |
1295 | data = cdev->firmware->data + sizeof(u32); |
1296 | |
1297 | qed_dbg_pf_init(cdev); |
1298 | } |
1299 | |
1300 | /* Start the slowpath */ |
1301 | memset(&hw_init_params, 0, sizeof(hw_init_params)); |
1302 | memset(&tunn_info, 0, sizeof(tunn_info)); |
1303 | tunn_info.vxlan.b_mode_enabled = true; |
1304 | tunn_info.l2_gre.b_mode_enabled = true; |
1305 | tunn_info.ip_gre.b_mode_enabled = true; |
1306 | tunn_info.l2_geneve.b_mode_enabled = true; |
1307 | tunn_info.ip_geneve.b_mode_enabled = true; |
1308 | tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; |
1309 | tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; |
1310 | tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; |
1311 | tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; |
1312 | tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; |
1313 | hw_init_params.p_tunn = &tunn_info; |
1314 | hw_init_params.b_hw_start = true; |
1315 | hw_init_params.int_mode = cdev->int_params.out.int_mode; |
1316 | hw_init_params.allow_npar_tx_switch = true; |
1317 | hw_init_params.bin_fw_data = data; |
1318 | |
1319 | memset(&drv_load_params, 0, sizeof(drv_load_params)); |
1320 | drv_load_params.is_crash_kernel = is_kdump_kernel(); |
1321 | drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; |
1322 | drv_load_params.avoid_eng_reset = false; |
1323 | drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; |
1324 | hw_init_params.p_drv_load_params = &drv_load_params; |
1325 | |
	rc = qed_hw_init(cdev, &hw_init_params);
1327 | if (rc) |
1328 | goto err2; |
1329 | |
1330 | DP_INFO(cdev, |
1331 | "HW initialization and function start completed successfully\n" ); |
1332 | |
1333 | if (IS_PF(cdev)) { |
1334 | cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | |
1335 | BIT(QED_MODE_L2GENEVE_TUNN) | |
1336 | BIT(QED_MODE_IPGENEVE_TUNN) | |
1337 | BIT(QED_MODE_L2GRE_TUNN) | |
1338 | BIT(QED_MODE_IPGRE_TUNN)); |
1339 | } |
1340 | |
1341 | /* Allocate LL2 interface if needed */ |
1342 | if (QED_LEADING_HWFN(cdev)->using_ll2) { |
1343 | rc = qed_ll2_alloc_if(cdev); |
1344 | if (rc) |
1345 | goto err3; |
1346 | } |
1347 | if (IS_PF(cdev)) { |
1348 | hwfn = QED_LEADING_HWFN(cdev); |
1349 | drv_version.version = (params->drv_major << 24) | |
1350 | (params->drv_minor << 16) | |
1351 | (params->drv_rev << 8) | |
1352 | (params->drv_eng); |
1353 | strscpy(drv_version.name, params->name, |
1354 | MCP_DRV_VER_STR_SIZE - 4); |
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
1359 | goto err4; |
1360 | } |
1361 | } |
1362 | |
1363 | qed_reset_vport_stats(cdev); |
1364 | |
1365 | return 0; |
1366 | |
1367 | err4: |
1368 | qed_ll2_dealloc_if(cdev); |
1369 | err3: |
1370 | qed_hw_stop(cdev); |
1371 | err2: |
1372 | qed_hw_timers_stop_all(cdev); |
1373 | if (IS_PF(cdev)) |
1374 | qed_slowpath_irq_free(cdev); |
1375 | qed_free_stream_mem(cdev); |
1376 | qed_disable_msix(cdev); |
1377 | err1: |
1378 | qed_resc_free(cdev); |
1379 | err: |
1380 | if (IS_PF(cdev)) |
		release_firmware(cdev->firmware);
1382 | |
1383 | if (IS_PF(cdev) && (cdev->num_hwfns == 1) && |
1384 | QED_LEADING_HWFN(cdev)->p_arfs_ptt) |
1385 | qed_ptt_release(QED_LEADING_HWFN(cdev), |
1386 | QED_LEADING_HWFN(cdev)->p_arfs_ptt); |
1387 | |
	qed_iov_wq_stop(cdev, false);
1389 | |
1390 | qed_slowpath_wq_stop(cdev); |
1391 | |
1392 | return rc; |
1393 | } |
1394 | |
1395 | static int qed_slowpath_stop(struct qed_dev *cdev) |
1396 | { |
1397 | if (!cdev) |
1398 | return -ENODEV; |
1399 | |
1400 | qed_slowpath_wq_stop(cdev); |
1401 | |
1402 | qed_ll2_dealloc_if(cdev); |
1403 | |
1404 | if (IS_PF(cdev)) { |
1405 | if (cdev->num_hwfns == 1) |
1406 | qed_ptt_release(QED_LEADING_HWFN(cdev), |
1407 | QED_LEADING_HWFN(cdev)->p_arfs_ptt); |
1408 | qed_free_stream_mem(cdev); |
1409 | if (IS_QED_ETH_IF(cdev)) |
			qed_sriov_disable(cdev, true);
1411 | } |
1412 | |
1413 | qed_nic_stop(cdev); |
1414 | |
1415 | if (IS_PF(cdev)) |
1416 | qed_slowpath_irq_free(cdev); |
1417 | |
1418 | qed_disable_msix(cdev); |
1419 | |
1420 | qed_resc_free(cdev); |
1421 | |
	qed_iov_wq_stop(cdev, true);
1423 | |
1424 | if (IS_PF(cdev)) |
		release_firmware(cdev->firmware);
1426 | |
1427 | return 0; |
1428 | } |
1429 | |
1430 | static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) |
1431 | { |
1432 | int i; |
1433 | |
1434 | memcpy(cdev->name, name, NAME_SIZE); |
1435 | for_each_hwfn(cdev, i) |
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1437 | } |
1438 | |
1439 | static u32 qed_sb_init(struct qed_dev *cdev, |
1440 | struct qed_sb_info *sb_info, |
1441 | void *sb_virt_addr, |
1442 | dma_addr_t sb_phy_addr, u16 sb_id, |
1443 | enum qed_sb_type type) |
1444 | { |
1445 | struct qed_hwfn *p_hwfn; |
1446 | struct qed_ptt *p_ptt; |
1447 | u16 rel_sb_id; |
1448 | u32 rc; |
1449 | |
1450 | /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ |
1451 | if (type == QED_SB_TYPE_L2_QUEUE) { |
1452 | p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; |
1453 | rel_sb_id = sb_id / cdev->num_hwfns; |
1454 | } else { |
1455 | p_hwfn = QED_AFFIN_HWFN(cdev); |
1456 | rel_sb_id = sb_id; |
1457 | } |
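	/* e.g. with two hwfns, L2 SB id 5 lands on hwfn 1 as relative SB 2,
	 * mirroring the fastpath vector interleaving used elsewhere.
	 */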
1458 | |
1459 | DP_VERBOSE(cdev, NETIF_MSG_INTR, |
1460 | "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n" , |
1461 | IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); |
1462 | |
1463 | if (IS_PF(p_hwfn->cdev)) { |
1464 | p_ptt = qed_ptt_acquire(p_hwfn); |
1465 | if (!p_ptt) |
1466 | return -EBUSY; |
1467 | |
		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
1474 | } |
1475 | |
1476 | return rc; |
1477 | } |
1478 | |
1479 | static u32 qed_sb_release(struct qed_dev *cdev, |
1480 | struct qed_sb_info *sb_info, |
1481 | u16 sb_id, |
1482 | enum qed_sb_type type) |
1483 | { |
1484 | struct qed_hwfn *p_hwfn; |
1485 | u16 rel_sb_id; |
1486 | u32 rc; |
1487 | |
1488 | /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ |
1489 | if (type == QED_SB_TYPE_L2_QUEUE) { |
1490 | p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; |
1491 | rel_sb_id = sb_id / cdev->num_hwfns; |
1492 | } else { |
1493 | p_hwfn = QED_AFFIN_HWFN(cdev); |
1494 | rel_sb_id = sb_id; |
1495 | } |
1496 | |
1497 | DP_VERBOSE(cdev, NETIF_MSG_INTR, |
1498 | "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n" , |
1499 | IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); |
1500 | |
	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1502 | |
1503 | return rc; |
1504 | } |
1505 | |
1506 | static bool qed_can_link_change(struct qed_dev *cdev) |
1507 | { |
1508 | return true; |
1509 | } |
1510 | |
1511 | static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, |
1512 | const struct qed_link_params *params) |
1513 | { |
1514 | struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; |
1515 | const struct qed_mfw_speed_map *map; |
1516 | u32 i; |
1517 | |
1518 | if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) |
1519 | ext_speed->autoneg = !!params->autoneg; |
1520 | |
1521 | if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { |
1522 | ext_speed->advertised_speeds = 0; |
1523 | |
1524 | for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { |
1525 | map = qed_mfw_ext_maps + i; |
1526 | |
			if (linkmode_intersects(params->adv_speeds, map->caps))
1528 | ext_speed->advertised_speeds |= map->mfw_val; |
1529 | } |
1530 | } |
1531 | |
1532 | if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { |
1533 | switch (params->forced_speed) { |
1534 | case SPEED_1000: |
1535 | ext_speed->forced_speed = QED_EXT_SPEED_1G; |
1536 | break; |
1537 | case SPEED_10000: |
1538 | ext_speed->forced_speed = QED_EXT_SPEED_10G; |
1539 | break; |
1540 | case SPEED_20000: |
1541 | ext_speed->forced_speed = QED_EXT_SPEED_20G; |
1542 | break; |
1543 | case SPEED_25000: |
1544 | ext_speed->forced_speed = QED_EXT_SPEED_25G; |
1545 | break; |
1546 | case SPEED_40000: |
1547 | ext_speed->forced_speed = QED_EXT_SPEED_40G; |
1548 | break; |
1549 | case SPEED_50000: |
1550 | ext_speed->forced_speed = QED_EXT_SPEED_50G_R | |
1551 | QED_EXT_SPEED_50G_R2; |
1552 | break; |
1553 | case SPEED_100000: |
1554 | ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | |
1555 | QED_EXT_SPEED_100G_R4 | |
1556 | QED_EXT_SPEED_100G_P4; |
1557 | break; |
1558 | default: |
1559 | break; |
1560 | } |
1561 | } |
1562 | |
1563 | if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) |
1564 | return; |
1565 | |
1566 | switch (params->forced_speed) { |
1567 | case SPEED_25000: |
1568 | switch (params->fec) { |
1569 | case FEC_FORCE_MODE_NONE: |
1570 | link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; |
1571 | break; |
1572 | case FEC_FORCE_MODE_FIRECODE: |
1573 | link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; |
1574 | break; |
1575 | case FEC_FORCE_MODE_RS: |
1576 | link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; |
1577 | break; |
1578 | case FEC_FORCE_MODE_AUTO: |
1579 | link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | |
1580 | ETH_EXT_FEC_25G_BASE_R | |
1581 | ETH_EXT_FEC_25G_NONE; |
1582 | break; |
1583 | default: |
1584 | break; |
1585 | } |
1586 | |
1587 | break; |
1588 | case SPEED_40000: |
1589 | switch (params->fec) { |
1590 | case FEC_FORCE_MODE_NONE: |
1591 | link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; |
1592 | break; |
1593 | case FEC_FORCE_MODE_FIRECODE: |
1594 | link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; |
1595 | break; |
1596 | case FEC_FORCE_MODE_AUTO: |
1597 | link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | |
1598 | ETH_EXT_FEC_40G_NONE; |
1599 | break; |
1600 | default: |
1601 | break; |
1602 | } |
1603 | |
1604 | break; |
1605 | case SPEED_50000: |
1606 | switch (params->fec) { |
1607 | case FEC_FORCE_MODE_NONE: |
1608 | link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; |
1609 | break; |
1610 | case FEC_FORCE_MODE_FIRECODE: |
1611 | link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; |
1612 | break; |
1613 | case FEC_FORCE_MODE_RS: |
1614 | link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; |
1615 | break; |
1616 | case FEC_FORCE_MODE_AUTO: |
1617 | link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | |
1618 | ETH_EXT_FEC_50G_BASE_R | |
1619 | ETH_EXT_FEC_50G_NONE; |
1620 | break; |
1621 | default: |
1622 | break; |
1623 | } |
1624 | |
1625 | break; |
1626 | case SPEED_100000: |
1627 | switch (params->fec) { |
1628 | case FEC_FORCE_MODE_NONE: |
1629 | link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; |
1630 | break; |
1631 | case FEC_FORCE_MODE_FIRECODE: |
1632 | link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; |
1633 | break; |
1634 | case FEC_FORCE_MODE_RS: |
1635 | link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; |
1636 | break; |
1637 | case FEC_FORCE_MODE_AUTO: |
1638 | link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | |
1639 | ETH_EXT_FEC_100G_BASE_R | |
1640 | ETH_EXT_FEC_100G_NONE; |
1641 | break; |
1642 | default: |
1643 | break; |
1644 | } |
1645 | |
1646 | break; |
1647 | default: |
1648 | break; |
1649 | } |
1650 | } |
1651 | |
1652 | static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) |
1653 | { |
1654 | struct qed_mcp_link_params *link_params; |
1655 | struct qed_mcp_link_speed_params *speed; |
1656 | const struct qed_mfw_speed_map *map; |
1657 | struct qed_hwfn *hwfn; |
1658 | struct qed_ptt *ptt; |
1659 | int rc; |
1660 | u32 i; |
1661 | |
1662 | if (!cdev) |
1663 | return -ENODEV; |
1664 | |
1665 | /* The link should be set only once per PF */ |
1666 | hwfn = &cdev->hwfns[0]; |
1667 | |
	/* When a VF wants to set the link, force it to read the bulletin
	 * instead. This mimics the PF behavior, where a notification [both
	 * immediate and possibly later] would be generated when changing
	 * properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1674 | return 0; |
1675 | } |
1676 | |
	ptt = qed_ptt_acquire(hwfn);
1678 | if (!ptt) |
1679 | return -EBUSY; |
1680 | |
	link_params = qed_mcp_get_link_params(hwfn);
1682 | if (!link_params) |
1683 | return -ENODATA; |
1684 | |
1685 | speed = &link_params->speed; |
1686 | |
1687 | if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) |
1688 | speed->autoneg = !!params->autoneg; |
1689 | |
1690 | if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { |
1691 | speed->advertised_speeds = 0; |
1692 | |
1693 | for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) { |
1694 | map = qed_mfw_legacy_maps + i; |
1695 | |
			if (linkmode_intersects(params->adv_speeds, map->caps))
1697 | speed->advertised_speeds |= map->mfw_val; |
1698 | } |
1699 | } |
1700 | |
1701 | if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) |
1702 | speed->forced_speed = params->forced_speed; |
1703 | |
	if (qed_mcp_is_ext_speed_supported(hwfn))
1705 | qed_set_ext_speed_params(link_params, params); |
1706 | |
1707 | if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { |
1708 | if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) |
1709 | link_params->pause.autoneg = true; |
1710 | else |
1711 | link_params->pause.autoneg = false; |
1712 | if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) |
1713 | link_params->pause.forced_rx = true; |
1714 | else |
1715 | link_params->pause.forced_rx = false; |
1716 | if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) |
1717 | link_params->pause.forced_tx = true; |
1718 | else |
1719 | link_params->pause.forced_tx = false; |
1720 | } |
1721 | |
1722 | if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { |
1723 | switch (params->loopback_mode) { |
1724 | case QED_LINK_LOOPBACK_INT_PHY: |
1725 | link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; |
1726 | break; |
1727 | case QED_LINK_LOOPBACK_EXT_PHY: |
1728 | link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; |
1729 | break; |
1730 | case QED_LINK_LOOPBACK_EXT: |
1731 | link_params->loopback_mode = ETH_LOOPBACK_EXT; |
1732 | break; |
1733 | case QED_LINK_LOOPBACK_MAC: |
1734 | link_params->loopback_mode = ETH_LOOPBACK_MAC; |
1735 | break; |
1736 | case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: |
1737 | link_params->loopback_mode = |
1738 | ETH_LOOPBACK_CNIG_AH_ONLY_0123; |
1739 | break; |
1740 | case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: |
1741 | link_params->loopback_mode = |
1742 | ETH_LOOPBACK_CNIG_AH_ONLY_2301; |
1743 | break; |
1744 | case QED_LINK_LOOPBACK_PCS_AH_ONLY: |
1745 | link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; |
1746 | break; |
1747 | case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: |
1748 | link_params->loopback_mode = |
1749 | ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; |
1750 | break; |
1751 | case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: |
1752 | link_params->loopback_mode = |
1753 | ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; |
1754 | break; |
1755 | default: |
1756 | link_params->loopback_mode = ETH_LOOPBACK_NONE; |
1757 | break; |
1758 | } |
1759 | } |
1760 | |
1761 | if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) |
1762 | memcpy(&link_params->eee, ¶ms->eee, |
1763 | sizeof(link_params->eee)); |
1764 | |
1765 | if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) |
1766 | link_params->fec = params->fec; |
1767 | |
	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);
1771 | |
1772 | return rc; |
1773 | } |
1774 | |
1775 | static int qed_get_port_type(u32 media_type) |
1776 | { |
1777 | int port_type; |
1778 | |
1779 | switch (media_type) { |
1780 | case MEDIA_SFPP_10G_FIBER: |
1781 | case MEDIA_SFP_1G_FIBER: |
1782 | case MEDIA_XFP_FIBER: |
1783 | case MEDIA_MODULE_FIBER: |
1784 | port_type = PORT_FIBRE; |
1785 | break; |
1786 | case MEDIA_DA_TWINAX: |
1787 | port_type = PORT_DA; |
1788 | break; |
1789 | case MEDIA_BASE_T: |
1790 | port_type = PORT_TP; |
1791 | break; |
1792 | case MEDIA_KR: |
1793 | case MEDIA_NOT_PRESENT: |
1794 | port_type = PORT_NONE; |
1795 | break; |
1796 | case MEDIA_UNSPECIFIED: |
1797 | default: |
1798 | port_type = PORT_OTHER; |
1799 | break; |
1800 | } |
1801 | return port_type; |
1802 | } |
1803 | |
1804 | static int qed_get_link_data(struct qed_hwfn *hwfn, |
1805 | struct qed_mcp_link_params *params, |
1806 | struct qed_mcp_link_state *link, |
1807 | struct qed_mcp_link_capabilities *link_caps) |
1808 | { |
1809 | void *p; |
1810 | |
1811 | if (!IS_PF(hwfn->cdev)) { |
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);
1815 | |
1816 | return 0; |
1817 | } |
1818 | |
	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
1830 | if (!p) |
1831 | return -ENXIO; |
1832 | memcpy(link_caps, p, sizeof(*link_caps)); |
1833 | |
1834 | return 0; |
1835 | } |
1836 | |
1837 | static void qed_fill_link_capability(struct qed_hwfn *hwfn, |
1838 | struct qed_ptt *ptt, u32 capability, |
1839 | unsigned long *if_caps) |
1840 | { |
1841 | u32 media_type, tcvr_state, tcvr_type; |
1842 | u32 speed_mask, board_cfg; |
1843 | |
	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1854 | board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; |
1855 | |
1856 | DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, |
1857 | "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n" , |
1858 | media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); |
1859 | |
1860 | switch (media_type) { |
1861 | case MEDIA_DA_TWINAX: |
1862 | phylink_set(if_caps, FIBRE); |
1863 | |
1864 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) |
1865 | phylink_set(if_caps, 20000baseKR2_Full); |
1866 | |
1867 | /* For DAC media multiple speed capabilities are supported */ |
1868 | capability |= speed_mask; |
1869 | |
1870 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) |
1871 | phylink_set(if_caps, 1000baseKX_Full); |
1872 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) |
1873 | phylink_set(if_caps, 10000baseCR_Full); |
1874 | |
1875 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) |
1876 | switch (tcvr_type) { |
1877 | case ETH_TRANSCEIVER_TYPE_40G_CR4: |
1878 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: |
1879 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: |
1880 | phylink_set(if_caps, 40000baseCR4_Full); |
1881 | break; |
1882 | default: |
1883 | break; |
1884 | } |
1885 | |
1886 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) |
1887 | phylink_set(if_caps, 25000baseCR_Full); |
1888 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) |
1889 | phylink_set(if_caps, 50000baseCR2_Full); |
1890 | |
1891 | if (capability & |
1892 | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) |
1893 | switch (tcvr_type) { |
1894 | case ETH_TRANSCEIVER_TYPE_100G_CR4: |
1895 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: |
1896 | phylink_set(if_caps, 100000baseCR4_Full); |
1897 | break; |
1898 | default: |
1899 | break; |
1900 | } |
1901 | |
1902 | break; |
1903 | case MEDIA_BASE_T: |
1904 | phylink_set(if_caps, TP); |
1905 | |
1906 | if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { |
1907 | if (capability & |
1908 | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) |
1909 | phylink_set(if_caps, 1000baseT_Full); |
1910 | if (capability & |
1911 | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) |
1912 | phylink_set(if_caps, 10000baseT_Full); |
1913 | } |
1914 | |
1915 | if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { |
1916 | phylink_set(if_caps, FIBRE); |
1917 | |
1918 | switch (tcvr_type) { |
1919 | case ETH_TRANSCEIVER_TYPE_1000BASET: |
1920 | phylink_set(if_caps, 1000baseT_Full); |
1921 | break; |
1922 | case ETH_TRANSCEIVER_TYPE_10G_BASET: |
1923 | phylink_set(if_caps, 10000baseT_Full); |
1924 | break; |
1925 | default: |
1926 | break; |
1927 | } |
1928 | } |
1929 | |
1930 | break; |
1931 | case MEDIA_SFP_1G_FIBER: |
1932 | case MEDIA_SFPP_10G_FIBER: |
1933 | case MEDIA_XFP_FIBER: |
1934 | case MEDIA_MODULE_FIBER: |
1935 | phylink_set(if_caps, FIBRE); |
1936 | capability |= speed_mask; |
1937 | |
1938 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) |
1939 | switch (tcvr_type) { |
1940 | case ETH_TRANSCEIVER_TYPE_1G_LX: |
1941 | case ETH_TRANSCEIVER_TYPE_1G_SX: |
1942 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: |
1943 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: |
1944 | phylink_set(if_caps, 1000baseKX_Full); |
1945 | break; |
1946 | default: |
1947 | break; |
1948 | } |
1949 | |
1950 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) |
1951 | switch (tcvr_type) { |
1952 | case ETH_TRANSCEIVER_TYPE_10G_SR: |
1953 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: |
1954 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: |
1955 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: |
1956 | phylink_set(if_caps, 10000baseSR_Full); |
1957 | break; |
1958 | case ETH_TRANSCEIVER_TYPE_10G_LR: |
1959 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: |
1960 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: |
1961 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: |
1962 | phylink_set(if_caps, 10000baseLR_Full); |
1963 | break; |
1964 | case ETH_TRANSCEIVER_TYPE_10G_LRM: |
1965 | phylink_set(if_caps, 10000baseLRM_Full); |
1966 | break; |
1967 | case ETH_TRANSCEIVER_TYPE_10G_ER: |
1968 | phylink_set(if_caps, 10000baseR_FEC); |
1969 | break; |
1970 | default: |
1971 | break; |
1972 | } |
1973 | |
1974 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) |
1975 | phylink_set(if_caps, 20000baseKR2_Full); |
1976 | |
1977 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) |
1978 | switch (tcvr_type) { |
1979 | case ETH_TRANSCEIVER_TYPE_25G_SR: |
1980 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: |
1981 | phylink_set(if_caps, 25000baseSR_Full); |
1982 | break; |
1983 | default: |
1984 | break; |
1985 | } |
1986 | |
1987 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) |
1988 | switch (tcvr_type) { |
1989 | case ETH_TRANSCEIVER_TYPE_40G_LR4: |
1990 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: |
1991 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: |
1992 | phylink_set(if_caps, 40000baseLR4_Full); |
1993 | break; |
1994 | case ETH_TRANSCEIVER_TYPE_40G_SR4: |
1995 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: |
1996 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: |
1997 | phylink_set(if_caps, 40000baseSR4_Full); |
1998 | break; |
1999 | default: |
2000 | break; |
2001 | } |
2002 | |
2003 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) |
2004 | phylink_set(if_caps, 50000baseKR2_Full); |
2005 | |
2006 | if (capability & |
2007 | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) |
2008 | switch (tcvr_type) { |
2009 | case ETH_TRANSCEIVER_TYPE_100G_SR4: |
2010 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: |
2011 | phylink_set(if_caps, 100000baseSR4_Full); |
2012 | break; |
2013 | case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: |
2014 | phylink_set(if_caps, 100000baseLR4_ER4_Full); |
2015 | break; |
2016 | default: |
2017 | break; |
2018 | } |
2019 | |
2020 | break; |
2021 | case MEDIA_KR: |
2022 | phylink_set(if_caps, Backplane); |
2023 | |
2024 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) |
2025 | phylink_set(if_caps, 20000baseKR2_Full); |
2026 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) |
2027 | phylink_set(if_caps, 1000baseKX_Full); |
2028 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) |
2029 | phylink_set(if_caps, 10000baseKR_Full); |
2030 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) |
2031 | phylink_set(if_caps, 25000baseKR_Full); |
2032 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) |
2033 | phylink_set(if_caps, 40000baseKR4_Full); |
2034 | if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) |
2035 | phylink_set(if_caps, 50000baseKR2_Full); |
2036 | if (capability & |
2037 | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) |
2038 | phylink_set(if_caps, 100000baseKR4_Full); |
2039 | |
2040 | break; |
2041 | case MEDIA_UNSPECIFIED: |
2042 | case MEDIA_NOT_PRESENT: |
2043 | default: |
2044 | DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, |
2045 | "Unknown media and transceiver type;\n" ); |
2046 | break; |
2047 | } |
2048 | } |
2049 | |
2050 | static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) |
2051 | { |
2052 | *speed_mask = 0; |
2053 | |
2054 | if (caps & |
2055 | (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) |
2056 | *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; |
2057 | if (caps & QED_LINK_PARTNER_SPEED_10G) |
2058 | *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; |
2059 | if (caps & QED_LINK_PARTNER_SPEED_20G) |
2060 | *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; |
2061 | if (caps & QED_LINK_PARTNER_SPEED_25G) |
2062 | *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; |
2063 | if (caps & QED_LINK_PARTNER_SPEED_40G) |
2064 | *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; |
2065 | if (caps & QED_LINK_PARTNER_SPEED_50G) |
2066 | *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; |
2067 | if (caps & QED_LINK_PARTNER_SPEED_100G) |
2068 | *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; |
2069 | } |
2070 | |
2071 | static void qed_fill_link(struct qed_hwfn *hwfn, |
2072 | struct qed_ptt *ptt, |
2073 | struct qed_link_output *if_link) |
2074 | { |
2075 | struct qed_mcp_link_capabilities link_caps; |
2076 | struct qed_mcp_link_params params; |
2077 | struct qed_mcp_link_state link; |
2078 | u32 media_type, speed_mask; |
2079 | |
2080 | memset(if_link, 0, sizeof(*if_link)); |
2081 | |
2082 | /* Prepare source inputs */ |
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
2084 | dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n" ); |
2085 | return; |
2086 | } |
2087 | |
2088 | /* Set the link parameters to pass to protocol driver */ |
2089 | if (link.link_up) |
2090 | if_link->link_up = true; |
2091 | |
	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2111 | |
2112 | if (params.speed.autoneg) |
2113 | phylink_set(if_link->advertised_caps, Autoneg); |
2114 | else |
2115 | phylink_clear(if_link->advertised_caps, Autoneg); |
2116 | } |
2117 | |
2118 | if (params.pause.autoneg || |
2119 | (params.pause.forced_rx && params.pause.forced_tx)) |
2120 | phylink_set(if_link->supported_caps, Asym_Pause); |
2121 | if (params.pause.autoneg || params.pause.forced_rx || |
2122 | params.pause.forced_tx) |
2123 | phylink_set(if_link->supported_caps, Pause); |
2124 | |
2125 | if_link->sup_fec = link_caps.fec_default; |
2126 | if_link->active_fec = params.fec; |
2127 | |
2128 | /* Fill link advertised capability */ |
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
2146 | if_link->port = qed_get_port_type(media_type); |
2147 | |
2148 | if_link->autoneg = params.speed.autoneg; |
2149 | |
2150 | if (params.pause.autoneg) |
2151 | if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; |
2152 | if (params.pause.forced_rx) |
2153 | if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; |
2154 | if (params.pause.forced_tx) |
2155 | if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; |
2156 | |
2157 | if (link.an_complete) |
2158 | phylink_set(if_link->lp_caps, Autoneg); |
2159 | if (link.partner_adv_pause) |
2160 | phylink_set(if_link->lp_caps, Pause); |
2161 | if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || |
2162 | link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) |
2163 | phylink_set(if_link->lp_caps, Asym_Pause); |
2164 | |
2165 | if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { |
2166 | if_link->eee_supported = false; |
2167 | } else { |
2168 | if_link->eee_supported = true; |
2169 | if_link->eee_active = link.eee_active; |
2170 | if_link->sup_caps = link_caps.eee_speed_caps; |
2171 | /* MFW clears adv_caps on eee disable; use configured value */ |
2172 | if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : |
2173 | params.eee.adv_caps; |
2174 | if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; |
2175 | if_link->eee.enable = params.eee.enable; |
2176 | if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; |
2177 | if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; |
2178 | } |
2179 | } |
2180 | |
2181 | static void qed_get_current_link(struct qed_dev *cdev, |
2182 | struct qed_link_output *if_link) |
2183 | { |
2184 | struct qed_hwfn *hwfn; |
2185 | struct qed_ptt *ptt; |
2186 | int i; |
2187 | |
2188 | hwfn = &cdev->hwfns[0]; |
2189 | if (IS_PF(cdev)) { |
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
2194 | } else { |
2195 | DP_NOTICE(hwfn, "Failed to fill link; No PTT\n" ); |
2196 | } |
2197 | } else { |
2198 | qed_fill_link(hwfn, NULL, if_link); |
2199 | } |
2200 | |
2201 | for_each_hwfn(cdev, i) |
		qed_inform_vf_link_state(&cdev->hwfns[i]);
2203 | } |
2204 | |
2205 | void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) |
2206 | { |
2207 | void *cookie = hwfn->cdev->ops_cookie; |
2208 | struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; |
2209 | struct qed_link_output if_link; |
2210 | |
	qed_fill_link(hwfn, ptt, &if_link);
2212 | qed_inform_vf_link_state(hwfn); |
2213 | |
2214 | if (IS_LEAD_HWFN(hwfn) && cookie) |
2215 | op->link_update(cookie, &if_link); |
2216 | } |
2217 | |
2218 | void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) |
2219 | { |
2220 | void *cookie = hwfn->cdev->ops_cookie; |
2221 | struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; |
2222 | |
2223 | if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) |
2224 | op->bw_update(cookie); |
2225 | } |
2226 | |
2227 | static int qed_drain(struct qed_dev *cdev) |
2228 | { |
2229 | struct qed_hwfn *hwfn; |
2230 | struct qed_ptt *ptt; |
2231 | int i, rc; |
2232 | |
2233 | if (IS_VF(cdev)) |
2234 | return 0; |
2235 | |
2236 | for_each_hwfn(cdev, i) { |
2237 | hwfn = &cdev->hwfns[i]; |
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
2245 | if (rc) |
2246 | return rc; |
2247 | } |
2248 | |
2249 | return 0; |
2250 | } |
2251 | |
2252 | static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, |
2253 | struct qed_nvm_image_att *nvm_image, |
2254 | u32 *crc) |
2255 | { |
2256 | u8 *buf = NULL; |
2257 | int rc; |
2258 | |
2259 | /* Allocate a buffer for holding the nvram image */ |
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
2267 | if (rc) { |
2268 | DP_ERR(cdev, "Failed reading image from nvm\n" ); |
2269 | goto out; |
2270 | } |
2271 | |
2272 | /* Convert the buffer into big-endian format (excluding the |
2273 | * closing 4 bytes of CRC). |
2274 | */ |
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);

out:
	kfree(buf);
2286 | |
2287 | return rc; |
2288 | } |
2289 | |
2290 | /* Binary file format - |
2291 | * /----------------------------------------------------------------------\ |
2292 | * 0B | 0x4 [command index] | |
2293 | * 4B | image_type | Options | Number of register settings | |
2294 | * 8B | Value | |
2295 | * 12B | Mask | |
2296 | * 16B | Offset | |
2297 | * \----------------------------------------------------------------------/ |
2298 | * There can be several Value-Mask-Offset sets as specified by 'Number of...'. |
2299 | * Options - 0'b - Calculate & Update CRC for image |
2300 | */ |
2301 | static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, |
2302 | bool *check_resp) |
2303 | { |
2304 | struct qed_nvm_image_att nvm_image; |
2305 | struct qed_hwfn *p_hwfn; |
2306 | bool is_crc = false; |
2307 | u32 image_type; |
2308 | int rc = 0, i; |
2309 | u16 len; |
2310 | |
2311 | *data += 4; |
2312 | image_type = **data; |
2313 | p_hwfn = QED_LEADING_HWFN(cdev); |
2314 | for (i = 0; i < p_hwfn->nvm_info.num_images; i++) |
2315 | if (image_type == p_hwfn->nvm_info.image_att[i].image_type) |
2316 | break; |
2317 | if (i == p_hwfn->nvm_info.num_images) { |
2318 | DP_ERR(cdev, "Failed to find nvram image of type %08x\n" , |
2319 | image_type); |
2320 | return -ENOENT; |
2321 | } |
2322 | |
2323 | nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; |
2324 | nvm_image.length = p_hwfn->nvm_info.image_att[i].len; |
2325 | |
2326 | DP_VERBOSE(cdev, NETIF_MSG_DRV, |
2327 | "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n" , |
2328 | **data, image_type, nvm_image.start_addr, |
2329 | nvm_image.start_addr + nvm_image.length - 1); |
2330 | (*data)++; |
2331 | is_crc = !!(**data & BIT(0)); |
2332 | (*data)++; |
2333 | len = *((u16 *)*data); |
2334 | *data += 2; |
2335 | if (is_crc) { |
2336 | u32 crc = 0; |
2337 | |
		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
2347 | if (rc) |
2348 | DP_ERR(cdev, "Failed writing to %08x, rc = %d\n" , |
2349 | nvm_image.start_addr + nvm_image.length - 4, rc); |
2350 | goto exit; |
2351 | } |
2352 | |
2353 | /* Iterate over the values for setting */ |
2354 | while (len) { |
2355 | u32 offset, mask, value, cur_value; |
2356 | u8 buf[4]; |
2357 | |
2358 | value = *((u32 *)*data); |
2359 | *data += 4; |
2360 | mask = *((u32 *)*data); |
2361 | *data += 4; |
2362 | offset = *((u32 *)*data); |
2363 | *data += 4; |
2364 | |
		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
2382 | if (rc) { |
2383 | DP_ERR(cdev, "Failed writing to %08x\n" , |
2384 | nvm_image.start_addr + offset); |
2385 | goto exit; |
2386 | } |
2387 | |
2388 | len--; |
2389 | } |
2390 | exit: |
2391 | return rc; |
2392 | } |
2393 | |
2394 | /* Binary file format - |
2395 | * /----------------------------------------------------------------------\ |
2396 | * 0B | 0x3 [command index] | |
2397 | * 4B | b'0: check_response? | b'1-31 reserved | |
2398 | * 8B | File-type | reserved | |
2399 | * 12B | Image length in bytes | |
2400 | * \----------------------------------------------------------------------/ |
2401 | * Start a new file of the provided type |
2402 | */ |
2403 | static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, |
2404 | const u8 **data, bool *check_resp) |
2405 | { |
2406 | u32 file_type, file_size = 0; |
2407 | int rc; |
2408 | |
2409 | *data += 4; |
2410 | *check_resp = !!(**data & BIT(0)); |
2411 | *data += 4; |
2412 | file_type = **data; |
2413 | |
2414 | DP_VERBOSE(cdev, NETIF_MSG_DRV, |
2415 | "About to start a new file of type %02x\n" , file_type); |
2416 | if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { |
2417 | *data += 4; |
2418 | file_size = *((u32 *)(*data)); |
2419 | } |
2420 | |
	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
2423 | *data += 4; |
2424 | |
2425 | return rc; |
2426 | } |
2427 | |
2428 | /* Binary file format - |
2429 | * /----------------------------------------------------------------------\ |
2430 | * 0B | 0x2 [command index] | |
2431 | * 4B | Length in bytes | |
2432 | * 8B | b'0: check_response? | b'1-31 reserved | |
2433 | * 12B | Offset in bytes | |
2434 | * 16B | Data ... | |
2435 | * \----------------------------------------------------------------------/ |
2436 | * Write data as part of a file that was previously started. Data should be |
2437 | * of length equal to that provided in the message |
2438 | */ |
2439 | static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, |
2440 | const u8 **data, bool *check_resp) |
2441 | { |
2442 | u32 offset, len; |
2443 | int rc; |
2444 | |
2445 | *data += 4; |
2446 | len = *((u32 *)(*data)); |
2447 | *data += 4; |
2448 | *check_resp = !!(**data & BIT(0)); |
2449 | *data += 4; |
2450 | offset = *((u32 *)(*data)); |
2451 | *data += 4; |
2452 | |
2453 | DP_VERBOSE(cdev, NETIF_MSG_DRV, |
2454 | "About to write File-data: %08x bytes to offset %08x\n" , |
2455 | len, offset); |
2456 | |
	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
2459 | *data += len; |
2460 | |
2461 | return rc; |
2462 | } |
2463 | |
2464 | /* Binary file format [General header] - |
2465 | * /----------------------------------------------------------------------\ |
2466 | * 0B | QED_NVM_SIGNATURE | |
2467 | * 4B | Length in bytes | |
2468 | * 8B | Highest command in this batchfile | Reserved | |
2469 | * \----------------------------------------------------------------------/ |
2470 | */ |
2471 | static int qed_nvm_flash_image_validate(struct qed_dev *cdev, |
2472 | const struct firmware *image, |
2473 | const u8 **data) |
2474 | { |
2475 | u32 signature, len; |
2476 | |
2477 | /* Check minimum size */ |
2478 | if (image->size < 12) { |
2479 | DP_ERR(cdev, "Image is too short [%08x]\n" , (u32)image->size); |
2480 | return -EINVAL; |
2481 | } |
2482 | |
2483 | /* Check signature */ |
2484 | signature = *((u32 *)(*data)); |
2485 | if (signature != QED_NVM_SIGNATURE) { |
2486 | DP_ERR(cdev, "Wrong signature '%08x'\n" , signature); |
2487 | return -EINVAL; |
2488 | } |
2489 | |
2490 | *data += 4; |
2491 | /* Validate internal size equals the image-size */ |
2492 | len = *((u32 *)(*data)); |
2493 | if (len != image->size) { |
2494 | DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n" , |
2495 | len, (u32)image->size); |
2496 | return -EINVAL; |
2497 | } |
2498 | |
2499 | *data += 4; |
2500 | /* Make sure driver familiar with all commands necessary for this */ |
2501 | if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { |
2502 | DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n" , |
2503 | *((u16 *)(*data))); |
2504 | return -EINVAL; |
2505 | } |
2506 | |
2507 | *data += 4; |
2508 | |
2509 | return 0; |
2510 | } |
2511 | |
2512 | /* Binary file format - |
2513 | * /----------------------------------------------------------------------\ |
2514 | * 0B | 0x5 [command index] | |
2515 | * 4B | Number of config attributes | Reserved | |
2516 | * 4B | Config ID | Entity ID | Length | |
2517 | * 4B | Value | |
2518 | * | | |
2519 | * \----------------------------------------------------------------------/ |
2520 | * There can be several cfg_id-entity_id-Length-Value sets as specified by |
2521 | * 'Number of config attributes'. |
2522 | * |
2523 | * The API parses config attributes from the user provided buffer and flashes |
 * them to the respective NVM path using Management FW interface.
2525 | */ |
2526 | static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) |
2527 | { |
2528 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2529 | u8 entity_id, len, buf[32]; |
2530 | bool need_nvm_init = true; |
2531 | struct qed_ptt *ptt; |
2532 | u16 cfg_id, count; |
2533 | int rc = 0, i; |
2534 | u32 flags; |
2535 | |
	ptt = qed_ptt_acquire(hwfn);
2537 | if (!ptt) |
2538 | return -EAGAIN; |
2539 | |
2540 | /* NVM CFG ID attribute header */ |
2541 | *data += 4; |
2542 | count = *((u16 *)*data); |
2543 | *data += 4; |
2544 | |
2545 | DP_VERBOSE(cdev, NETIF_MSG_DRV, |
2546 | "Read config ids: num_attrs = %0d\n" , count); |
2547 | /* NVM CFG ID attributes. Start loop index from 1 to avoid additional |
2548 | * arithmetic operations in the implementation. |
2549 | */ |
2550 | for (i = 1; i <= count; i++) { |
2551 | cfg_id = *((u16 *)*data); |
2552 | *data += 2; |
2553 | entity_id = **data; |
2554 | (*data)++; |
2555 | len = **data; |
2556 | (*data)++; |
2557 | memcpy(buf, *data, len); |
2558 | *data += len; |
2559 | |
2560 | flags = 0; |
2561 | if (need_nvm_init) { |
2562 | flags |= QED_NVM_CFG_OPTION_INIT; |
2563 | need_nvm_init = false; |
2564 | } |
2565 | |
2566 | /* Commit to flash and free the resources */ |
2567 | if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { |
2568 | flags |= QED_NVM_CFG_OPTION_COMMIT | |
2569 | QED_NVM_CFG_OPTION_FREE; |
2570 | need_nvm_init = true; |
2571 | } |
2572 | |
2573 | if (entity_id) |
2574 | flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; |
2575 | |
2576 | DP_VERBOSE(cdev, NETIF_MSG_DRV, |
2577 | "cfg_id = %d entity = %d len = %d\n" , cfg_id, |
2578 | entity_id, len); |
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);
2588 | |
2589 | return rc; |
2590 | } |
2591 | |
2592 | #define QED_MAX_NVM_BUF_LEN 32 |
2593 | static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) |
2594 | { |
2595 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2596 | u8 buf[QED_MAX_NVM_BUF_LEN]; |
2597 | struct qed_ptt *ptt; |
2598 | u32 len; |
2599 | int rc; |
2600 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);
2613 | |
2614 | return len; |
2615 | } |
2616 | |
2617 | static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, |
2618 | u32 cmd, u32 entity_id) |
2619 | { |
2620 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2621 | struct qed_ptt *ptt; |
2622 | u32 flags, len; |
2623 | int rc = 0; |
2624 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);
2637 | |
2638 | return rc; |
2639 | } |
2640 | |
2641 | static int qed_nvm_flash(struct qed_dev *cdev, const char *name) |
2642 | { |
2643 | const struct firmware *image; |
2644 | const u8 *data, *data_end; |
2645 | u32 cmd_type; |
2646 | int rc; |
2647 | |
	rc = request_firmware(&image, name, &cdev->pdev->dev);
2649 | if (rc) { |
2650 | DP_ERR(cdev, "Failed to find '%s'\n" , name); |
2651 | return rc; |
2652 | } |
2653 | |
2654 | DP_VERBOSE(cdev, NETIF_MSG_DRV, |
2655 | "Flashing '%s' - firmware's data at %p, size is %08x\n" , |
2656 | name, image->data, (u32)image->size); |
2657 | data = image->data; |
2658 | data_end = data + image->size; |
2659 | |
	rc = qed_nvm_flash_image_validate(cdev, image, &data);
2661 | if (rc) |
2662 | goto exit; |
2663 | |
2664 | while (data < data_end) { |
2665 | bool check_resp = false; |
2666 | |
2667 | /* Parse the actual command */ |
2668 | cmd_type = *((u32 *)data); |
2669 | switch (cmd_type) { |
2670 | case QED_NVM_FLASH_CMD_FILE_DATA: |
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
2684 | break; |
2685 | default: |
2686 | DP_ERR(cdev, "Unknown command %08x\n" , cmd_type); |
2687 | rc = -EINVAL; |
2688 | goto exit; |
2689 | } |
2690 | |
2691 | if (rc) { |
2692 | DP_ERR(cdev, "Command %08x failed\n" , cmd_type); |
2693 | goto exit; |
2694 | } |
2695 | |
2696 | /* Check response if needed */ |
2697 | if (check_resp) { |
2698 | u32 mcp_response = 0; |
2699 | |
			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2701 | DP_ERR(cdev, "Failed getting MCP response\n" ); |
2702 | rc = -EINVAL; |
2703 | goto exit; |
2704 | } |
2705 | |
2706 | switch (mcp_response & FW_MSG_CODE_MASK) { |
2707 | case FW_MSG_CODE_OK: |
2708 | case FW_MSG_CODE_NVM_OK: |
2709 | case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: |
2710 | case FW_MSG_CODE_PHY_OK: |
2711 | break; |
2712 | default: |
2713 | DP_ERR(cdev, "MFW returns error: %08x\n" , |
2714 | mcp_response); |
2715 | rc = -EINVAL; |
2716 | goto exit; |
2717 | } |
2718 | } |
2719 | } |
2720 | |
2721 | exit: |
	release_firmware(image);
2723 | |
2724 | return rc; |
2725 | } |
2726 | |
2727 | static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, |
2728 | u8 *buf, u16 len) |
2729 | { |
2730 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2731 | |
	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2733 | } |
2734 | |
2735 | void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) |
2736 | { |
2737 | struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; |
2738 | void *cookie = p_hwfn->cdev->ops_cookie; |
2739 | |
2740 | if (ops && ops->schedule_recovery_handler) |
2741 | ops->schedule_recovery_handler(cookie); |
2742 | } |
2743 | |
2744 | static const char * const qed_hw_err_type_descr[] = { |
2745 | [QED_HW_ERR_FAN_FAIL] = "Fan Failure" , |
2746 | [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure" , |
2747 | [QED_HW_ERR_HW_ATTN] = "HW Attention" , |
2748 | [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure" , |
2749 | [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure" , |
2750 | [QED_HW_ERR_FW_ASSERT] = "FW Assertion" , |
2751 | [QED_HW_ERR_LAST] = "Unknown" , |
2752 | }; |
2753 | |
2754 | void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, |
2755 | enum qed_hw_err_type err_type) |
2756 | { |
2757 | struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; |
2758 | void *cookie = p_hwfn->cdev->ops_cookie; |
2759 | const char *err_str; |
2760 | |
2761 | if (err_type > QED_HW_ERR_LAST) |
2762 | err_type = QED_HW_ERR_LAST; |
2763 | err_str = qed_hw_err_type_descr[err_type]; |
2764 | |
2765 | DP_NOTICE(p_hwfn, "HW error occurred [%s]\n" , err_str); |
2766 | |
2767 | /* Call the HW error handler of the protocol driver. |
2768 | * If it is not available - perform a minimal handling of preventing |
2769 | * HW attentions from being reasserted. |
2770 | */ |
2771 | if (ops && ops->schedule_hw_err_handler) |
2772 | ops->schedule_hw_err_handler(cookie, err_type); |
2773 | else |
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
2775 | } |
2776 | |
2777 | static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, |
2778 | void *handle) |
2779 | { |
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2781 | } |
2782 | |
2783 | static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) |
2784 | { |
2785 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2786 | struct qed_ptt *ptt; |
2787 | int status = 0; |
2788 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);
2796 | |
2797 | return status; |
2798 | } |
2799 | |
2800 | int qed_recovery_process(struct qed_dev *cdev) |
2801 | { |
2802 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
2803 | struct qed_ptt *p_ptt; |
2804 | int rc = 0; |
2805 | |
2806 | p_ptt = qed_ptt_acquire(p_hwfn); |
2807 | if (!p_ptt) |
2808 | return -EAGAIN; |
2809 | |
2810 | rc = qed_start_recovery_process(p_hwfn, p_ptt); |
2811 | |
2812 | qed_ptt_release(p_hwfn, p_ptt); |
2813 | |
2814 | return rc; |
2815 | } |
2816 | |
2817 | static int qed_update_wol(struct qed_dev *cdev, bool enabled) |
2818 | { |
2819 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2820 | struct qed_ptt *ptt; |
2821 | int rc = 0; |
2822 | |
2823 | if (IS_VF(cdev)) |
2824 | return 0; |
2825 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
2838 | return rc; |
2839 | } |
2840 | |
2841 | static int qed_update_drv_state(struct qed_dev *cdev, bool active) |
2842 | { |
2843 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2844 | struct qed_ptt *ptt; |
2845 | int status = 0; |
2846 | |
2847 | if (IS_VF(cdev)) |
2848 | return 0; |
2849 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);
2859 | |
2860 | return status; |
2861 | } |
2862 | |
2863 | static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) |
2864 | { |
2865 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2866 | struct qed_ptt *ptt; |
2867 | int status = 0; |
2868 | |
2869 | if (IS_VF(cdev)) |
2870 | return 0; |
2871 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
2884 | return status; |
2885 | } |
2886 | |
2887 | static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) |
2888 | { |
2889 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2890 | struct qed_ptt *ptt; |
2891 | int status = 0; |
2892 | |
2893 | if (IS_VF(cdev)) |
2894 | return 0; |
2895 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
2908 | return status; |
2909 | } |
2910 | |
2911 | static int |
2912 | qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, |
2913 | u16 qid, struct qed_sb_info_dbg *sb_dbg) |
2914 | { |
2915 | struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; |
2916 | struct qed_ptt *ptt; |
2917 | int rc; |
2918 | |
2919 | if (IS_VF(cdev)) |
2920 | return -EINVAL; |
2921 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Can't acquire PTT\n");
		return -EAGAIN;
	}

	memset(sb_dbg, 0, sizeof(*sb_dbg));
	rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);

	qed_ptt_release(hwfn, ptt);
2932 | return rc; |
2933 | } |
2934 | |
2935 | static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, |
2936 | u8 dev_addr, u32 offset, u32 len) |
2937 | { |
2938 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2939 | struct qed_ptt *ptt; |
2940 | int rc = 0; |
2941 | |
2942 | if (IS_VF(cdev)) |
2943 | return 0; |
2944 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);
2953 | |
2954 | return rc; |
2955 | } |
2956 | |
2957 | static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) |
2958 | { |
2959 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2960 | struct qed_ptt *ptt; |
2961 | int rc = 0; |
2962 | |
2963 | if (IS_VF(cdev)) |
2964 | return 0; |
2965 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);
2973 | |
2974 | return rc; |
2975 | } |
2976 | |
2977 | static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) |
2978 | { |
2979 | char buf[QED_MFW_REPORT_STR_SIZE]; |
2980 | struct qed_hwfn *p_hwfn; |
2981 | struct qed_ptt *p_ptt; |
2982 | va_list vl; |
2983 | |
2984 | va_start(vl, fmt); |
	vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
	va_end(vl);

	if (IS_PF(cdev)) {
		p_hwfn = QED_LEADING_HWFN(cdev);
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (p_ptt) {
			qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf));
2993 | qed_ptt_release(p_hwfn, p_ptt); |
2994 | } |
2995 | } |
2996 | } |
2997 | |
2998 | static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) |
2999 | { |
3000 | return QED_AFFIN_HWFN_IDX(cdev); |
3001 | } |
3002 | |
3003 | static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) |
3004 | { |
3005 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
3006 | struct qed_ptt *ptt; |
3007 | int rc = 0; |
3008 | |
3009 | *esl_active = false; |
3010 | |
3011 | if (IS_VF(cdev)) |
3012 | return 0; |
3013 | |
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);

	qed_ptt_release(hwfn, ptt);
3021 | |
3022 | return rc; |
3023 | } |
3024 | |
3025 | static struct qed_selftest_ops qed_selftest_ops_pass = { |
3026 | .selftest_memory = &qed_selftest_memory, |
3027 | .selftest_interrupt = &qed_selftest_interrupt, |
3028 | .selftest_register = &qed_selftest_register, |
3029 | .selftest_clock = &qed_selftest_clock, |
3030 | .selftest_nvram = &qed_selftest_nvram, |
3031 | }; |
3032 | |
3033 | const struct qed_common_ops qed_common_ops_pass = { |
3034 | .selftest = &qed_selftest_ops_pass, |
3035 | .probe = &qed_probe, |
3036 | .remove = &qed_remove, |
3037 | .set_power_state = &qed_set_power_state, |
3038 | .set_name = &qed_set_name, |
3039 | .update_pf_params = &qed_update_pf_params, |
3040 | .slowpath_start = &qed_slowpath_start, |
3041 | .slowpath_stop = &qed_slowpath_stop, |
3042 | .set_fp_int = &qed_set_int_fp, |
3043 | .get_fp_int = &qed_get_int_fp, |
3044 | .sb_init = &qed_sb_init, |
3045 | .sb_release = &qed_sb_release, |
3046 | .simd_handler_config = &qed_simd_handler_config, |
3047 | .simd_handler_clean = &qed_simd_handler_clean, |
3048 | .dbg_grc = &qed_dbg_grc, |
3049 | .dbg_grc_size = &qed_dbg_grc_size, |
3050 | .can_link_change = &qed_can_link_change, |
3051 | .set_link = &qed_set_link, |
3052 | .get_link = &qed_get_current_link, |
3053 | .drain = &qed_drain, |
3054 | .update_msglvl = &qed_init_dp, |
3055 | .devlink_register = qed_devlink_register, |
3056 | .devlink_unregister = qed_devlink_unregister, |
3057 | .report_fatal_error = qed_report_fatal_error, |
3058 | .dbg_all_data = &qed_dbg_all_data, |
3059 | .dbg_all_data_size = &qed_dbg_all_data_size, |
3060 | .chain_alloc = &qed_chain_alloc, |
3061 | .chain_free = &qed_chain_free, |
3062 | .nvm_flash = &qed_nvm_flash, |
3063 | .nvm_get_image = &qed_nvm_get_image, |
3064 | .set_coalesce = &qed_set_coalesce, |
3065 | .set_led = &qed_set_led, |
3066 | .recovery_process = &qed_recovery_process, |
3067 | .recovery_prolog = &qed_recovery_prolog, |
3068 | .attn_clr_enable = &qed_int_attn_clr_enable, |
3069 | .update_drv_state = &qed_update_drv_state, |
3070 | .update_mac = &qed_update_mac, |
3071 | .update_mtu = &qed_update_mtu, |
3072 | .update_wol = &qed_update_wol, |
3073 | .db_recovery_add = &qed_db_recovery_add, |
3074 | .db_recovery_del = &qed_db_recovery_del, |
3075 | .read_module_eeprom = &qed_read_module_eeprom, |
3076 | .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, |
3077 | .read_nvm_cfg = &qed_nvm_flash_cfg_read, |
3078 | .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, |
3079 | .set_grc_config = &qed_set_grc_config, |
3080 | .mfw_report = &qed_mfw_report, |
3081 | .get_sb_info = &qed_get_sb_info, |
3082 | .get_esl_status = &qed_get_esl_status, |
3083 | }; |
3084 | |
3085 | void qed_get_protocol_stats(struct qed_dev *cdev, |
3086 | enum qed_mcp_protocol_type type, |
3087 | union qed_mcp_protocol_stats *stats) |
3088 | { |
3089 | struct qed_eth_stats eth_stats; |
3090 | |
3091 | memset(stats, 0, sizeof(*stats)); |
3092 | |
3093 | switch (type) { |
3094 | case QED_MCP_LAN_STATS: |
		qed_get_vport_stats_context(cdev, &eth_stats, true);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
3107 | break; |
3108 | default: |
3109 | DP_VERBOSE(cdev, QED_MSG_SP, |
3110 | "Invalid protocol type = %d\n" , type); |
3111 | return; |
3112 | } |
3113 | } |
3114 | |
3115 | int qed_mfw_tlv_req(struct qed_hwfn *hwfn) |
3116 | { |
3117 | DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, |
3118 | "Scheduling slowpath task [Flag: %d]\n" , |
3119 | QED_SLOWPATH_MFW_TLV_REQ); |
3120 | /* Memory barrier for setting atomic bit */ |
3121 | smp_mb__before_atomic(); |
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
3126 | |
3127 | return 0; |
3128 | } |
3129 | |
3130 | static void |
3131 | qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) |
3132 | { |
3133 | struct qed_common_cb_ops *op = cdev->protocol_ops.common; |
3134 | struct qed_eth_stats_common *p_common; |
3135 | struct qed_generic_tlvs gen_tlvs; |
3136 | struct qed_eth_stats stats; |
3137 | int i; |
3138 | |
3139 | memset(&gen_tlvs, 0, sizeof(gen_tlvs)); |
3140 | op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); |
3141 | |
3142 | if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) |
3143 | tlv->flags.ipv4_csum_offload = true; |
3144 | if (gen_tlvs.feat_flags & QED_TLV_LSO) |
3145 | tlv->flags.lso_supported = true; |
3146 | tlv->flags.b_set = true; |
3147 | |
3148 | for (i = 0; i < QED_TLV_MAC_COUNT; i++) { |
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
3156 | p_common = &stats.common; |
3157 | tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + |
3158 | p_common->rx_bcast_pkts; |
3159 | tlv->rx_frames_set = true; |
3160 | tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + |
3161 | p_common->rx_bcast_bytes; |
3162 | tlv->rx_bytes_set = true; |
3163 | tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + |
3164 | p_common->tx_bcast_pkts; |
3165 | tlv->tx_frames_set = true; |
3166 | tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + |
3167 | p_common->tx_bcast_bytes; |
	tlv->tx_bytes_set = true;
3169 | } |
3170 | |
3171 | int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, |
3172 | union qed_mfw_tlv_data *tlv_buf) |
3173 | { |
3174 | struct qed_dev *cdev = hwfn->cdev; |
3175 | struct qed_common_cb_ops *ops; |
3176 | |
3177 | ops = cdev->protocol_ops.common; |
3178 | if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { |
3179 | DP_NOTICE(hwfn, "Can't collect TLV management info\n" ); |
3180 | return -EINVAL; |
3181 | } |
3182 | |
3183 | switch (type) { |
3184 | case QED_MFW_TLV_GENERIC: |
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
3186 | break; |
3187 | case QED_MFW_TLV_ETH: |
3188 | ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); |
3189 | break; |
3190 | case QED_MFW_TLV_FCOE: |
3191 | ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); |
3192 | break; |
3193 | case QED_MFW_TLV_ISCSI: |
3194 | ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); |
3195 | break; |
3196 | default: |
3197 | break; |
3198 | } |
3199 | |
3200 | return 0; |
3201 | } |
3202 | |
3203 | unsigned long qed_get_epoch_time(void) |
3204 | { |
3205 | return ktime_get_real_seconds(); |
3206 | } |
3207 | |