1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2015 Cavium, Inc. |
4 | */ |
5 | |
6 | #include <linux/acpi.h> |
7 | #include <linux/module.h> |
8 | #include <linux/interrupt.h> |
9 | #include <linux/pci.h> |
10 | #include <linux/netdevice.h> |
11 | #include <linux/etherdevice.h> |
12 | #include <linux/phy.h> |
13 | #include <linux/of.h> |
14 | #include <linux/of_mdio.h> |
15 | #include <linux/of_net.h> |
16 | |
17 | #include "nic_reg.h" |
18 | #include "nic.h" |
19 | #include "thunder_bgx.h" |
20 | |
21 | #define DRV_NAME "thunder_bgx" |
22 | #define DRV_VERSION "1.0" |
23 | |
24 | /* RX_DMAC_CTL configuration */ |
25 | enum MCAST_MODE { |
26 | MCAST_MODE_REJECT = 0x0, |
27 | MCAST_MODE_ACCEPT = 0x1, |
28 | MCAST_MODE_CAM_FILTER = 0x2, |
29 | RSVD = 0x3 |
30 | }; |
31 | |
32 | #define BCAST_ACCEPT BIT(0) |
33 | #define CAM_ACCEPT BIT(3) |
34 | #define MCAST_MODE_MASK 0x3 |
35 | #define BGX_MCAST_MODE(x) (x << 1) |
36 | |
37 | struct dmac_map { |
38 | u64 vf_map; |
39 | u64 dmac; |
40 | }; |
41 | |
42 | struct lmac { |
43 | struct bgx *bgx; |
44 | /* actual number of DMACs configured */ |
45 | u8 dmacs_cfg; |
	/* overall number of DMACs that can be configured per LMAC */
47 | u8 dmacs_count; |
48 | struct dmac_map *dmacs; /* DMAC:VFs tracking filter array */ |
49 | u8 mac[ETH_ALEN]; |
50 | u8 lmac_type; |
51 | u8 lane_to_sds; |
52 | bool use_training; |
53 | bool autoneg; |
54 | bool link_up; |
55 | int lmacid; /* ID within BGX */ |
56 | int lmacid_bd; /* ID on board */ |
57 | struct net_device netdev; |
58 | struct phy_device *phydev; |
59 | unsigned int last_duplex; |
60 | unsigned int last_link; |
61 | unsigned int last_speed; |
62 | bool is_sgmii; |
63 | struct delayed_work dwork; |
64 | struct workqueue_struct *check_link; |
65 | }; |
66 | |
67 | struct bgx { |
68 | u8 bgx_id; |
69 | struct lmac lmac[MAX_LMAC_PER_BGX]; |
70 | u8 lmac_count; |
71 | u8 max_lmac; |
72 | u8 acpi_lmac_idx; |
73 | void __iomem *reg_base; |
74 | struct pci_dev *pdev; |
75 | bool is_dlm; |
76 | bool is_rgx; |
77 | }; |
78 | |
79 | static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; |
80 | static int lmac_count; /* Total no of LMACs in system */ |
81 | |
82 | static int bgx_xaui_check_link(struct lmac *lmac); |
83 | |
84 | /* Supported devices */ |
85 | static const struct pci_device_id bgx_id_table[] = { |
86 | { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, |
87 | { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) }, |
88 | { 0, } /* end of table */ |
89 | }; |
90 | |
MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
94 | MODULE_VERSION(DRV_VERSION); |
95 | MODULE_DEVICE_TABLE(pci, bgx_id_table); |
96 | |
97 | /* The Cavium ThunderX network controller can *only* be found in SoCs |
98 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device |
99 | * registers on this platform are implicitly strongly ordered with respect |
100 | * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use |
101 | * with no memory barriers in this driver. The readq()/writeq() functions add |
102 | * explicit ordering operation which in this case are redundant, and only |
103 | * add overhead. |
104 | */ |
105 | |
106 | /* Register read/write APIs */ |
107 | static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset) |
108 | { |
109 | void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; |
110 | |
111 | return readq_relaxed(addr); |
112 | } |
113 | |
114 | static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val) |
115 | { |
116 | void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; |
117 | |
118 | writeq_relaxed(val, addr); |
119 | } |
120 | |
121 | static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val) |
122 | { |
123 | void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; |
124 | |
125 | writeq_relaxed(val | readq_relaxed(addr), addr); |
126 | } |
127 | |
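/* Poll a per-LMAC register until the bits in @mask are cleared (@zero == true)
 * or set (@zero == false). Returns 0 on success and 1 on timeout, after
 * roughly 100-200ms of polling.
 */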
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}
144 | |
145 | static int max_bgx_per_node; |
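/* Determine how many BGX blocks exist per NUMA node from the PCI subsystem
 * device ID; this is used to index into the global bgx_vnic[] array.
 */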
146 | static void set_max_bgx_per_node(struct pci_dev *pdev) |
147 | { |
148 | u16 sdevid; |
149 | |
150 | if (max_bgx_per_node) |
151 | return; |
152 | |
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
154 | switch (sdevid) { |
155 | case PCI_SUBSYS_DEVID_81XX_BGX: |
156 | case PCI_SUBSYS_DEVID_81XX_RGX: |
157 | max_bgx_per_node = MAX_BGX_PER_CN81XX; |
158 | break; |
159 | case PCI_SUBSYS_DEVID_83XX_BGX: |
160 | max_bgx_per_node = MAX_BGX_PER_CN83XX; |
161 | break; |
162 | case PCI_SUBSYS_DEVID_88XX_BGX: |
163 | default: |
164 | max_bgx_per_node = MAX_BGX_PER_CN88XX; |
165 | break; |
166 | } |
167 | } |
168 | |
169 | static struct bgx *get_bgx(int node, int bgx_idx) |
170 | { |
171 | int idx = (node * max_bgx_per_node) + bgx_idx; |
172 | |
173 | return bgx_vnic[idx]; |
174 | } |
175 | |
/* Return a bitmap of the BGX blocks present in HW on this node */
177 | unsigned bgx_get_map(int node) |
178 | { |
179 | int i; |
180 | unsigned map = 0; |
181 | |
182 | for (i = 0; i < max_bgx_per_node; i++) { |
183 | if (bgx_vnic[(node * max_bgx_per_node) + i]) |
184 | map |= (1 << i); |
185 | } |
186 | |
187 | return map; |
188 | } |
189 | EXPORT_SYMBOL(bgx_get_map); |
190 | |
191 | /* Return number of LMAC configured for this BGX */ |
192 | int bgx_get_lmac_count(int node, int bgx_idx) |
193 | { |
194 | struct bgx *bgx; |
195 | |
196 | bgx = get_bgx(node, bgx_idx); |
197 | if (bgx) |
198 | return bgx->lmac_count; |
199 | |
200 | return 0; |
201 | } |
202 | EXPORT_SYMBOL(bgx_get_lmac_count); |
203 | |
204 | /* Returns the current link status of LMAC */ |
205 | void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) |
206 | { |
207 | struct bgx_link_status *link = (struct bgx_link_status *)status; |
208 | struct bgx *bgx; |
209 | struct lmac *lmac; |
210 | |
211 | bgx = get_bgx(node, bgx_idx); |
212 | if (!bgx) |
213 | return; |
214 | |
215 | lmac = &bgx->lmac[lmacid]; |
216 | link->mac_type = lmac->lmac_type; |
217 | link->link_up = lmac->link_up; |
218 | link->duplex = lmac->last_duplex; |
219 | link->speed = lmac->last_speed; |
220 | } |
221 | EXPORT_SYMBOL(bgx_get_lmac_link_state); |
222 | |
223 | const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) |
224 | { |
225 | struct bgx *bgx = get_bgx(node, bgx_idx); |
226 | |
227 | if (bgx) |
228 | return bgx->lmac[lmacid].mac; |
229 | |
230 | return NULL; |
231 | } |
232 | EXPORT_SYMBOL(bgx_get_lmac_mac); |
233 | |
234 | void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) |
235 | { |
236 | struct bgx *bgx = get_bgx(node, bgx_idx); |
237 | |
238 | if (!bgx) |
239 | return; |
240 | |
	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
242 | } |
243 | EXPORT_SYMBOL(bgx_set_lmac_mac); |
244 | |
245 | static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid) |
246 | { |
247 | struct lmac *lmac = NULL; |
248 | u8 idx = 0; |
249 | |
250 | lmac = &bgx->lmac[lmacid]; |
251 | /* reset CAM filters */ |
252 | for (idx = 0; idx < lmac->dmacs_count; idx++) |
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
			      ((lmacid * lmac->dmacs_count) + idx) *
			      sizeof(u64), 0);
256 | } |
257 | |
258 | static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id) |
259 | { |
260 | int i = 0; |
261 | |
262 | if (!lmac) |
263 | return; |
264 | |
	/* One of the attached VFs has requested a filter reset, while the
	 * others might want to keep their configuration. So iterate over all
	 * configured filters and drop this VF's reference; if an address ends
	 * up with zero references, remove it from the list.
	 */
270 | for (i = lmac->dmacs_cfg - 1; i >= 0; i--) { |
271 | lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id); |
272 | if (!lmac->dmacs[i].vf_map) { |
273 | lmac->dmacs_cfg--; |
274 | lmac->dmacs[i].dmac = 0; |
275 | lmac->dmacs[i].vf_map = 0; |
276 | } |
277 | } |
278 | } |
279 | |
280 | static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id) |
281 | { |
282 | u8 i = 0; |
283 | |
284 | if (!lmac) |
285 | return -1; |
286 | |
	/* Several VFs can be 'attached' to a particular LMAC at the same
	 * time, and each VF is represented to the kernel as a network
	 * interface. So from the user's perspective it should be possible to
	 * manipulate each VF's receive mode independently. From the PF
	 * driver's perspective, however, we need to keep track of the filter
	 * configuration of every VF to avoid duplicate filter entries.
	 */
294 | for (i = 0; i < lmac->dmacs_cfg; i++) { |
295 | if (lmac->dmacs[i].dmac == dmac) { |
296 | lmac->dmacs[i].vf_map |= BIT_ULL(vf_id); |
297 | return -1; |
298 | } |
299 | } |
300 | |
301 | if (!(lmac->dmacs_cfg < lmac->dmacs_count)) |
302 | return -1; |
303 | |
304 | /* keep it for further tracking */ |
305 | lmac->dmacs[lmac->dmacs_cfg].dmac = dmac; |
306 | lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id); |
307 | lmac->dmacs_cfg++; |
308 | return 0; |
309 | } |
310 | |
311 | static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid, |
312 | u64 cam_dmac, u8 idx) |
313 | { |
314 | struct lmac *lmac = NULL; |
315 | u64 cfg = 0; |
316 | |
317 | /* skip zero addresses as meaningless */ |
318 | if (!cam_dmac || !bgx) |
319 | return -1; |
320 | |
321 | lmac = &bgx->lmac[lmacid]; |
322 | |
323 | /* configure DCAM filtering for designated LMAC */ |
324 | cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) | |
325 | RX_DMACX_CAM_EN | cam_dmac; |
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
		      ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
328 | return 0; |
329 | } |
330 | |
331 | void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, |
332 | u64 cam_dmac, u8 vf_id) |
333 | { |
334 | struct bgx *bgx = get_bgx(node, bgx_idx); |
335 | struct lmac *lmac = NULL; |
336 | |
337 | if (!bgx) |
338 | return; |
339 | |
340 | lmac = &bgx->lmac[lmacid]; |
341 | |
	if (!cam_dmac)
		cam_dmac = ether_addr_to_u64(lmac->mac);

	/* Since we might have several VFs attached to a particular LMAC
	 * and the kernel could call mcast config for each of them with the
	 * same MAC, check if the requested MAC is already in the filtering
	 * list and update/prepare the list of MACs to be applied to the HW
	 * filters later.
	 */
	bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
351 | } |
352 | EXPORT_SYMBOL(bgx_set_dmac_cam_filter); |
353 | |
354 | void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode) |
355 | { |
356 | struct bgx *bgx = get_bgx(node, bgx_idx); |
357 | struct lmac *lmac = NULL; |
358 | u64 cfg = 0; |
359 | u8 i = 0; |
360 | |
361 | if (!bgx) |
362 | return; |
363 | |
364 | lmac = &bgx->lmac[lmacid]; |
365 | |
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
	if (mode & BGX_XCAST_BCAST_ACCEPT)
		cfg |= BCAST_ACCEPT;
	else
		cfg &= ~BCAST_ACCEPT;

	/* disable all MCASTs and DMAC filtering */
	cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));

	/* check requested bits and set filtering mode appropriately */
	if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
		cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
	} else if (mode & BGX_XCAST_MCAST_FILTER) {
		cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
		for (i = 0; i < lmac->dmacs_cfg; i++)
			bgx_set_dmac_cam_filter_mac(bgx, lmacid,
						    lmac->dmacs[i].dmac, i);
	}
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
385 | } |
386 | EXPORT_SYMBOL(bgx_set_xcast_mode); |
387 | |
388 | void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id) |
389 | { |
390 | struct bgx *bgx = get_bgx(node, bgx_idx); |
391 | |
392 | if (!bgx) |
393 | return; |
394 | |
	bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
396 | bgx_flush_dmac_cam_filter(bgx, lmacid); |
397 | bgx_set_xcast_mode(node, bgx_idx, lmacid, |
398 | (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT)); |
399 | } |
400 | EXPORT_SYMBOL(bgx_reset_xcast_mode); |
401 | |
402 | void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) |
403 | { |
404 | struct bgx *bgx = get_bgx(node, bgx_idx); |
405 | struct lmac *lmac; |
406 | u64 cfg; |
407 | |
408 | if (!bgx) |
409 | return; |
410 | lmac = &bgx->lmac[lmacid]; |
411 | |
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable) {
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;

		/* enable TX FIFO Underflow interrupt */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
			       GMI_TXX_INT_UNDFLW);
	} else {
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);

		/* Disable TX FIFO Underflow interrupt */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
			       GMI_TXX_INT_UNDFLW);
	}
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
430 | } |
431 | EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); |
432 | |
433 | /* Enables or disables timestamp insertion by BGX for Rx packets */ |
434 | void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable) |
435 | { |
436 | struct bgx *bgx = get_bgx(node, bgx_idx); |
437 | struct lmac *lmac; |
438 | u64 csr_offset, cfg; |
439 | |
440 | if (!bgx) |
441 | return; |
442 | |
443 | lmac = &bgx->lmac[lmacid]; |
444 | |
445 | if (lmac->lmac_type == BGX_MODE_SGMII || |
446 | lmac->lmac_type == BGX_MODE_QSGMII || |
447 | lmac->lmac_type == BGX_MODE_RGMII) |
448 | csr_offset = BGX_GMP_GMI_RXX_FRM_CTL; |
449 | else |
450 | csr_offset = BGX_SMUX_RX_FRM_CTL; |
451 | |
	cfg = bgx_reg_read(bgx, lmacid, csr_offset);
453 | |
454 | if (enable) |
455 | cfg |= BGX_PKT_RX_PTP_EN; |
456 | else |
457 | cfg &= ~BGX_PKT_RX_PTP_EN; |
	bgx_reg_write(bgx, lmacid, csr_offset, cfg);
459 | } |
460 | EXPORT_SYMBOL(bgx_config_timestamping); |
461 | |
462 | void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause) |
463 | { |
464 | struct pfc *pfc = (struct pfc *)pause; |
465 | struct bgx *bgx = get_bgx(node, bgx_idx); |
466 | struct lmac *lmac; |
467 | u64 cfg; |
468 | |
469 | if (!bgx) |
470 | return; |
471 | lmac = &bgx->lmac[lmacid]; |
472 | if (lmac->is_sgmii) |
473 | return; |
474 | |
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
476 | pfc->fc_rx = cfg & RX_EN; |
477 | pfc->fc_tx = cfg & TX_EN; |
478 | pfc->autoneg = 0; |
479 | } |
480 | EXPORT_SYMBOL(bgx_lmac_get_pfc); |
481 | |
482 | void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause) |
483 | { |
484 | struct pfc *pfc = (struct pfc *)pause; |
485 | struct bgx *bgx = get_bgx(node, bgx_idx); |
486 | struct lmac *lmac; |
487 | u64 cfg; |
488 | |
489 | if (!bgx) |
490 | return; |
491 | lmac = &bgx->lmac[lmacid]; |
492 | if (lmac->is_sgmii) |
493 | return; |
494 | |
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	cfg &= ~(RX_EN | TX_EN);
	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
500 | } |
501 | EXPORT_SYMBOL(bgx_lmac_set_pfc); |
502 | |
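/* Reprogram the GMI port configuration and PCS misc control to match the
 * current link state (speed/duplex) of an SGMII/RGMII LMAC. Packet RX/TX is
 * paused while the port is reconfigured and restored afterwards.
 */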
503 | static void bgx_sgmii_change_link_state(struct lmac *lmac) |
504 | { |
505 | struct bgx *bgx = lmac->bgx; |
506 | u64 cmr_cfg; |
507 | u64 port_cfg = 0; |
508 | u64 misc_ctl = 0; |
509 | bool tx_en, rx_en; |
510 | |
	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	tx_en = cmr_cfg & CMR_PKT_TX_EN;
	rx_en = cmr_cfg & CMR_PKT_RX_EN;
	cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
535 | |
536 | if (lmac->link_up) { |
537 | misc_ctl &= ~PCS_MISC_CTL_GMX_ENO; |
538 | port_cfg &= ~GMI_PORT_CFG_DUPLEX; |
539 | port_cfg |= (lmac->last_duplex << 2); |
540 | } else { |
541 | misc_ctl |= PCS_MISC_CTL_GMX_ENO; |
542 | } |
543 | |
544 | switch (lmac->last_speed) { |
545 | case 10: |
546 | port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ |
547 | port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */ |
548 | port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ |
549 | misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; |
550 | misc_ctl |= 50; /* samp_pt */ |
551 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, val: 64); |
552 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_GMP_GMI_TXX_BURST, val: 0); |
553 | break; |
554 | case 100: |
555 | port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ |
556 | port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ |
557 | port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ |
558 | misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; |
559 | misc_ctl |= 5; /* samp_pt */ |
560 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, val: 64); |
561 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_GMP_GMI_TXX_BURST, val: 0); |
562 | break; |
563 | case 1000: |
564 | port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */ |
565 | port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ |
566 | port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */ |
567 | misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; |
568 | misc_ctl |= 1; /* samp_pt */ |
569 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, val: 512); |
570 | if (lmac->last_duplex) |
571 | bgx_reg_write(bgx, lmac: lmac->lmacid, |
572 | BGX_GMP_GMI_TXX_BURST, val: 0); |
573 | else |
574 | bgx_reg_write(bgx, lmac: lmac->lmacid, |
575 | BGX_GMP_GMI_TXX_BURST, val: 8192); |
576 | break; |
577 | default: |
578 | break; |
579 | } |
580 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, val: misc_ctl); |
581 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, val: port_cfg); |
582 | |
583 | /* Restore CMR config settings */ |
584 | cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0); |
585 | bgx_reg_write(bgx, lmac: lmac->lmacid, BGX_CMRX_CFG, val: cmr_cfg); |
586 | |
587 | if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) |
588 | xcv_setup_link(link_up: lmac->link_up, link_speed: lmac->last_speed); |
589 | } |
590 | |
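/* Link adjust callback invoked by the PHY library when the attached PHY
 * reports a link change; caches the new state and reconfigures the LMAC.
 */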
591 | static void bgx_lmac_handler(struct net_device *netdev) |
592 | { |
593 | struct lmac *lmac = container_of(netdev, struct lmac, netdev); |
594 | struct phy_device *phydev; |
595 | int link_changed = 0; |
596 | |
597 | phydev = lmac->phydev; |
598 | |
599 | if (!phydev->link && lmac->last_link) |
600 | link_changed = -1; |
601 | |
602 | if (phydev->link && |
603 | (lmac->last_duplex != phydev->duplex || |
604 | lmac->last_link != phydev->link || |
605 | lmac->last_speed != phydev->speed)) { |
606 | link_changed = 1; |
607 | } |
608 | |
609 | lmac->last_link = phydev->link; |
610 | lmac->last_speed = phydev->speed; |
611 | lmac->last_duplex = phydev->duplex; |
612 | |
613 | if (!link_changed) |
614 | return; |
615 | |
616 | if (link_changed > 0) |
617 | lmac->link_up = true; |
618 | else |
619 | lmac->link_up = false; |
620 | |
621 | if (lmac->is_sgmii) |
622 | bgx_sgmii_change_link_state(lmac); |
623 | else |
624 | bgx_xaui_check_link(lmac); |
625 | } |
626 | |
627 | u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) |
628 | { |
629 | struct bgx *bgx; |
630 | |
631 | bgx = get_bgx(node, bgx_idx); |
632 | if (!bgx) |
633 | return 0; |
634 | |
635 | if (idx > 8) |
636 | lmac = 0; |
637 | return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)); |
638 | } |
639 | EXPORT_SYMBOL(bgx_get_rx_stats); |
640 | |
641 | u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) |
642 | { |
643 | struct bgx *bgx; |
644 | |
645 | bgx = get_bgx(node, bgx_idx); |
646 | if (!bgx) |
647 | return 0; |
648 | |
649 | return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)); |
650 | } |
651 | EXPORT_SYMBOL(bgx_get_tx_stats); |
652 | |
653 | /* Configure BGX LMAC in internal loopback mode */ |
654 | void bgx_lmac_internal_loopback(int node, int bgx_idx, |
655 | int lmac_idx, bool enable) |
656 | { |
657 | struct bgx *bgx; |
658 | struct lmac *lmac; |
659 | u64 cfg; |
660 | |
661 | bgx = get_bgx(node, bgx_idx); |
662 | if (!bgx) |
663 | return; |
664 | |
665 | lmac = &bgx->lmac[lmac_idx]; |
666 | if (lmac->is_sgmii) { |
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
680 | } |
681 | } |
682 | EXPORT_SYMBOL(bgx_lmac_internal_loopback); |
683 | |
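/* Bring up the GMP/PCS block for an SGMII/QSGMII/RGMII LMAC: program jabber
 * and TX thresholds, reset the PCS and, depending on whether a PHY is
 * attached, enable in-band auto-negotiation.
 */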
684 | static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) |
685 | { |
686 | int lmacid = lmac->lmacid; |
687 | u64 cfg; |
688 | |
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
697 | |
698 | /* Enable lmac */ |
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
706 | return -1; |
707 | } |
708 | |
709 | /* power down, reset autoneg, autoneg enable */ |
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
711 | cfg &= ~PCS_MRX_CTL_PWR_DN; |
712 | cfg |= PCS_MRX_CTL_RST_AN; |
713 | if (lmac->phydev) { |
714 | cfg |= PCS_MRX_CTL_AN_EN; |
715 | } else { |
		/* In scenarios where the PHY driver is not present or it's a
		 * non-standard PHY, firmware sets AN_EN to tell the Linux
		 * driver whether to do auto-negotiation and link polling.
		 */
720 | if (cfg & PCS_MRX_CTL_AN_EN) |
721 | lmac->autoneg = true; |
722 | } |
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
724 | |
725 | if (lmac->lmac_type == BGX_MODE_QSGMII) { |
726 | /* Disable disparity check for QSGMII */ |
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
730 | return 0; |
731 | } |
732 | |
	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
737 | return -1; |
738 | } |
739 | } |
740 | |
741 | return 0; |
742 | } |
743 | |
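/* Bring up the SPU/SMU blocks for the 10G/40G LMAC types (XAUI, RXAUI, XFI,
 * XLAUI, 10G_KR, 40G_KR): reset the SPU, clear stale interrupts, optionally
 * enable link training, and configure FEC, autoneg advertisement and pause
 * frame parameters.
 */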
744 | static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac) |
745 | { |
746 | u64 cfg; |
747 | int lmacid = lmac->lmacid; |
748 | |
749 | /* Reset SPU */ |
750 | bgx_reg_modify(bgx, lmac: lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); |
751 | if (bgx_poll_reg(bgx, lmac: lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, zero: true)) { |
752 | dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n" ); |
753 | return -1; |
754 | } |
755 | |
756 | /* Disable LMAC */ |
757 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_CMRX_CFG); |
758 | cfg &= ~CMR_EN; |
759 | bgx_reg_write(bgx, lmac: lmacid, BGX_CMRX_CFG, val: cfg); |
760 | |
761 | bgx_reg_modify(bgx, lmac: lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); |
762 | /* Set interleaved running disparity for RXAUI */ |
763 | if (lmac->lmac_type == BGX_MODE_RXAUI) |
764 | bgx_reg_modify(bgx, lmac: lmacid, BGX_SPUX_MISC_CONTROL, |
765 | SPU_MISC_CTL_INTLV_RDISP); |
766 | |
767 | /* Clear receive packet disable */ |
768 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_MISC_CONTROL); |
769 | cfg &= ~SPU_MISC_CTL_RX_DIS; |
770 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_MISC_CONTROL, val: cfg); |
771 | |
772 | /* clear all interrupts */ |
773 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SMUX_RX_INT); |
774 | bgx_reg_write(bgx, lmac: lmacid, BGX_SMUX_RX_INT, val: cfg); |
775 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SMUX_TX_INT); |
776 | bgx_reg_write(bgx, lmac: lmacid, BGX_SMUX_TX_INT, val: cfg); |
777 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_INT); |
778 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_INT, val: cfg); |
779 | |
780 | if (lmac->use_training) { |
781 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_BR_PMD_LP_CUP, val: 0x00); |
782 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_BR_PMD_LD_CUP, val: 0x00); |
783 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_BR_PMD_LD_REP, val: 0x00); |
784 | /* training enable */ |
785 | bgx_reg_modify(bgx, lmac: lmacid, |
786 | BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN); |
787 | } |
788 | |
789 | /* Append FCS to each packet */ |
790 | bgx_reg_modify(bgx, lmac: lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); |
791 | |
792 | /* Disable forward error correction */ |
793 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_FEC_CONTROL); |
794 | cfg &= ~SPU_FEC_CTL_FEC_EN; |
795 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_FEC_CONTROL, val: cfg); |
796 | |
797 | /* Disable autoneg */ |
798 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_AN_CONTROL); |
799 | cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN); |
800 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_AN_CONTROL, val: cfg); |
801 | |
802 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_AN_ADV); |
803 | if (lmac->lmac_type == BGX_MODE_10G_KR) |
804 | cfg |= (1 << 23); |
805 | else if (lmac->lmac_type == BGX_MODE_40G_KR) |
806 | cfg |= (1 << 24); |
807 | else |
808 | cfg &= ~((1 << 23) | (1 << 24)); |
809 | cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12))); |
810 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_AN_ADV, val: cfg); |
811 | |
812 | cfg = bgx_reg_read(bgx, lmac: 0, BGX_SPU_DBG_CONTROL); |
813 | cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; |
814 | bgx_reg_write(bgx, lmac: 0, BGX_SPU_DBG_CONTROL, val: cfg); |
815 | |
816 | /* Enable lmac */ |
817 | bgx_reg_modify(bgx, lmac: lmacid, BGX_CMRX_CFG, CMR_EN); |
818 | |
819 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_CONTROL1); |
820 | cfg &= ~SPU_CTL_LOW_POWER; |
821 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_CONTROL1, val: cfg); |
822 | |
823 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SMUX_TX_CTL); |
824 | cfg &= ~SMU_TX_CTL_UNI_EN; |
825 | cfg |= SMU_TX_CTL_DIC_EN; |
826 | bgx_reg_write(bgx, lmac: lmacid, BGX_SMUX_TX_CTL, val: cfg); |
827 | |
828 | /* Enable receive and transmission of pause frames */ |
829 | bgx_reg_write(bgx, lmac: lmacid, BGX_SMUX_CBFC_CTL, val: ((0xffffULL << 32) | |
830 | BCK_EN | DRP_EN | TX_EN | RX_EN)); |
831 | /* Configure pause time and interval */ |
832 | bgx_reg_write(bgx, lmac: lmacid, |
833 | BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME); |
834 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL); |
835 | cfg &= ~0xFFFFull; |
836 | bgx_reg_write(bgx, lmac: lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL, |
837 | val: cfg | (DEFAULT_PAUSE_TIME - 0x1000)); |
838 | bgx_reg_write(bgx, lmac: lmacid, BGX_SMUX_TX_PAUSE_ZERO, val: 0x01); |
839 | |
840 | /* take lmac_count into account */ |
841 | bgx_reg_modify(bgx, lmac: lmacid, BGX_SMUX_TX_THRESH, val: (0x100 - 1)); |
842 | /* max packet size */ |
843 | bgx_reg_modify(bgx, lmac: lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); |
844 | |
845 | return 0; |
846 | } |
847 | |
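/* Verify that a non-SGMII LMAC has a usable link: wait for the SPU to leave
 * reset, check block lock/alignment and receive-fault status, and make sure
 * the SMU is idle. Returns 0 when the link is good, -1 otherwise (restarting
 * training or re-initialising the LMAC where appropriate).
 */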
848 | static int bgx_xaui_check_link(struct lmac *lmac) |
849 | { |
850 | struct bgx *bgx = lmac->bgx; |
851 | int lmacid = lmac->lmacid; |
852 | int lmac_type = lmac->lmac_type; |
853 | u64 cfg; |
854 | |
855 | if (lmac->use_training) { |
856 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_INT); |
857 | if (!(cfg & (1ull << 13))) { |
858 | cfg = (1ull << 13) | (1ull << 14); |
859 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_INT, val: cfg); |
860 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_BR_PMD_CRTL); |
861 | cfg |= (1ull << 0); |
862 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_BR_PMD_CRTL, val: cfg); |
863 | return -1; |
864 | } |
865 | } |
866 | |
867 | /* wait for PCS to come out of reset */ |
868 | if (bgx_poll_reg(bgx, lmac: lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, zero: true)) { |
869 | dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n" ); |
870 | return -1; |
871 | } |
872 | |
873 | if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) || |
874 | (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { |
875 | if (bgx_poll_reg(bgx, lmac: lmacid, BGX_SPUX_BR_STATUS1, |
876 | SPU_BR_STATUS_BLK_LOCK, zero: false)) { |
877 | dev_err(&bgx->pdev->dev, |
878 | "SPU_BR_STATUS_BLK_LOCK not completed\n" ); |
879 | return -1; |
880 | } |
881 | } else { |
882 | if (bgx_poll_reg(bgx, lmac: lmacid, BGX_SPUX_BX_STATUS, |
883 | SPU_BX_STATUS_RX_ALIGN, zero: false)) { |
884 | dev_err(&bgx->pdev->dev, |
885 | "SPU_BX_STATUS_RX_ALIGN not completed\n" ); |
886 | return -1; |
887 | } |
888 | } |
889 | |
890 | /* Clear rcvflt bit (latching high) and read it back */ |
891 | if (bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) |
892 | bgx_reg_modify(bgx, lmac: lmacid, |
893 | BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); |
894 | if (bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { |
895 | dev_err(&bgx->pdev->dev, "Receive fault, retry training\n" ); |
896 | if (lmac->use_training) { |
897 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SPUX_INT); |
898 | if (!(cfg & (1ull << 13))) { |
899 | cfg = (1ull << 13) | (1ull << 14); |
900 | bgx_reg_write(bgx, lmac: lmacid, BGX_SPUX_INT, val: cfg); |
901 | cfg = bgx_reg_read(bgx, lmac: lmacid, |
902 | BGX_SPUX_BR_PMD_CRTL); |
903 | cfg |= (1ull << 0); |
904 | bgx_reg_write(bgx, lmac: lmacid, |
905 | BGX_SPUX_BR_PMD_CRTL, val: cfg); |
906 | return -1; |
907 | } |
908 | } |
909 | return -1; |
910 | } |
911 | |
912 | /* Wait for BGX RX to be idle */ |
913 | if (bgx_poll_reg(bgx, lmac: lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, zero: false)) { |
914 | dev_err(&bgx->pdev->dev, "SMU RX not idle\n" ); |
915 | return -1; |
916 | } |
917 | |
918 | /* Wait for BGX TX to be idle */ |
919 | if (bgx_poll_reg(bgx, lmac: lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, zero: false)) { |
920 | dev_err(&bgx->pdev->dev, "SMU TX not idle\n" ); |
921 | return -1; |
922 | } |
923 | |
924 | /* Check for MAC RX faults */ |
925 | cfg = bgx_reg_read(bgx, lmac: lmacid, BGX_SMUX_RX_CTL); |
926 | /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */ |
927 | cfg &= SMU_RX_CTL_STATUS; |
928 | if (!cfg) |
929 | return 0; |
930 | |
931 | /* Rx local/remote fault seen. |
932 | * Do lmac reinit to see if condition recovers |
933 | */ |
934 | bgx_lmac_xaui_init(bgx, lmac); |
935 | |
936 | return -1; |
937 | } |
938 | |
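/* Poll the PCS status of an SGMII LMAC, derive speed/duplex from the
 * auto-negotiation results and reschedule itself via the check_link
 * workqueue.
 */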
939 | static void bgx_poll_for_sgmii_link(struct lmac *lmac) |
940 | { |
941 | u64 pcs_link, an_result; |
942 | u8 speed; |
943 | |
	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
				BGX_GMP_PCS_MRX_STATUS);

	/* Link state bit is sticky, read it again */
	if (!(pcs_link & PCS_MRX_STATUS_LINK))
		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
					BGX_GMP_PCS_MRX_STATUS);

	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
954 | lmac->link_up = false; |
955 | lmac->last_speed = SPEED_UNKNOWN; |
956 | lmac->last_duplex = DUPLEX_UNKNOWN; |
957 | goto next_poll; |
958 | } |
959 | |
960 | lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false; |
961 | an_result = bgx_reg_read(bgx: lmac->bgx, lmac: lmac->lmacid, |
962 | BGX_GMP_PCS_ANX_AN_RESULTS); |
963 | |
964 | speed = (an_result >> 3) & 0x3; |
965 | lmac->last_duplex = (an_result >> 1) & 0x1; |
966 | switch (speed) { |
967 | case 0: |
968 | lmac->last_speed = SPEED_10; |
969 | break; |
970 | case 1: |
971 | lmac->last_speed = SPEED_100; |
972 | break; |
973 | case 2: |
974 | lmac->last_speed = SPEED_1000; |
975 | break; |
976 | default: |
977 | lmac->link_up = false; |
978 | lmac->last_speed = SPEED_UNKNOWN; |
979 | lmac->last_duplex = DUPLEX_UNKNOWN; |
980 | break; |
981 | } |
982 | |
983 | next_poll: |
984 | |
985 | if (lmac->last_link != lmac->link_up) { |
986 | if (lmac->link_up) |
987 | bgx_sgmii_change_link_state(lmac); |
988 | lmac->last_link = lmac->link_up; |
989 | } |
990 | |
	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
992 | } |
993 | |
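/* Periodic link poll for LMACs without a PHY driver: checks SPU/SMU receive
 * link status (or delegates to the SGMII poller) and updates the cached link
 * state before rescheduling itself.
 */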
994 | static void bgx_poll_for_link(struct work_struct *work) |
995 | { |
996 | struct lmac *lmac; |
997 | u64 spu_link, smu_link; |
998 | |
999 | lmac = container_of(work, struct lmac, dwork.work); |
1000 | if (lmac->is_sgmii) { |
1001 | bgx_poll_for_sgmii_link(lmac); |
1002 | return; |
1003 | } |
1004 | |
1005 | /* Receive link is latching low. Force it high and verify it */ |
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
1013 | |
1014 | if ((spu_link & SPU_STATUS1_RCV_LNK) && |
1015 | !(smu_link & SMU_RX_CTL_STATUS)) { |
1016 | lmac->link_up = true; |
1017 | if (lmac->lmac_type == BGX_MODE_XLAUI) |
1018 | lmac->last_speed = SPEED_40000; |
1019 | else |
1020 | lmac->last_speed = SPEED_10000; |
1021 | lmac->last_duplex = DUPLEX_FULL; |
1022 | } else { |
1023 | lmac->link_up = false; |
1024 | lmac->last_speed = SPEED_UNKNOWN; |
1025 | lmac->last_duplex = DUPLEX_UNKNOWN; |
1026 | } |
1027 | |
1028 | if (lmac->last_link != lmac->link_up) { |
1029 | if (lmac->link_up) { |
1030 | if (bgx_xaui_check_link(lmac)) { |
1031 | /* Errors, clear link_up state */ |
1032 | lmac->link_up = false; |
1033 | lmac->last_speed = SPEED_UNKNOWN; |
1034 | lmac->last_duplex = DUPLEX_UNKNOWN; |
1035 | } |
1036 | } |
1037 | lmac->last_link = lmac->link_up; |
1038 | } |
1039 | |
	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
1041 | } |
1042 | |
1043 | static int phy_interface_mode(u8 lmac_type) |
1044 | { |
1045 | if (lmac_type == BGX_MODE_QSGMII) |
1046 | return PHY_INTERFACE_MODE_QSGMII; |
1047 | if (lmac_type == BGX_MODE_RGMII) |
1048 | return PHY_INTERFACE_MODE_RGMII_RXID; |
1049 | |
1050 | return PHY_INTERFACE_MODE_SGMII; |
1051 | } |
1052 | |
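/* One-time init of an LMAC at probe: program the MAC sub-block, allocate the
 * DMAC filter table, and either attach the PHY driver or fall back to
 * firmware-driven auto-negotiation / polled link management.
 */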
1053 | static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) |
1054 | { |
1055 | struct lmac *lmac; |
1056 | u64 cfg; |
1057 | |
1058 | lmac = &bgx->lmac[lmacid]; |
1059 | lmac->bgx = bgx; |
1060 | |
1061 | if ((lmac->lmac_type == BGX_MODE_SGMII) || |
1062 | (lmac->lmac_type == BGX_MODE_QSGMII) || |
1063 | (lmac->lmac_type == BGX_MODE_RGMII)) { |
1064 | lmac->is_sgmii = true; |
1065 | if (bgx_lmac_sgmii_init(bgx, lmac)) |
1066 | return -1; |
1067 | } else { |
1068 | lmac->is_sgmii = false; |
1069 | if (bgx_lmac_xaui_init(bgx, lmac)) |
1070 | return -1; |
1071 | } |
1072 | |
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* actual number of filters available to this LMAC */
	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
			      GFP_KERNEL);
	if (!lmac->dmacs)
		return -ENOMEM;

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
1097 | |
1098 | if ((lmac->lmac_type != BGX_MODE_XFI) && |
1099 | (lmac->lmac_type != BGX_MODE_XLAUI) && |
1100 | (lmac->lmac_type != BGX_MODE_40G_KR) && |
1101 | (lmac->lmac_type != BGX_MODE_10G_KR)) { |
1102 | if (!lmac->phydev) { |
1103 | if (lmac->autoneg) { |
				bgx_reg_write(bgx, lmacid,
1105 | BGX_GMP_PCS_LINKX_TIMER, |
1106 | PCS_LINKX_TIMER_COUNT); |
1107 | goto poll; |
1108 | } else { |
1109 | /* Default to below link speed and duplex */ |
1110 | lmac->link_up = true; |
1111 | lmac->last_speed = SPEED_1000; |
1112 | lmac->last_duplex = DUPLEX_FULL; |
1113 | bgx_sgmii_change_link_state(lmac); |
1114 | return 0; |
1115 | } |
1116 | } |
		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       phy_interface_mode(lmac->lmac_type)))
			return -ENODEV;

		phy_start(lmac->phydev);
		return 0;
	}

poll:
	lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM);
	if (!lmac->check_link)
		return -ENOMEM;
	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
1134 | |
1135 | return 0; |
1136 | } |
1137 | |
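/* Tear down an LMAC: stop the link-poll workqueue, quiesce and disable
 * packet RX/TX, power down the serdes side and release the DMAC filter
 * table and any attached PHY.
 */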
1138 | static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) |
1139 | { |
1140 | struct lmac *lmac; |
1141 | u64 cfg; |
1142 | |
1143 | lmac = &bgx->lmac[lmacid]; |
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_cam_filter(bgx, lmacid);
	kfree(lmac->dmacs);
1179 | |
1180 | if ((lmac->lmac_type != BGX_MODE_XFI) && |
1181 | (lmac->lmac_type != BGX_MODE_XLAUI) && |
1182 | (lmac->lmac_type != BGX_MODE_40G_KR) && |
1183 | (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) |
		phy_disconnect(lmac->phydev);
1185 | |
1186 | lmac->phydev = NULL; |
1187 | } |
1188 | |
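/* Global BGX block init at probe time: check BIST status, program each
 * LMAC's type and lane-to-serdes mapping, set up backpressure channel masks
 * and clear all DMAC CAM and steering entries.
 */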
1189 | static void bgx_init_hw(struct bgx *bgx) |
1190 | { |
1191 | int i; |
1192 | struct lmac *lmac; |
1193 | |
	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
1223 | } |
1224 | |
1225 | static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) |
1226 | { |
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
1228 | } |
1229 | |
1230 | static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) |
1231 | { |
1232 | struct device *dev = &bgx->pdev->dev; |
1233 | struct lmac *lmac; |
1234 | char str[27]; |
1235 | |
1236 | if (!bgx->is_dlm && lmacid) |
1237 | return; |
1238 | |
1239 | lmac = &bgx->lmac[lmacid]; |
1240 | if (!bgx->is_dlm) |
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
1244 | |
1245 | switch (lmac->lmac_type) { |
1246 | case BGX_MODE_SGMII: |
1247 | dev_info(dev, "%s: SGMII\n" , (char *)str); |
1248 | break; |
1249 | case BGX_MODE_XAUI: |
1250 | dev_info(dev, "%s: XAUI\n" , (char *)str); |
1251 | break; |
1252 | case BGX_MODE_RXAUI: |
1253 | dev_info(dev, "%s: RXAUI\n" , (char *)str); |
1254 | break; |
1255 | case BGX_MODE_XFI: |
1256 | if (!lmac->use_training) |
1257 | dev_info(dev, "%s: XFI\n" , (char *)str); |
1258 | else |
1259 | dev_info(dev, "%s: 10G_KR\n" , (char *)str); |
1260 | break; |
1261 | case BGX_MODE_XLAUI: |
1262 | if (!lmac->use_training) |
1263 | dev_info(dev, "%s: XLAUI\n" , (char *)str); |
1264 | else |
1265 | dev_info(dev, "%s: 40G_KR4\n" , (char *)str); |
1266 | break; |
1267 | case BGX_MODE_QSGMII: |
1268 | dev_info(dev, "%s: QSGMII\n" , (char *)str); |
1269 | break; |
1270 | case BGX_MODE_RGMII: |
1271 | dev_info(dev, "%s: RGMII\n" , (char *)str); |
1272 | break; |
1273 | case BGX_MODE_INVALID: |
1274 | /* Nothing to do */ |
1275 | break; |
1276 | } |
1277 | } |
1278 | |
1279 | static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac) |
1280 | { |
1281 | switch (lmac->lmac_type) { |
1282 | case BGX_MODE_SGMII: |
1283 | case BGX_MODE_XFI: |
1284 | lmac->lane_to_sds = lmac->lmacid; |
1285 | break; |
1286 | case BGX_MODE_XAUI: |
1287 | case BGX_MODE_XLAUI: |
1288 | case BGX_MODE_RGMII: |
1289 | lmac->lane_to_sds = 0xE4; |
1290 | break; |
1291 | case BGX_MODE_RXAUI: |
1292 | lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4; |
1293 | break; |
1294 | case BGX_MODE_QSGMII: |
1295 | /* There is no way to determine if DLM0/2 is QSGMII or |
1296 | * DLM1/3 is configured to QSGMII as bootloader will |
1297 | * configure all LMACs, so take whatever is configured |
1298 | * by low level firmware. |
1299 | */ |
1300 | lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac); |
1301 | break; |
1302 | default: |
1303 | lmac->lane_to_sds = 0; |
1304 | break; |
1305 | } |
1306 | } |
1307 | |
1308 | static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) |
1309 | { |
1310 | if ((lmac->lmac_type != BGX_MODE_10G_KR) && |
1311 | (lmac->lmac_type != BGX_MODE_40G_KR)) { |
1312 | lmac->use_training = false; |
1313 | return; |
1314 | } |
1315 | |
	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
			SPU_PMD_CRTL_TRAIN_EN;
1318 | } |
1319 | |
1320 | static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) |
1321 | { |
1322 | struct lmac *lmac; |
1323 | u64 cmr_cfg; |
1324 | u8 lmac_type; |
1325 | u8 lane_to_sds; |
1326 | |
1327 | lmac = &bgx->lmac[idx]; |
1328 | |
1329 | if (!bgx->is_dlm || bgx->is_rgx) { |
1330 | /* Read LMAC0 type to figure out QLM mode |
1331 | * This is configured by low level firmware |
1332 | */ |
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
1339 | return; |
1340 | } |
1341 | |
	/* For DLMs or SLMs on 80/81/83xx, many lane configurations are
	 * possible and they vary across boards. The kernel has no way to
	 * identify the board type, but the firmware does, so just take the
	 * LMAC type and serdes lane config as configured by firmware.
	 */
	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
1348 | lmac_type = (u8)((cmr_cfg >> 8) & 0x07); |
1349 | lane_to_sds = (u8)(cmr_cfg & 0xFF); |
1350 | /* Check if config is reset value */ |
1351 | if ((lmac_type == 0) && (lane_to_sds == 0xE4)) |
1352 | lmac->lmac_type = BGX_MODE_INVALID; |
1353 | else |
1354 | lmac->lmac_type = lmac_type; |
1355 | lmac->lane_to_sds = lane_to_sds; |
	lmac_set_training(bgx, lmac, lmac->lmacid);
1357 | } |
1358 | |
1359 | static void bgx_get_qlm_mode(struct bgx *bgx) |
1360 | { |
1361 | struct lmac *lmac; |
1362 | u8 idx; |
1363 | |
1364 | /* Init all LMAC's type to invalid */ |
1365 | for (idx = 0; idx < bgx->max_lmac; idx++) { |
1366 | lmac = &bgx->lmac[idx]; |
1367 | lmac->lmacid = idx; |
1368 | lmac->lmac_type = BGX_MODE_INVALID; |
1369 | lmac->use_training = false; |
1370 | } |
1371 | |
1372 | /* It is assumed that low level firmware sets this value */ |
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
1374 | if (bgx->lmac_count > bgx->max_lmac) |
1375 | bgx->lmac_count = bgx->max_lmac; |
1376 | |
1377 | for (idx = 0; idx < bgx->lmac_count; idx++) { |
1378 | bgx_set_lmac_config(bgx, idx); |
		bgx_print_qlm_mode(bgx, idx);
1380 | } |
1381 | } |
1382 | |
1383 | #ifdef CONFIG_ACPI |
1384 | |
1385 | static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, |
1386 | u8 *dst) |
1387 | { |
1388 | u8 mac[ETH_ALEN]; |
1389 | int ret; |
1390 | |
	ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
	if (ret) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		return -EINVAL;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	ether_addr_copy(dst, mac);
1400 | return 0; |
1401 | } |
1402 | |
1403 | /* Currently only sets the MAC address. */ |
1404 | static acpi_status bgx_acpi_register_phy(acpi_handle handle, |
1405 | u32 lvl, void *context, void **rv) |
1406 | { |
1407 | struct bgx *bgx = context; |
1408 | struct device *dev = &bgx->pdev->dev; |
1409 | struct acpi_device *adev; |
1410 | |
1411 | adev = acpi_fetch_acpi_dev(handle); |
1412 | if (!adev) |
1413 | goto out; |
1414 | |
	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
1416 | |
1417 | SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); |
1418 | |
1419 | bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; |
1420 | bgx->acpi_lmac_idx++; /* move to next LMAC */ |
1421 | out: |
1422 | return AE_OK; |
1423 | } |
1424 | |
1425 | static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, |
1426 | void *context, void **ret_val) |
1427 | { |
1428 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
1429 | struct bgx *bgx = context; |
1430 | char bgx_sel[5]; |
1431 | |
	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
1435 | return AE_OK; |
1436 | } |
1437 | |
	if (strncmp(string.pointer, bgx_sel, 4)) {
		kfree(string.pointer);
		return AE_OK;
	}

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
1447 | return AE_CTRL_TERMINATE; |
1448 | } |
1449 | |
1450 | static int bgx_init_acpi_phy(struct bgx *bgx) |
1451 | { |
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
1453 | return 0; |
1454 | } |
1455 | |
1456 | #else |
1457 | |
1458 | static int bgx_init_acpi_phy(struct bgx *bgx) |
1459 | { |
1460 | return -ENODEV; |
1461 | } |
1462 | |
1463 | #endif /* CONFIG_ACPI */ |
1464 | |
1465 | #if IS_ENABLED(CONFIG_OF_MDIO) |
1466 | |
1467 | static int bgx_init_of_phy(struct bgx *bgx) |
1468 | { |
1469 | struct fwnode_handle *fwn; |
1470 | struct device_node *node = NULL; |
1471 | u8 lmac = 0; |
1472 | |
1473 | device_for_each_child_node(&bgx->pdev->dev, fwn) { |
1474 | struct phy_device *pd; |
1475 | struct device_node *phy_np; |
1476 | |
1477 | /* Should always be an OF node. But if it is not, we |
1478 | * cannot handle it, so exit the loop. |
1479 | */ |
1480 | node = to_of_node(fwn); |
1481 | if (!node) |
1482 | break; |
1483 | |
		of_get_mac_address(node, bgx->lmac[lmac].mac);
1485 | |
1486 | SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); |
1487 | bgx->lmac[lmac].lmacid = lmac; |
1488 | |
		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no PHY, or defective firmware presents this
		 * Cortina PHY for which there is no driver support,
		 * ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
1498 | if (!pd) |
1499 | goto defer; |
1500 | bgx->lmac[lmac].phydev = pd; |
1501 | } |
1502 | |
1503 | lmac++; |
1504 | if (lmac == bgx->max_lmac) { |
1505 | of_node_put(node); |
1506 | break; |
1507 | } |
1508 | } |
1509 | return 0; |
1510 | |
1511 | defer: |
1512 | /* We are bailing out, try not to leak device reference counts |
1513 | * for phy devices we may have already found. |
1514 | */ |
1515 | while (lmac) { |
1516 | if (bgx->lmac[lmac].phydev) { |
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
1518 | bgx->lmac[lmac].phydev = NULL; |
1519 | } |
1520 | lmac--; |
1521 | } |
1522 | of_node_put(node); |
1523 | return -EPROBE_DEFER; |
1524 | } |
1525 | |
1526 | #else |
1527 | |
1528 | static int bgx_init_of_phy(struct bgx *bgx) |
1529 | { |
1530 | return -ENODEV; |
1531 | } |
1532 | |
1533 | #endif /* CONFIG_OF_MDIO */ |
1534 | |
1535 | static int bgx_init_phy(struct bgx *bgx) |
1536 | { |
1537 | if (!acpi_disabled) |
1538 | return bgx_init_acpi_phy(bgx); |
1539 | |
1540 | return bgx_init_of_phy(bgx); |
1541 | } |
1542 | |
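/* GMI TX interrupt handler: on a TX FIFO underflow, reset the affected LMAC
 * by toggling CMR_EN, then acknowledge the interrupt.
 */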
1543 | static irqreturn_t bgx_intr_handler(int irq, void *data) |
1544 | { |
1545 | struct bgx *bgx = (struct bgx *)data; |
1546 | u64 status, val; |
1547 | int lmac; |
1548 | |
1549 | for (lmac = 0; lmac < bgx->lmac_count; lmac++) { |
1550 | status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT); |
1551 | if (status & GMI_TXX_INT_UNDFLW) { |
			pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
1553 | bgx->bgx_id, lmac); |
1554 | val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG); |
1555 | val &= ~CMR_EN; |
1556 | bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); |
1557 | val |= CMR_EN; |
1558 | bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); |
1559 | } |
1560 | /* clear interrupts */ |
		bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
1562 | } |
1563 | |
1564 | return IRQ_HANDLED; |
1565 | } |
1566 | |
1567 | static void bgx_register_intr(struct pci_dev *pdev) |
1568 | { |
1569 | struct bgx *bgx = pci_get_drvdata(pdev); |
1570 | int ret; |
1571 | |
	ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
				    BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		pci_err(pdev, "Req for #%d msix vectors failed\n",
			BGX_LMAC_VEC_OFFSET);
		return;
	}
	ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
			      bgx, "BGX%d", bgx->bgx_id);
	if (ret)
		pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
1583 | } |
1584 | |
1585 | static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1586 | { |
1587 | int err; |
1588 | struct device *dev = &pdev->dev; |
1589 | struct bgx *bgx = NULL; |
1590 | u8 lmac; |
1591 | u16 sdevid; |
1592 | |
	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
1594 | if (!bgx) |
1595 | return -ENOMEM; |
1596 | bgx->pdev = pdev; |
1597 | |
	pci_set_drvdata(pdev, bgx);

	err = pcim_enable_device(pdev);
	if (err) {
		pci_set_drvdata(pdev, NULL);
		return dev_err_probe(dev, err, "Failed to enable PCI device\n");
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1609 | goto err_disable_device; |
1610 | } |
1611 | |
1612 | /* MAP configuration registers */ |
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1614 | if (!bgx->reg_base) { |
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
1616 | err = -ENOMEM; |
1617 | goto err_release_regions; |
1618 | } |
1619 | |
1620 | set_max_bgx_per_node(pdev); |
1621 | |
	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
1623 | if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { |
1624 | bgx->bgx_id = (pci_resource_start(pdev, |
1625 | PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; |
1626 | bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node; |
1627 | bgx->max_lmac = MAX_LMAC_PER_BGX; |
1628 | bgx_vnic[bgx->bgx_id] = bgx; |
1629 | } else { |
1630 | bgx->is_rgx = true; |
1631 | bgx->max_lmac = 1; |
1632 | bgx->bgx_id = MAX_BGX_PER_CN81XX - 1; |
1633 | bgx_vnic[bgx->bgx_id] = bgx; |
1634 | xcv_init_hw(); |
1635 | } |
1636 | |
	/* On 81xx all are DLMs, and on 83xx there are 3 BGX QLMs and one
	 * BGX, i.e. BGX2, can be split across 2 DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
1641 | if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) || |
1642 | ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2))) |
1643 | bgx->is_dlm = true; |
1644 | |
1645 | bgx_get_qlm_mode(bgx); |
1646 | |
1647 | err = bgx_init_phy(bgx); |
1648 | if (err) |
1649 | goto err_enable; |
1650 | |
1651 | bgx_init_hw(bgx); |
1652 | |
1653 | bgx_register_intr(pdev); |
1654 | |
1655 | /* Enable all LMACs */ |
1656 | for (lmac = 0; lmac < bgx->lmac_count; lmac++) { |
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
1663 | goto err_enable; |
1664 | } |
1665 | } |
1666 | |
1667 | return 0; |
1668 | |
1669 | err_enable: |
1670 | bgx_vnic[bgx->bgx_id] = NULL; |
	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
1677 | return err; |
1678 | } |
1679 | |
1680 | static void bgx_remove(struct pci_dev *pdev) |
1681 | { |
1682 | struct bgx *bgx = pci_get_drvdata(pdev); |
1683 | u8 lmac; |
1684 | |
1685 | /* Disable all LMACs */ |
1686 | for (lmac = 0; lmac < bgx->lmac_count; lmac++) |
		bgx_lmac_disable(bgx, lmac);

	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
1695 | } |
1696 | |
1697 | static struct pci_driver bgx_driver = { |
1698 | .name = DRV_NAME, |
1699 | .id_table = bgx_id_table, |
1700 | .probe = bgx_probe, |
1701 | .remove = bgx_remove, |
1702 | }; |
1703 | |
1704 | static int __init bgx_init_module(void) |
1705 | { |
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
1707 | |
1708 | return pci_register_driver(&bgx_driver); |
1709 | } |
1710 | |
1711 | static void __exit bgx_cleanup_module(void) |
1712 | { |
	pci_unregister_driver(&bgx_driver);
1714 | } |
1715 | |
1716 | module_init(bgx_init_module); |
1717 | module_exit(bgx_cleanup_module); |
1718 | |