// SPDX-License-Identifier: GPL-2.0
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of.h>

#include "niu.h"

/* This driver wants to store a link to a "next page" within the
 * page struct itself by overloading the content of the "mapping"
 * member. This is not expected by the page API, but does currently
 * work. However, the randstruct plugin gets very bothered by this
 * case because "mapping" (struct address_space) is randomized, so
 * casts to/from it trigger warnings. Hide this by way of a union,
 * to create a typed alias of "mapping", since that's how it is
 * actually being used here.
 */
union niu_page {
	struct page page;
	struct {
		unsigned long __flags;	/* unused alias of "flags" */
		struct list_head __lru;	/* unused alias of "lru" */
		struct page *next;	/* alias of "mapping" */
	};
};
#define niu_next_page(p)	container_of(p, union niu_page, page)->next
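/* Pages on an RX ring are chained through this alias instead of a
 * separate list node; a hash-insert sketch (bucket name illustrative):
 *
 *	niu_next_page(page) = rp->rxhash[h];
 *	rp->rxhash[h] = page;
 */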

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

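/* Fallback for platforms that do not provide readq/writeq: synthesize
 * 64-bit MMIO accesses from two 32-bit ones, low word first.  Note
 * this pair of accesses is not atomic with respect to the hardware.
 */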
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_NEPTUNE)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

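/* Register access helpers.  All expect a "struct niu *np" in scope;
 * each pairs a 64-bit read/write with the proper register window
 * (global, MAC, IPP, PCS or XPCS), e.g.:
 *
 *	u64 val = nr64_mac(XMAC_CONFIG);
 *	nw64_mac(XMAC_CONFIG, val | XMAC_CONFIG_FORCE_LED_ON);
 */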
#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

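/* Bit-poll helpers.  Each variant writes (or ORs in) "bits", then
 * re-reads the register every "delay" microseconds until those bits
 * read back clear, giving up with -ENODEV after "limit" iterations.
 * The wrapper macros add a compile-time sanity check on LIMIT/DELAY.
 * Typical use (register name and values illustrative only):
 *
 *	err = niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 *					 XTXMAC_SW_RST_SOFT_RST,
 *					 1000, 100, "XTXMAC_SW_RST");
 */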
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

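/* Interrupts are organized as logical devices (LDNs, one per channel
 * or error source) grouped into logical device groups (LDGs, one per
 * interrupt vector).  Rearming an LDG rewrites its timer value and,
 * optionally, the ARM bit so the group can fire again.
 */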
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

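/* The parent's port_phy word packs a 2-bit port type per port.
 * For example, with PORT_TYPE_MASK == 0x3, looking up port 2 is
 * (val >> 4) & 0x3.
 */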
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

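/* Poll the MIF frame register until the turnaround bit indicates the
 * MDIO transaction completed, then return the 16-bit data field, or
 * -ENODEV after roughly 10ms without completion.
 */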
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

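/* Clause 45 MDIO: each read or write is a two-step sequence, an
 * address frame selecting (port, device, register) followed by the
 * actual read or write frame.  Clause 22 MII access (mii_read and
 * mii_write below) needs only a single frame.
 */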
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

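/* The SERDES PLL configuration registers are 16 bits wide, so each
 * 32-bit configuration value is split across an _L/_H register pair:
 * the low half (val & 0xffff) is written first, then the high half
 * (val >> 16).
 */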
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber. */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

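/* Pulse the lane reset bits through the ESR_RXTX_RESET_CTRL register
 * pair, then verify that the combined 32-bit reset value reads back
 * as zero before the lanes are used.
 */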
static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

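/* Link state is sampled under np->lock; a 1G SERDES link always
 * reports 1000 Mb/s full duplex whenever the PCS says it is up.
 */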
static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

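/* For 10G SERDES links, require both the XPCS lane status and the
 * XMAC internal-signals register to agree before declaring the link
 * up.  The raw 0x1000 and 0x01000000 masks below appear to be the
 * corresponding status bits; they are not named in niu.h.
 */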
static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

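/* Common copper-PHY bring-up: reset the PHY, then either program the
 * autoneg advertisement registers from lp->advertising, or force
 * speed/duplex from lp->speed and lp->duplex, rejecting combinations
 * the PHY does not report as supported.
 */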
1732 | static int mii_init_common(struct niu *np) |
1733 | { |
1734 | struct niu_link_config *lp = &np->link_config; |
1735 | u16 bmcr, bmsr, adv, estat; |
1736 | int err; |
1737 | |
1738 | err = mii_reset(np); |
1739 | if (err) |
1740 | return err; |
1741 | |
1742 | err = mii_read(np, port: np->phy_addr, MII_BMSR); |
1743 | if (err < 0) |
1744 | return err; |
1745 | bmsr = err; |
1746 | |
1747 | estat = 0; |
1748 | if (bmsr & BMSR_ESTATEN) { |
1749 | err = mii_read(np, port: np->phy_addr, MII_ESTATUS); |
1750 | if (err < 0) |
1751 | return err; |
1752 | estat = err; |
1753 | } |
1754 | |
1755 | bmcr = 0; |
1756 | err = mii_write(np, port: np->phy_addr, MII_BMCR, data: bmcr); |
1757 | if (err) |
1758 | return err; |
1759 | |
1760 | if (lp->loopback_mode == LOOPBACK_MAC) { |
1761 | bmcr |= BMCR_LOOPBACK; |
1762 | if (lp->active_speed == SPEED_1000) |
1763 | bmcr |= BMCR_SPEED1000; |
1764 | if (lp->active_duplex == DUPLEX_FULL) |
1765 | bmcr |= BMCR_FULLDPLX; |
1766 | } |
1767 | |
1768 | if (lp->loopback_mode == LOOPBACK_PHY) { |
1769 | u16 aux; |
1770 | |
1771 | aux = (BCM5464R_AUX_CTL_EXT_LB | |
1772 | BCM5464R_AUX_CTL_WRITE_1); |
1773 | err = mii_write(np, port: np->phy_addr, BCM5464R_AUX_CTL, data: aux); |
1774 | if (err) |
1775 | return err; |
1776 | } |
1777 | |
1778 | if (lp->autoneg) { |
1779 | u16 ctrl1000; |
1780 | |
1781 | adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; |
1782 | if ((bmsr & BMSR_10HALF) && |
1783 | (lp->advertising & ADVERTISED_10baseT_Half)) |
1784 | adv |= ADVERTISE_10HALF; |
1785 | if ((bmsr & BMSR_10FULL) && |
1786 | (lp->advertising & ADVERTISED_10baseT_Full)) |
1787 | adv |= ADVERTISE_10FULL; |
1788 | if ((bmsr & BMSR_100HALF) && |
1789 | (lp->advertising & ADVERTISED_100baseT_Half)) |
1790 | adv |= ADVERTISE_100HALF; |
1791 | if ((bmsr & BMSR_100FULL) && |
1792 | (lp->advertising & ADVERTISED_100baseT_Full)) |
1793 | adv |= ADVERTISE_100FULL; |
1794 | err = mii_write(np, port: np->phy_addr, MII_ADVERTISE, data: adv); |
1795 | if (err) |
1796 | return err; |
1797 | |
1798 | if (likely(bmsr & BMSR_ESTATEN)) { |
1799 | ctrl1000 = 0; |
1800 | if ((estat & ESTATUS_1000_THALF) && |
1801 | (lp->advertising & ADVERTISED_1000baseT_Half)) |
1802 | ctrl1000 |= ADVERTISE_1000HALF; |
1803 | if ((estat & ESTATUS_1000_TFULL) && |
1804 | (lp->advertising & ADVERTISED_1000baseT_Full)) |
1805 | ctrl1000 |= ADVERTISE_1000FULL; |
1806 | err = mii_write(np, port: np->phy_addr, |
1807 | MII_CTRL1000, data: ctrl1000); |
1808 | if (err) |
1809 | return err; |
1810 | } |
1811 | |
1812 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); |
1813 | } else { |
1814 | /* !lp->autoneg */ |
1815 | int fulldpx; |
1816 | |
1817 | if (lp->duplex == DUPLEX_FULL) { |
1818 | bmcr |= BMCR_FULLDPLX; |
1819 | fulldpx = 1; |
1820 | } else if (lp->duplex == DUPLEX_HALF) |
1821 | fulldpx = 0; |
1822 | else |
1823 | return -EINVAL; |
1824 | |
1825 | if (lp->speed == SPEED_1000) { |
1826 | /* if X-full requested while not supported, or |
1827 | X-half requested while not supported... */ |
1828 | if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || |
1829 | (!fulldpx && !(estat & ESTATUS_1000_THALF))) |
1830 | return -EINVAL; |
1831 | bmcr |= BMCR_SPEED1000; |
1832 | } else if (lp->speed == SPEED_100) { |
1833 | if ((fulldpx && !(bmsr & BMSR_100FULL)) || |
1834 | (!fulldpx && !(bmsr & BMSR_100HALF))) |
1835 | return -EINVAL; |
1836 | bmcr |= BMCR_SPEED100; |
1837 | } else if (lp->speed == SPEED_10) { |
1838 | if ((fulldpx && !(bmsr & BMSR_10FULL)) || |
1839 | (!fulldpx && !(bmsr & BMSR_10HALF))) |
1840 | return -EINVAL; |
1841 | } else |
1842 | return -EINVAL; |
1843 | } |
1844 | |
1845 | err = mii_write(np, port: np->phy_addr, MII_BMCR, data: bmcr); |
1846 | if (err) |
1847 | return err; |
1848 | |
1849 | #if 0 |
1850 | err = mii_read(np, np->phy_addr, MII_BMCR); |
1851 | if (err < 0) |
1852 | return err; |
1853 | bmcr = err; |
1854 | |
1855 | err = mii_read(np, np->phy_addr, MII_BMSR); |
1856 | if (err < 0) |
1857 | return err; |
1858 | bmsr = err; |
1859 | |
1860 | pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n" , |
1861 | np->port, bmcr, bmsr); |
1862 | #endif |
1863 | |
1864 | return 0; |
1865 | } |
1866 | |
1867 | static int xcvr_init_1g(struct niu *np) |
1868 | { |
1869 | u64 val; |
1870 | |
1871 | /* XXX shared resource, lock parent XXX */ |
1872 | val = nr64(MIF_CONFIG); |
1873 | val &= ~MIF_CONFIG_INDIRECT_MODE; |
1874 | nw64(MIF_CONFIG, val); |
1875 | |
1876 | return mii_init_common(np); |
1877 | } |
1878 | |
1879 | static int niu_xcvr_init(struct niu *np) |
1880 | { |
1881 | const struct niu_phy_ops *ops = np->phy_ops; |
1882 | int err; |
1883 | |
1884 | err = 0; |
1885 | if (ops->xcvr_init) |
1886 | err = ops->xcvr_init(np); |
1887 | |
1888 | return err; |
1889 | } |
1890 | |
1891 | static int niu_serdes_init(struct niu *np) |
1892 | { |
1893 | const struct niu_phy_ops *ops = np->phy_ops; |
1894 | int err; |
1895 | |
1896 | err = 0; |
1897 | if (ops->serdes_init) |
1898 | err = ops->serdes_init(np); |
1899 | |
1900 | return err; |
1901 | } |
1902 | |
1903 | static void niu_init_xif(struct niu *); |
1904 | static void niu_handle_led(struct niu *, int status); |
1905 | |
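/* Reconcile the software carrier state with the PHY-reported link
 * state.  On a down->up transition the XIF and LED are reprogrammed
 * under np->lock before the carrier is asserted; on up->down only
 * the LED is turned off before the carrier is dropped.
 */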
1906 | static int niu_link_status_common(struct niu *np, int link_up) |
1907 | { |
1908 | struct niu_link_config *lp = &np->link_config; |
1909 | struct net_device *dev = np->dev; |
1910 | unsigned long flags; |
1911 | |
1912 | if (!netif_carrier_ok(dev) && link_up) { |
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");
1919 | |
1920 | spin_lock_irqsave(&np->lock, flags); |
1921 | niu_init_xif(np); |
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);
1924 | |
1925 | netif_carrier_on(dev); |
1926 | } else if (netif_carrier_ok(dev) && !link_up) { |
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
1931 | netif_carrier_off(dev); |
1932 | } |
1933 | |
1934 | return 0; |
1935 | } |
1936 | |
1937 | static int link_status_10g_mrvl(struct niu *np, int *link_up_p) |
1938 | { |
1939 | int err, link_up, pma_status, pcs_status; |
1940 | |
1941 | link_up = 0; |
1942 | |
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1944 | MRVL88X2011_10G_PMD_STATUS_2); |
1945 | if (err < 0) |
1946 | goto out; |
1947 | |
1948 | /* Check PMA/PMD Register: 1.0001.2 == 1 */ |
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1950 | MRVL88X2011_PMA_PMD_STATUS_1); |
1951 | if (err < 0) |
1952 | goto out; |
1953 | |
1954 | pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); |
1955 | |
	/* Check PCS Register: 3.0001.2 == 1, read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1958 | MRVL88X2011_PMA_PMD_STATUS_1); |
1959 | if (err < 0) |
1960 | goto out; |
1961 | |
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1963 | MRVL88X2011_PMA_PMD_STATUS_1); |
1964 | if (err < 0) |
1965 | goto out; |
1966 | |
1967 | pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); |
1968 | |
	/* Check XGXS Register: 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
1971 | MRVL88X2011_10G_XGXS_LANE_STAT); |
1972 | if (err < 0) |
1973 | goto out; |
1974 | |
1975 | if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | |
1976 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | |
1977 | PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | |
1978 | 0x800)) |
1979 | link_up = (pma_status && pcs_status) ? 1 : 0; |
1980 | |
1981 | np->link_config.active_speed = SPEED_10000; |
1982 | np->link_config.active_duplex = DUPLEX_FULL; |
1983 | err = 0; |
1984 | out: |
	mrvl88x2011_act_led(np, (link_up ?
1986 | MRVL88X2011_LED_CTL_PCS_ACT : |
1987 | MRVL88X2011_LED_CTL_OFF)); |
1988 | |
1989 | *link_up_p = link_up; |
1990 | return err; |
1991 | } |
1992 | |
1993 | static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) |
1994 | { |
1995 | int err, link_up; |
1996 | link_up = 0; |
1997 | |
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1999 | BCM8704_PMD_RCV_SIGDET); |
2000 | if (err < 0 || err == 0xffff) |
2001 | goto out; |
2002 | if (!(err & PMD_RCV_SIGDET_GLOBAL)) { |
2003 | err = 0; |
2004 | goto out; |
2005 | } |
2006 | |
	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2008 | BCM8704_PCS_10G_R_STATUS); |
2009 | if (err < 0) |
2010 | goto out; |
2011 | |
2012 | if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { |
2013 | err = 0; |
2014 | goto out; |
2015 | } |
2016 | |
	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2018 | BCM8704_PHYXS_XGXS_LANE_STAT); |
2019 | if (err < 0) |
2020 | goto out; |
2021 | if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | |
2022 | PHYXS_XGXS_LANE_STAT_MAGIC | |
2023 | PHYXS_XGXS_LANE_STAT_PATTEST | |
2024 | PHYXS_XGXS_LANE_STAT_LANE3 | |
2025 | PHYXS_XGXS_LANE_STAT_LANE2 | |
2026 | PHYXS_XGXS_LANE_STAT_LANE1 | |
2027 | PHYXS_XGXS_LANE_STAT_LANE0)) { |
2028 | err = 0; |
2029 | np->link_config.active_speed = SPEED_INVALID; |
2030 | np->link_config.active_duplex = DUPLEX_INVALID; |
2031 | goto out; |
2032 | } |
2033 | |
2034 | link_up = 1; |
2035 | np->link_config.active_speed = SPEED_10000; |
2036 | np->link_config.active_duplex = DUPLEX_FULL; |
2037 | err = 0; |
2038 | |
2039 | out: |
2040 | *link_up_p = link_up; |
2041 | return err; |
2042 | } |
2043 | |
2044 | static int link_status_10g_bcom(struct niu *np, int *link_up_p) |
2045 | { |
2046 | int err, link_up; |
2047 | |
2048 | link_up = 0; |
2049 | |
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
2051 | BCM8704_PMD_RCV_SIGDET); |
2052 | if (err < 0) |
2053 | goto out; |
2054 | if (!(err & PMD_RCV_SIGDET_GLOBAL)) { |
2055 | err = 0; |
2056 | goto out; |
2057 | } |
2058 | |
	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2060 | BCM8704_PCS_10G_R_STATUS); |
2061 | if (err < 0) |
2062 | goto out; |
2063 | if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { |
2064 | err = 0; |
2065 | goto out; |
2066 | } |
2067 | |
	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2069 | BCM8704_PHYXS_XGXS_LANE_STAT); |
2070 | if (err < 0) |
2071 | goto out; |
2072 | |
2073 | if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | |
2074 | PHYXS_XGXS_LANE_STAT_MAGIC | |
2075 | PHYXS_XGXS_LANE_STAT_LANE3 | |
2076 | PHYXS_XGXS_LANE_STAT_LANE2 | |
2077 | PHYXS_XGXS_LANE_STAT_LANE1 | |
2078 | PHYXS_XGXS_LANE_STAT_LANE0)) { |
2079 | err = 0; |
2080 | goto out; |
2081 | } |
2082 | |
2083 | link_up = 1; |
2084 | np->link_config.active_speed = SPEED_10000; |
2085 | np->link_config.active_duplex = DUPLEX_FULL; |
2086 | err = 0; |
2087 | |
2088 | out: |
2089 | *link_up_p = link_up; |
2090 | return err; |
2091 | } |
2092 | |
2093 | static int link_status_10g(struct niu *np, int *link_up_p) |
2094 | { |
2095 | unsigned long flags; |
2096 | int err = -EINVAL; |
2097 | |
2098 | spin_lock_irqsave(&np->lock, flags); |
2099 | |
2100 | if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { |
2101 | int phy_id; |
2102 | |
		phy_id = phy_decode(np->parent->port_phy, np->port);
2104 | phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; |
2105 | |
2106 | /* handle different phy types */ |
2107 | switch (phy_id & NIU_PHY_ID_MASK) { |
2108 | case NIU_PHY_ID_MRVL88X2011: |
2109 | err = link_status_10g_mrvl(np, link_up_p); |
2110 | break; |
2111 | |
2112 | default: /* bcom 8704 */ |
2113 | err = link_status_10g_bcom(np, link_up_p); |
2114 | break; |
2115 | } |
2116 | } |
2117 | |
	spin_unlock_irqrestore(&np->lock, flags);
2119 | |
2120 | return err; |
2121 | } |
2122 | |
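/* Infer hotplug PHY presence from the SERDES receiver state: all of
 * the port's signal-detect and ready bits in ESR_INT_SIGNALS must be
 * asserted for the PHY to be considered present.
 */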
2123 | static int niu_10g_phy_present(struct niu *np) |
2124 | { |
2125 | u64 sig, mask, val; |
2126 | |
2127 | sig = nr64(ESR_INT_SIGNALS); |
2128 | switch (np->port) { |
2129 | case 0: |
2130 | mask = ESR_INT_SIGNALS_P0_BITS; |
2131 | val = (ESR_INT_SRDY0_P0 | |
2132 | ESR_INT_DET0_P0 | |
2133 | ESR_INT_XSRDY_P0 | |
2134 | ESR_INT_XDP_P0_CH3 | |
2135 | ESR_INT_XDP_P0_CH2 | |
2136 | ESR_INT_XDP_P0_CH1 | |
2137 | ESR_INT_XDP_P0_CH0); |
2138 | break; |
2139 | |
2140 | case 1: |
2141 | mask = ESR_INT_SIGNALS_P1_BITS; |
2142 | val = (ESR_INT_SRDY0_P1 | |
2143 | ESR_INT_DET0_P1 | |
2144 | ESR_INT_XSRDY_P1 | |
2145 | ESR_INT_XDP_P1_CH3 | |
2146 | ESR_INT_XDP_P1_CH2 | |
2147 | ESR_INT_XDP_P1_CH1 | |
2148 | ESR_INT_XDP_P1_CH0); |
2149 | break; |
2150 | |
2151 | default: |
2152 | return 0; |
2153 | } |
2154 | |
2155 | if ((sig & mask) != val) |
2156 | return 0; |
2157 | return 1; |
2158 | } |
2159 | |
2160 | static int link_status_10g_hotplug(struct niu *np, int *link_up_p) |
2161 | { |
2162 | unsigned long flags; |
2163 | int err = 0; |
2164 | int phy_present; |
2165 | int phy_present_prev; |
2166 | |
2167 | spin_lock_irqsave(&np->lock, flags); |
2168 | |
2169 | if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { |
2170 | phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? |
2171 | 1 : 0; |
2172 | phy_present = niu_10g_phy_present(np); |
2173 | if (phy_present != phy_present_prev) { |
2174 | /* state change */ |
2175 | if (phy_present) { |
2176 | /* A NEM was just plugged in */ |
2177 | np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; |
2178 | if (np->phy_ops->xcvr_init) |
2179 | err = np->phy_ops->xcvr_init(np); |
2180 | if (err) { |
				err = mdio_read(np, np->phy_addr,
2182 | BCM8704_PHYXS_DEV_ADDR, MII_BMCR); |
2183 | if (err == 0xffff) { |
2184 | /* No mdio, back-to-back XAUI */ |
2185 | goto out; |
2186 | } |
2187 | /* debounce */ |
2188 | np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; |
2189 | } |
2190 | } else { |
2191 | np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; |
2192 | *link_up_p = 0; |
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
2195 | } |
2196 | } |
2197 | out: |
2198 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { |
2199 | err = link_status_10g_bcm8706(np, link_up_p); |
2200 | if (err == 0xffff) { |
2201 | /* No mdio, back-to-back XAUI: it is C10NEM */ |
2202 | *link_up_p = 1; |
2203 | np->link_config.active_speed = SPEED_10000; |
2204 | np->link_config.active_duplex = DUPLEX_FULL; |
2205 | } |
2206 | } |
2207 | } |
2208 | |
	spin_unlock_irqrestore(&np->lock, flags);
2210 | |
2211 | return 0; |
2212 | } |
2213 | |
2214 | static int niu_link_status(struct niu *np, int *link_up_p) |
2215 | { |
2216 | const struct niu_phy_ops *ops = np->phy_ops; |
2217 | int err; |
2218 | |
2219 | err = 0; |
2220 | if (ops->link_status) |
2221 | err = ops->link_status(np, link_up_p); |
2222 | |
2223 | return err; |
2224 | } |
2225 | |
2226 | static void niu_timer(struct timer_list *t) |
2227 | { |
2228 | struct niu *np = from_timer(np, t, timer); |
2229 | unsigned long off; |
2230 | int err, link_up; |
2231 | |
	err = niu_link_status(np, &link_up);
2233 | if (!err) |
2234 | niu_link_status_common(np, link_up); |
2235 | |
	if (netif_carrier_ok(np->dev))
2237 | off = 5 * HZ; |
2238 | else |
2239 | off = 1 * HZ; |
2240 | np->timer.expires = jiffies + off; |
2241 | |
	add_timer(&np->timer);
2243 | } |
2244 | |
2245 | static const struct niu_phy_ops phy_ops_10g_serdes = { |
2246 | .serdes_init = serdes_init_10g_serdes, |
2247 | .link_status = link_status_10g_serdes, |
2248 | }; |
2249 | |
2250 | static const struct niu_phy_ops phy_ops_10g_serdes_niu = { |
2251 | .serdes_init = serdes_init_niu_10g_serdes, |
2252 | .link_status = link_status_10g_serdes, |
2253 | }; |
2254 | |
2255 | static const struct niu_phy_ops phy_ops_1g_serdes_niu = { |
2256 | .serdes_init = serdes_init_niu_1g_serdes, |
2257 | .link_status = link_status_1g_serdes, |
2258 | }; |
2259 | |
2260 | static const struct niu_phy_ops phy_ops_1g_rgmii = { |
2261 | .xcvr_init = xcvr_init_1g_rgmii, |
2262 | .link_status = link_status_1g_rgmii, |
2263 | }; |
2264 | |
2265 | static const struct niu_phy_ops phy_ops_10g_fiber_niu = { |
2266 | .serdes_init = serdes_init_niu_10g_fiber, |
2267 | .xcvr_init = xcvr_init_10g, |
2268 | .link_status = link_status_10g, |
2269 | }; |
2270 | |
2271 | static const struct niu_phy_ops phy_ops_10g_fiber = { |
2272 | .serdes_init = serdes_init_10g, |
2273 | .xcvr_init = xcvr_init_10g, |
2274 | .link_status = link_status_10g, |
2275 | }; |
2276 | |
2277 | static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { |
2278 | .serdes_init = serdes_init_10g, |
2279 | .xcvr_init = xcvr_init_10g_bcm8706, |
2280 | .link_status = link_status_10g_hotplug, |
2281 | }; |
2282 | |
2283 | static const struct niu_phy_ops phy_ops_niu_10g_hotplug = { |
2284 | .serdes_init = serdes_init_niu_10g_fiber, |
2285 | .xcvr_init = xcvr_init_10g_bcm8706, |
2286 | .link_status = link_status_10g_hotplug, |
2287 | }; |
2288 | |
2289 | static const struct niu_phy_ops phy_ops_10g_copper = { |
2290 | .serdes_init = serdes_init_10g, |
2291 | .link_status = link_status_10g, /* XXX */ |
2292 | }; |
2293 | |
2294 | static const struct niu_phy_ops phy_ops_1g_fiber = { |
2295 | .serdes_init = serdes_init_1g, |
2296 | .xcvr_init = xcvr_init_1g, |
2297 | .link_status = link_status_1g, |
2298 | }; |
2299 | |
2300 | static const struct niu_phy_ops phy_ops_1g_copper = { |
2301 | .xcvr_init = xcvr_init_1g, |
2302 | .link_status = link_status_1g, |
2303 | }; |
2304 | |
2305 | struct niu_phy_template { |
2306 | const struct niu_phy_ops *ops; |
2307 | u32 phy_addr_base; |
2308 | }; |
2309 | |
2310 | static const struct niu_phy_template phy_template_niu_10g_fiber = { |
2311 | .ops = &phy_ops_10g_fiber_niu, |
2312 | .phy_addr_base = 16, |
2313 | }; |
2314 | |
2315 | static const struct niu_phy_template phy_template_niu_10g_serdes = { |
2316 | .ops = &phy_ops_10g_serdes_niu, |
2317 | .phy_addr_base = 0, |
2318 | }; |
2319 | |
2320 | static const struct niu_phy_template phy_template_niu_1g_serdes = { |
2321 | .ops = &phy_ops_1g_serdes_niu, |
2322 | .phy_addr_base = 0, |
2323 | }; |
2324 | |
2325 | static const struct niu_phy_template phy_template_10g_fiber = { |
2326 | .ops = &phy_ops_10g_fiber, |
2327 | .phy_addr_base = 8, |
2328 | }; |
2329 | |
2330 | static const struct niu_phy_template phy_template_10g_fiber_hotplug = { |
2331 | .ops = &phy_ops_10g_fiber_hotplug, |
2332 | .phy_addr_base = 8, |
2333 | }; |
2334 | |
2335 | static const struct niu_phy_template phy_template_niu_10g_hotplug = { |
2336 | .ops = &phy_ops_niu_10g_hotplug, |
2337 | .phy_addr_base = 8, |
2338 | }; |
2339 | |
2340 | static const struct niu_phy_template phy_template_10g_copper = { |
2341 | .ops = &phy_ops_10g_copper, |
2342 | .phy_addr_base = 10, |
2343 | }; |
2344 | |
2345 | static const struct niu_phy_template phy_template_1g_fiber = { |
2346 | .ops = &phy_ops_1g_fiber, |
2347 | .phy_addr_base = 0, |
2348 | }; |
2349 | |
2350 | static const struct niu_phy_template phy_template_1g_copper = { |
2351 | .ops = &phy_ops_1g_copper, |
2352 | .phy_addr_base = 0, |
2353 | }; |
2354 | |
2355 | static const struct niu_phy_template phy_template_1g_rgmii = { |
2356 | .ops = &phy_ops_1g_rgmii, |
2357 | .phy_addr_base = 0, |
2358 | }; |
2359 | |
2360 | static const struct niu_phy_template phy_template_10g_serdes = { |
2361 | .ops = &phy_ops_10g_serdes, |
2362 | .phy_addr_base = 0, |
2363 | }; |
2364 | |
2365 | static int niu_atca_port_num[4] = { |
2366 | 0, 0, 11, 10 |
2367 | }; |
2368 | |
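/* Bring up the shared 10G/1G SERDES on ports 0 and 1: reset the ESR,
 * program the PLL, control and test registers, condition all four
 * lanes, then check the link-ready signal bits.  If they never
 * assert, fall back to 1G SERDES mode and move the MAC to the PCS
 * transceiver.
 */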
2369 | static int serdes_init_10g_serdes(struct niu *np) |
2370 | { |
2371 | struct niu_link_config *lp = &np->link_config; |
2372 | unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; |
2373 | u64 ctrl_val, test_cfg_val, sig, mask, val; |
2374 | |
2375 | switch (np->port) { |
2376 | case 0: |
2377 | ctrl_reg = ENET_SERDES_0_CTRL_CFG; |
2378 | test_cfg_reg = ENET_SERDES_0_TEST_CFG; |
2379 | pll_cfg = ENET_SERDES_0_PLL_CFG; |
2380 | break; |
2381 | case 1: |
2382 | ctrl_reg = ENET_SERDES_1_CTRL_CFG; |
2383 | test_cfg_reg = ENET_SERDES_1_TEST_CFG; |
2384 | pll_cfg = ENET_SERDES_1_PLL_CFG; |
2385 | break; |
2386 | |
2387 | default: |
2388 | return -EINVAL; |
2389 | } |
2390 | ctrl_val = (ENET_SERDES_CTRL_SDET_0 | |
2391 | ENET_SERDES_CTRL_SDET_1 | |
2392 | ENET_SERDES_CTRL_SDET_2 | |
2393 | ENET_SERDES_CTRL_SDET_3 | |
2394 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | |
2395 | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | |
2396 | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | |
2397 | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | |
2398 | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | |
2399 | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | |
2400 | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | |
2401 | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); |
2402 | test_cfg_val = 0; |
2403 | |
2404 | if (lp->loopback_mode == LOOPBACK_PHY) { |
2405 | test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << |
2406 | ENET_SERDES_TEST_MD_0_SHIFT) | |
2407 | (ENET_TEST_MD_PAD_LOOPBACK << |
2408 | ENET_SERDES_TEST_MD_1_SHIFT) | |
2409 | (ENET_TEST_MD_PAD_LOOPBACK << |
2410 | ENET_SERDES_TEST_MD_2_SHIFT) | |
2411 | (ENET_TEST_MD_PAD_LOOPBACK << |
2412 | ENET_SERDES_TEST_MD_3_SHIFT)); |
2413 | } |
2414 | |
2415 | esr_reset(np); |
2416 | nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); |
2417 | nw64(ctrl_reg, ctrl_val); |
2418 | nw64(test_cfg_reg, test_cfg_val); |
2419 | |
2420 | /* Initialize all 4 lanes of the SERDES. */ |
2421 | for (i = 0; i < 4; i++) { |
2422 | u32 rxtx_ctrl, glue0; |
2423 | int err; |
2424 | |
		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2426 | if (err) |
2427 | return err; |
		err = esr_read_glue0(np, i, &glue0);
2429 | if (err) |
2430 | return err; |
2431 | |
2432 | rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); |
2433 | rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | |
2434 | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); |
2435 | |
2436 | glue0 &= ~(ESR_GLUE_CTRL0_SRATE | |
2437 | ESR_GLUE_CTRL0_THCNT | |
2438 | ESR_GLUE_CTRL0_BLTIME); |
2439 | glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | |
2440 | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | |
2441 | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | |
2442 | (BLTIME_300_CYCLES << |
2443 | ESR_GLUE_CTRL0_BLTIME_SHIFT)); |
2444 | |
		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2446 | if (err) |
2447 | return err; |
		err = esr_write_glue0(np, i, glue0);
2449 | if (err) |
2450 | return err; |
2451 | } |
2452 | |
2453 | |
2454 | sig = nr64(ESR_INT_SIGNALS); |
2455 | switch (np->port) { |
2456 | case 0: |
2457 | mask = ESR_INT_SIGNALS_P0_BITS; |
2458 | val = (ESR_INT_SRDY0_P0 | |
2459 | ESR_INT_DET0_P0 | |
2460 | ESR_INT_XSRDY_P0 | |
2461 | ESR_INT_XDP_P0_CH3 | |
2462 | ESR_INT_XDP_P0_CH2 | |
2463 | ESR_INT_XDP_P0_CH1 | |
2464 | ESR_INT_XDP_P0_CH0); |
2465 | break; |
2466 | |
2467 | case 1: |
2468 | mask = ESR_INT_SIGNALS_P1_BITS; |
2469 | val = (ESR_INT_SRDY0_P1 | |
2470 | ESR_INT_DET0_P1 | |
2471 | ESR_INT_XSRDY_P1 | |
2472 | ESR_INT_XDP_P1_CH3 | |
2473 | ESR_INT_XDP_P1_CH2 | |
2474 | ESR_INT_XDP_P1_CH1 | |
2475 | ESR_INT_XDP_P1_CH0); |
2476 | break; |
2477 | |
2478 | default: |
2479 | return -EINVAL; |
2480 | } |
2481 | |
2482 | if ((sig & mask) != val) { |
2483 | int err; |
2484 | err = serdes_init_1g_serdes(np); |
2485 | if (!err) { |
2486 | np->flags &= ~NIU_FLAGS_10G; |
2487 | np->mac_xcvr = MAC_XCVR_PCS; |
2488 | } else { |
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
2490 | np->port); |
2491 | return -ENODEV; |
2492 | } |
2493 | } |
2494 | |
2495 | return 0; |
2496 | } |
2497 | |
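/* Select the phy_ops template and MDIO address for this port from
 * the platform type and the 10G/FIBER/XCVR_SERDES flag combination.
 */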
2498 | static int niu_determine_phy_disposition(struct niu *np) |
2499 | { |
2500 | struct niu_parent *parent = np->parent; |
2501 | u8 plat_type = parent->plat_type; |
2502 | const struct niu_phy_template *tp; |
2503 | u32 phy_addr_off = 0; |
2504 | |
2505 | if (plat_type == PLAT_TYPE_NIU) { |
2506 | switch (np->flags & |
2507 | (NIU_FLAGS_10G | |
2508 | NIU_FLAGS_FIBER | |
2509 | NIU_FLAGS_XCVR_SERDES)) { |
2510 | case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: |
2511 | /* 10G Serdes */ |
2512 | tp = &phy_template_niu_10g_serdes; |
2513 | break; |
2514 | case NIU_FLAGS_XCVR_SERDES: |
2515 | /* 1G Serdes */ |
2516 | tp = &phy_template_niu_1g_serdes; |
2517 | break; |
2518 | case NIU_FLAGS_10G | NIU_FLAGS_FIBER: |
2519 | /* 10G Fiber */ |
2520 | default: |
2521 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { |
2522 | tp = &phy_template_niu_10g_hotplug; |
2523 | if (np->port == 0) |
2524 | phy_addr_off = 8; |
2525 | if (np->port == 1) |
2526 | phy_addr_off = 12; |
2527 | } else { |
2528 | tp = &phy_template_niu_10g_fiber; |
2529 | phy_addr_off += np->port; |
2530 | } |
2531 | break; |
2532 | } |
2533 | } else { |
2534 | switch (np->flags & |
2535 | (NIU_FLAGS_10G | |
2536 | NIU_FLAGS_FIBER | |
2537 | NIU_FLAGS_XCVR_SERDES)) { |
2538 | case 0: |
2539 | /* 1G copper */ |
2540 | tp = &phy_template_1g_copper; |
2541 | if (plat_type == PLAT_TYPE_VF_P0) |
2542 | phy_addr_off = 10; |
2543 | else if (plat_type == PLAT_TYPE_VF_P1) |
2544 | phy_addr_off = 26; |
2545 | |
2546 | phy_addr_off += (np->port ^ 0x3); |
2547 | break; |
2548 | |
2549 | case NIU_FLAGS_10G: |
2550 | /* 10G copper */ |
2551 | tp = &phy_template_10g_copper; |
2552 | break; |
2553 | |
2554 | case NIU_FLAGS_FIBER: |
2555 | /* 1G fiber */ |
2556 | tp = &phy_template_1g_fiber; |
2557 | break; |
2558 | |
2559 | case NIU_FLAGS_10G | NIU_FLAGS_FIBER: |
2560 | /* 10G fiber */ |
2561 | tp = &phy_template_10g_fiber; |
2562 | if (plat_type == PLAT_TYPE_VF_P0 || |
2563 | plat_type == PLAT_TYPE_VF_P1) |
2564 | phy_addr_off = 8; |
2565 | phy_addr_off += np->port; |
2566 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { |
2567 | tp = &phy_template_10g_fiber_hotplug; |
2568 | if (np->port == 0) |
2569 | phy_addr_off = 8; |
2570 | if (np->port == 1) |
2571 | phy_addr_off = 12; |
2572 | } |
2573 | break; |
2574 | |
2575 | case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: |
2576 | case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: |
2577 | case NIU_FLAGS_XCVR_SERDES: |
2578 | switch(np->port) { |
2579 | case 0: |
2580 | case 1: |
2581 | tp = &phy_template_10g_serdes; |
2582 | break; |
2583 | case 2: |
2584 | case 3: |
2585 | tp = &phy_template_1g_rgmii; |
2586 | break; |
2587 | default: |
2588 | return -EINVAL; |
2589 | } |
2590 | phy_addr_off = niu_atca_port_num[np->port]; |
2591 | break; |
2592 | |
2593 | default: |
2594 | return -EINVAL; |
2595 | } |
2596 | } |
2597 | |
2598 | np->phy_ops = tp->ops; |
2599 | np->phy_addr = tp->phy_addr_base + phy_addr_off; |
2600 | |
2601 | return 0; |
2602 | } |
2603 | |
2604 | static int niu_init_link(struct niu *np) |
2605 | { |
2606 | struct niu_parent *parent = np->parent; |
2607 | int err, ignore; |
2608 | |
2609 | if (parent->plat_type == PLAT_TYPE_NIU) { |
2610 | err = niu_xcvr_init(np); |
2611 | if (err) |
2612 | return err; |
		msleep(200);
2614 | } |
2615 | err = niu_serdes_init(np); |
2616 | if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) |
2617 | return err; |
	msleep(200);
2619 | err = niu_xcvr_init(np); |
2620 | if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) |
		niu_link_status(np, &ignore);
2622 | return 0; |
2623 | } |
2624 | |
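/* The unicast MAC address is programmed 16 bits at a time; ADDR0
 * holds the two least significant octets and ADDR2 the two most
 * significant.
 */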
2625 | static void niu_set_primary_mac(struct niu *np, const unsigned char *addr) |
2626 | { |
2627 | u16 reg0 = addr[4] << 8 | addr[5]; |
2628 | u16 reg1 = addr[2] << 8 | addr[3]; |
2629 | u16 reg2 = addr[0] << 8 | addr[1]; |
2630 | |
2631 | if (np->flags & NIU_FLAGS_XMAC) { |
2632 | nw64_mac(XMAC_ADDR0, reg0); |
2633 | nw64_mac(XMAC_ADDR1, reg1); |
2634 | nw64_mac(XMAC_ADDR2, reg2); |
2635 | } else { |
2636 | nw64_mac(BMAC_ADDR0, reg0); |
2637 | nw64_mac(BMAC_ADDR1, reg1); |
2638 | nw64_mac(BMAC_ADDR2, reg2); |
2639 | } |
2640 | } |
2641 | |
2642 | static int niu_num_alt_addr(struct niu *np) |
2643 | { |
2644 | if (np->flags & NIU_FLAGS_XMAC) |
2645 | return XMAC_NUM_ALT_ADDR; |
2646 | else |
2647 | return BMAC_NUM_ALT_ADDR; |
2648 | } |
2649 | |
2650 | static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) |
2651 | { |
2652 | u16 reg0 = addr[4] << 8 | addr[5]; |
2653 | u16 reg1 = addr[2] << 8 | addr[3]; |
2654 | u16 reg2 = addr[0] << 8 | addr[1]; |
2655 | |
2656 | if (index >= niu_num_alt_addr(np)) |
2657 | return -EINVAL; |
2658 | |
2659 | if (np->flags & NIU_FLAGS_XMAC) { |
2660 | nw64_mac(XMAC_ALT_ADDR0(index), reg0); |
2661 | nw64_mac(XMAC_ALT_ADDR1(index), reg1); |
2662 | nw64_mac(XMAC_ALT_ADDR2(index), reg2); |
2663 | } else { |
2664 | nw64_mac(BMAC_ALT_ADDR0(index), reg0); |
2665 | nw64_mac(BMAC_ALT_ADDR1(index), reg1); |
2666 | nw64_mac(BMAC_ALT_ADDR2(index), reg2); |
2667 | } |
2668 | |
2669 | return 0; |
2670 | } |
2671 | |
2672 | static int niu_enable_alt_mac(struct niu *np, int index, int on) |
2673 | { |
2674 | unsigned long reg; |
2675 | u64 val, mask; |
2676 | |
2677 | if (index >= niu_num_alt_addr(np)) |
2678 | return -EINVAL; |
2679 | |
2680 | if (np->flags & NIU_FLAGS_XMAC) { |
2681 | reg = XMAC_ADDR_CMPEN; |
2682 | mask = 1 << index; |
2683 | } else { |
2684 | reg = BMAC_ADDR_CMPEN; |
2685 | mask = 1 << (index + 1); |
2686 | } |
2687 | |
2688 | val = nr64_mac(reg); |
2689 | if (on) |
2690 | val |= mask; |
2691 | else |
2692 | val &= ~mask; |
2693 | nw64_mac(reg, val); |
2694 | |
2695 | return 0; |
2696 | } |
2697 | |
2698 | static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, |
2699 | int num, int mac_pref) |
2700 | { |
2701 | u64 val = nr64_mac(reg); |
2702 | val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); |
2703 | val |= num; |
2704 | if (mac_pref) |
2705 | val |= HOST_INFO_MPR; |
2706 | nw64_mac(reg, val); |
2707 | } |
2708 | |
2709 | static int __set_rdc_table_num(struct niu *np, |
2710 | int xmac_index, int bmac_index, |
2711 | int rdc_table_num, int mac_pref) |
2712 | { |
2713 | unsigned long reg; |
2714 | |
2715 | if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) |
2716 | return -EINVAL; |
2717 | if (np->flags & NIU_FLAGS_XMAC) |
2718 | reg = XMAC_HOST_INFO(xmac_index); |
2719 | else |
2720 | reg = BMAC_HOST_INFO(bmac_index); |
	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2722 | return 0; |
2723 | } |
2724 | |
2725 | static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, |
2726 | int mac_pref) |
2727 | { |
	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2729 | } |
2730 | |
2731 | static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, |
2732 | int mac_pref) |
2733 | { |
	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2735 | } |
2736 | |
2737 | static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, |
2738 | int table_num, int mac_pref) |
2739 | { |
2740 | if (idx >= niu_num_alt_addr(np)) |
2741 | return -EINVAL; |
	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2743 | } |
2744 | |
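/* Each VLAN table entry carries two parity bits: PARITY0 covers the
 * port 0/1 fields (low byte) and PARITY1 the port 2/3 fields (high
 * byte).  Both must be recomputed whenever any field changes.
 */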
2745 | static u64 vlan_entry_set_parity(u64 reg_val) |
2746 | { |
2747 | u64 port01_mask; |
2748 | u64 port23_mask; |
2749 | |
2750 | port01_mask = 0x00ff; |
2751 | port23_mask = 0xff00; |
2752 | |
2753 | if (hweight64(reg_val & port01_mask) & 1) |
2754 | reg_val |= ENET_VLAN_TBL_PARITY0; |
2755 | else |
2756 | reg_val &= ~ENET_VLAN_TBL_PARITY0; |
2757 | |
2758 | if (hweight64(reg_val & port23_mask) & 1) |
2759 | reg_val |= ENET_VLAN_TBL_PARITY1; |
2760 | else |
2761 | reg_val &= ~ENET_VLAN_TBL_PARITY1; |
2762 | |
2763 | return reg_val; |
2764 | } |
2765 | |
2766 | static void vlan_tbl_write(struct niu *np, unsigned long index, |
2767 | int port, int vpr, int rdc_table) |
2768 | { |
2769 | u64 reg_val = nr64(ENET_VLAN_TBL(index)); |
2770 | |
2771 | reg_val &= ~((ENET_VLAN_TBL_VPR | |
2772 | ENET_VLAN_TBL_VLANRDCTBLN) << |
2773 | ENET_VLAN_TBL_SHIFT(port)); |
2774 | if (vpr) |
2775 | reg_val |= (ENET_VLAN_TBL_VPR << |
2776 | ENET_VLAN_TBL_SHIFT(port)); |
2777 | reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); |
2778 | |
2779 | reg_val = vlan_entry_set_parity(reg_val); |
2780 | |
2781 | nw64(ENET_VLAN_TBL(index), reg_val); |
2782 | } |
2783 | |
2784 | static void vlan_tbl_clear(struct niu *np) |
2785 | { |
2786 | int i; |
2787 | |
2788 | for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) |
2789 | nw64(ENET_VLAN_TBL(i), 0); |
2790 | } |
2791 | |
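/* Poll TCAM_CTL for a completion bit, giving the hardware up to
 * roughly one millisecond (1000 polls, 1us apart) before giving up.
 */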
2792 | static int tcam_wait_bit(struct niu *np, u64 bit) |
2793 | { |
2794 | int limit = 1000; |
2795 | |
2796 | while (--limit > 0) { |
2797 | if (nr64(TCAM_CTL) & bit) |
2798 | break; |
2799 | udelay(1); |
2800 | } |
2801 | if (limit <= 0) |
2802 | return -ENODEV; |
2803 | |
2804 | return 0; |
2805 | } |
2806 | |
2807 | static int tcam_flush(struct niu *np, int index) |
2808 | { |
2809 | nw64(TCAM_KEY_0, 0x00); |
2810 | nw64(TCAM_KEY_MASK_0, 0xff); |
2811 | nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); |
2812 | |
2813 | return tcam_wait_bit(np, TCAM_CTL_STAT); |
2814 | } |
2815 | |
2816 | #if 0 |
2817 | static int tcam_read(struct niu *np, int index, |
2818 | u64 *key, u64 *mask) |
2819 | { |
2820 | int err; |
2821 | |
2822 | nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); |
2823 | err = tcam_wait_bit(np, TCAM_CTL_STAT); |
2824 | if (!err) { |
2825 | key[0] = nr64(TCAM_KEY_0); |
2826 | key[1] = nr64(TCAM_KEY_1); |
2827 | key[2] = nr64(TCAM_KEY_2); |
2828 | key[3] = nr64(TCAM_KEY_3); |
2829 | mask[0] = nr64(TCAM_KEY_MASK_0); |
2830 | mask[1] = nr64(TCAM_KEY_MASK_1); |
2831 | mask[2] = nr64(TCAM_KEY_MASK_2); |
2832 | mask[3] = nr64(TCAM_KEY_MASK_3); |
2833 | } |
2834 | return err; |
2835 | } |
2836 | #endif |
2837 | |
2838 | static int tcam_write(struct niu *np, int index, |
2839 | u64 *key, u64 *mask) |
2840 | { |
2841 | nw64(TCAM_KEY_0, key[0]); |
2842 | nw64(TCAM_KEY_1, key[1]); |
2843 | nw64(TCAM_KEY_2, key[2]); |
2844 | nw64(TCAM_KEY_3, key[3]); |
2845 | nw64(TCAM_KEY_MASK_0, mask[0]); |
2846 | nw64(TCAM_KEY_MASK_1, mask[1]); |
2847 | nw64(TCAM_KEY_MASK_2, mask[2]); |
2848 | nw64(TCAM_KEY_MASK_3, mask[3]); |
2849 | nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); |
2850 | |
2851 | return tcam_wait_bit(np, TCAM_CTL_STAT); |
2852 | } |
2853 | |
2854 | #if 0 |
2855 | static int tcam_assoc_read(struct niu *np, int index, u64 *data) |
2856 | { |
2857 | int err; |
2858 | |
2859 | nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); |
2860 | err = tcam_wait_bit(np, TCAM_CTL_STAT); |
2861 | if (!err) |
2862 | *data = nr64(TCAM_KEY_1); |
2863 | |
2864 | return err; |
2865 | } |
2866 | #endif |
2867 | |
2868 | static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) |
2869 | { |
2870 | nw64(TCAM_KEY_1, assoc_data); |
2871 | nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); |
2872 | |
2873 | return tcam_wait_bit(np, TCAM_CTL_STAT); |
2874 | } |
2875 | |
2876 | static void tcam_enable(struct niu *np, int on) |
2877 | { |
2878 | u64 val = nr64(FFLP_CFG_1); |
2879 | |
2880 | if (on) |
2881 | val &= ~FFLP_CFG_1_TCAM_DIS; |
2882 | else |
2883 | val |= FFLP_CFG_1_TCAM_DIS; |
2884 | nw64(FFLP_CFG_1, val); |
2885 | } |
2886 | |
2887 | static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) |
2888 | { |
2889 | u64 val = nr64(FFLP_CFG_1); |
2890 | |
2891 | val &= ~(FFLP_CFG_1_FFLPINITDONE | |
2892 | FFLP_CFG_1_CAMLAT | |
2893 | FFLP_CFG_1_CAMRATIO); |
2894 | val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); |
2895 | val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); |
2896 | nw64(FFLP_CFG_1, val); |
2897 | |
2898 | val = nr64(FFLP_CFG_1); |
2899 | val |= FFLP_CFG_1_FFLPINITDONE; |
2900 | nw64(FFLP_CFG_1, val); |
2901 | } |
2902 | |
2903 | static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, |
2904 | int on) |
2905 | { |
2906 | unsigned long reg; |
2907 | u64 val; |
2908 | |
2909 | if (class < CLASS_CODE_ETHERTYPE1 || |
2910 | class > CLASS_CODE_ETHERTYPE2) |
2911 | return -EINVAL; |
2912 | |
2913 | reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); |
2914 | val = nr64(reg); |
2915 | if (on) |
2916 | val |= L2_CLS_VLD; |
2917 | else |
2918 | val &= ~L2_CLS_VLD; |
2919 | nw64(reg, val); |
2920 | |
2921 | return 0; |
2922 | } |
2923 | |
2924 | #if 0 |
2925 | static int tcam_user_eth_class_set(struct niu *np, unsigned long class, |
2926 | u64 ether_type) |
2927 | { |
2928 | unsigned long reg; |
2929 | u64 val; |
2930 | |
2931 | if (class < CLASS_CODE_ETHERTYPE1 || |
2932 | class > CLASS_CODE_ETHERTYPE2 || |
2933 | (ether_type & ~(u64)0xffff) != 0) |
2934 | return -EINVAL; |
2935 | |
2936 | reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); |
2937 | val = nr64(reg); |
2938 | val &= ~L2_CLS_ETYPE; |
2939 | val |= (ether_type << L2_CLS_ETYPE_SHIFT); |
2940 | nw64(reg, val); |
2941 | |
2942 | return 0; |
2943 | } |
2944 | #endif |
2945 | |
2946 | static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, |
2947 | int on) |
2948 | { |
2949 | unsigned long reg; |
2950 | u64 val; |
2951 | |
2952 | if (class < CLASS_CODE_USER_PROG1 || |
2953 | class > CLASS_CODE_USER_PROG4) |
2954 | return -EINVAL; |
2955 | |
2956 | reg = L3_CLS(class - CLASS_CODE_USER_PROG1); |
2957 | val = nr64(reg); |
2958 | if (on) |
2959 | val |= L3_CLS_VALID; |
2960 | else |
2961 | val &= ~L3_CLS_VALID; |
2962 | nw64(reg, val); |
2963 | |
2964 | return 0; |
2965 | } |
2966 | |
2967 | static int tcam_user_ip_class_set(struct niu *np, unsigned long class, |
2968 | int ipv6, u64 protocol_id, |
2969 | u64 tos_mask, u64 tos_val) |
2970 | { |
2971 | unsigned long reg; |
2972 | u64 val; |
2973 | |
2974 | if (class < CLASS_CODE_USER_PROG1 || |
2975 | class > CLASS_CODE_USER_PROG4 || |
2976 | (protocol_id & ~(u64)0xff) != 0 || |
2977 | (tos_mask & ~(u64)0xff) != 0 || |
2978 | (tos_val & ~(u64)0xff) != 0) |
2979 | return -EINVAL; |
2980 | |
2981 | reg = L3_CLS(class - CLASS_CODE_USER_PROG1); |
2982 | val = nr64(reg); |
2983 | val &= ~(L3_CLS_IPVER | L3_CLS_PID | |
2984 | L3_CLS_TOSMASK | L3_CLS_TOS); |
2985 | if (ipv6) |
2986 | val |= L3_CLS_IPVER; |
2987 | val |= (protocol_id << L3_CLS_PID_SHIFT); |
2988 | val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); |
2989 | val |= (tos_val << L3_CLS_TOS_SHIFT); |
2990 | nw64(reg, val); |
2991 | |
2992 | return 0; |
2993 | } |
2994 | |
2995 | static int tcam_early_init(struct niu *np) |
2996 | { |
2997 | unsigned long i; |
2998 | int err; |
2999 | |
	tcam_enable(np, 0);
3001 | tcam_set_lat_and_ratio(np, |
3002 | DEFAULT_TCAM_LATENCY, |
3003 | DEFAULT_TCAM_ACCESS_RATIO); |
3004 | for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { |
		err = tcam_user_eth_class_enable(np, i, 0);
3006 | if (err) |
3007 | return err; |
3008 | } |
3009 | for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { |
		err = tcam_user_ip_class_enable(np, i, 0);
3011 | if (err) |
3012 | return err; |
3013 | } |
3014 | |
3015 | return 0; |
3016 | } |
3017 | |
3018 | static int tcam_flush_all(struct niu *np) |
3019 | { |
3020 | unsigned long i; |
3021 | |
3022 | for (i = 0; i < np->parent->tcam_num_entries; i++) { |
		int err = tcam_flush(np, i);
3024 | if (err) |
3025 | return err; |
3026 | } |
3027 | return 0; |
3028 | } |
3029 | |
3030 | static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) |
3031 | { |
3032 | return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0); |
3033 | } |
3034 | |
3035 | #if 0 |
3036 | static int hash_read(struct niu *np, unsigned long partition, |
3037 | unsigned long index, unsigned long num_entries, |
3038 | u64 *data) |
3039 | { |
3040 | u64 val = hash_addr_regval(index, num_entries); |
3041 | unsigned long i; |
3042 | |
3043 | if (partition >= FCRAM_NUM_PARTITIONS || |
3044 | index + num_entries > FCRAM_SIZE) |
3045 | return -EINVAL; |
3046 | |
3047 | nw64(HASH_TBL_ADDR(partition), val); |
3048 | for (i = 0; i < num_entries; i++) |
3049 | data[i] = nr64(HASH_TBL_DATA(partition)); |
3050 | |
3051 | return 0; |
3052 | } |
3053 | #endif |
3054 | |
3055 | static int hash_write(struct niu *np, unsigned long partition, |
3056 | unsigned long index, unsigned long num_entries, |
3057 | u64 *data) |
3058 | { |
3059 | u64 val = hash_addr_regval(index, num_entries); |
3060 | unsigned long i; |
3061 | |
3062 | if (partition >= FCRAM_NUM_PARTITIONS || |
3063 | index + (num_entries * 8) > FCRAM_SIZE) |
3064 | return -EINVAL; |
3065 | |
3066 | nw64(HASH_TBL_ADDR(partition), val); |
3067 | for (i = 0; i < num_entries; i++) |
3068 | nw64(HASH_TBL_DATA(partition), data[i]); |
3069 | |
3070 | return 0; |
3071 | } |
3072 | |
3073 | static void fflp_reset(struct niu *np) |
3074 | { |
3075 | u64 val; |
3076 | |
3077 | nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); |
3078 | udelay(10); |
3079 | nw64(FFLP_CFG_1, 0); |
3080 | |
3081 | val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; |
3082 | nw64(FFLP_CFG_1, val); |
3083 | } |
3084 | |
3085 | static void fflp_set_timings(struct niu *np) |
3086 | { |
3087 | u64 val = nr64(FFLP_CFG_1); |
3088 | |
3089 | val &= ~FFLP_CFG_1_FFLPINITDONE; |
3090 | val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); |
3091 | nw64(FFLP_CFG_1, val); |
3092 | |
3093 | val = nr64(FFLP_CFG_1); |
3094 | val |= FFLP_CFG_1_FFLPINITDONE; |
3095 | nw64(FFLP_CFG_1, val); |
3096 | |
3097 | val = nr64(FCRAM_REF_TMR); |
3098 | val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); |
3099 | val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); |
3100 | val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); |
3101 | nw64(FCRAM_REF_TMR, val); |
3102 | } |
3103 | |
3104 | static int fflp_set_partition(struct niu *np, u64 partition, |
3105 | u64 mask, u64 base, int enable) |
3106 | { |
3107 | unsigned long reg; |
3108 | u64 val; |
3109 | |
3110 | if (partition >= FCRAM_NUM_PARTITIONS || |
3111 | (mask & ~(u64)0x1f) != 0 || |
3112 | (base & ~(u64)0x1f) != 0) |
3113 | return -EINVAL; |
3114 | |
3115 | reg = FLW_PRT_SEL(partition); |
3116 | |
3117 | val = nr64(reg); |
3118 | val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); |
3119 | val |= (mask << FLW_PRT_SEL_MASK_SHIFT); |
3120 | val |= (base << FLW_PRT_SEL_BASE_SHIFT); |
3121 | if (enable) |
3122 | val |= FLW_PRT_SEL_EXT; |
3123 | nw64(reg, val); |
3124 | |
3125 | return 0; |
3126 | } |
3127 | |
3128 | static int fflp_disable_all_partitions(struct niu *np) |
3129 | { |
3130 | unsigned long i; |
3131 | |
3132 | for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { |
		int err = fflp_set_partition(np, 0, 0, 0, 0);
3134 | if (err) |
3135 | return err; |
3136 | } |
3137 | return 0; |
3138 | } |
3139 | |
3140 | static void fflp_llcsnap_enable(struct niu *np, int on) |
3141 | { |
3142 | u64 val = nr64(FFLP_CFG_1); |
3143 | |
3144 | if (on) |
3145 | val |= FFLP_CFG_1_LLCSNAP; |
3146 | else |
3147 | val &= ~FFLP_CFG_1_LLCSNAP; |
3148 | nw64(FFLP_CFG_1, val); |
3149 | } |
3150 | |
3151 | static void fflp_errors_enable(struct niu *np, int on) |
3152 | { |
3153 | u64 val = nr64(FFLP_CFG_1); |
3154 | |
3155 | if (on) |
3156 | val &= ~FFLP_CFG_1_ERRORDIS; |
3157 | else |
3158 | val |= FFLP_CFG_1_ERRORDIS; |
3159 | nw64(FFLP_CFG_1, val); |
3160 | } |
3161 | |
3162 | static int fflp_hash_clear(struct niu *np) |
3163 | { |
3164 | struct fcram_hash_ipv4 ent; |
3165 | unsigned long i; |
3166 | |
3167 | /* IPV4 hash entry with valid bit clear, rest is don't care. */ |
3168 | memset(&ent, 0, sizeof(ent)); |
3169 | ent.header = HASH_HEADER_EXT; |
3170 | |
3171 | for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { |
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3173 | if (err) |
3174 | return err; |
3175 | } |
3176 | return 0; |
3177 | } |
3178 | |
3179 | static int fflp_early_init(struct niu *np) |
3180 | { |
3181 | struct niu_parent *parent; |
3182 | unsigned long flags; |
3183 | int err; |
3184 | |
3185 | niu_lock_parent(np, flags); |
3186 | |
3187 | parent = np->parent; |
3188 | err = 0; |
3189 | if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { |
3190 | if (np->parent->plat_type != PLAT_TYPE_NIU) { |
3191 | fflp_reset(np); |
3192 | fflp_set_timings(np); |
3193 | err = fflp_disable_all_partitions(np); |
3194 | if (err) { |
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_disable_all_partitions failed, err=%d\n",
					     err);
3198 | goto out; |
3199 | } |
3200 | } |
3201 | |
3202 | err = tcam_early_init(np); |
3203 | if (err) { |
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_early_init failed, err=%d\n", err);
3206 | goto out; |
3207 | } |
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);
3210 | nw64(H1POLY, 0); |
3211 | nw64(H2POLY, 0); |
3212 | |
3213 | err = tcam_flush_all(np); |
3214 | if (err) { |
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_flush_all failed, err=%d\n", err);
3217 | goto out; |
3218 | } |
3219 | if (np->parent->plat_type != PLAT_TYPE_NIU) { |
3220 | err = fflp_hash_clear(np); |
3221 | if (err) { |
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_hash_clear failed, err=%d\n",
					     err);
3225 | goto out; |
3226 | } |
3227 | } |
3228 | |
3229 | vlan_tbl_clear(np); |
3230 | |
3231 | parent->flags |= PARENT_FLGS_CLS_HWINIT; |
3232 | } |
3233 | out: |
3234 | niu_unlock_parent(np, flags); |
3235 | return err; |
3236 | } |
3237 | |
3238 | static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) |
3239 | { |
3240 | if (class_code < CLASS_CODE_USER_PROG1 || |
3241 | class_code > CLASS_CODE_SCTP_IPV6) |
3242 | return -EINVAL; |
3243 | |
3244 | nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); |
3245 | return 0; |
3246 | } |
3247 | |
3248 | static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) |
3249 | { |
3250 | if (class_code < CLASS_CODE_USER_PROG1 || |
3251 | class_code > CLASS_CODE_SCTP_IPV6) |
3252 | return -EINVAL; |
3253 | |
3254 | nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); |
3255 | return 0; |
3256 | } |
3257 | |
3258 | /* Entries for the ports are interleaved in the TCAM */ |
3259 | static u16 tcam_get_index(struct niu *np, u16 idx) |
3260 | { |
3261 | /* One entry reserved for IP fragment rule */ |
3262 | if (idx >= (np->clas.tcam_sz - 1)) |
3263 | idx = 0; |
3264 | return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); |
3265 | } |
3266 | |
3267 | static u16 tcam_get_size(struct niu *np) |
3268 | { |
3269 | /* One entry reserved for IP fragment rule */ |
3270 | return np->clas.tcam_sz - 1; |
3271 | } |
3272 | |
3273 | static u16 tcam_get_valid_entry_cnt(struct niu *np) |
3274 | { |
3275 | /* One entry reserved for IP fragment rule */ |
3276 | return np->clas.tcam_valid_entries - 1; |
3277 | } |
3278 | |
3279 | static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, |
3280 | u32 offset, u32 size, u32 truesize) |
3281 | { |
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
3283 | |
3284 | skb->len += size; |
3285 | skb->data_len += size; |
3286 | skb->truesize += truesize; |
3287 | } |
3288 | |
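/* Hash the DMA address of an RX page into an rxhash chain index by
 * folding the page frame number onto itself.
 */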
3289 | static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) |
3290 | { |
3291 | a >>= PAGE_SHIFT; |
3292 | a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); |
3293 | |
3294 | return a & (MAX_RBR_RING_SIZE - 1); |
3295 | } |
3296 | |
3297 | static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, |
3298 | struct page ***link) |
3299 | { |
	unsigned int h = niu_hash_rxaddr(rp, addr);
3301 | struct page *p, **pp; |
3302 | |
3303 | addr &= PAGE_MASK; |
3304 | pp = &rp->rxhash[h]; |
3305 | for (; (p = *pp) != NULL; pp = &niu_next_page(p)) { |
3306 | if (p->index == addr) { |
3307 | *link = pp; |
3308 | goto found; |
3309 | } |
3310 | } |
3311 | BUG(); |
3312 | |
3313 | found: |
3314 | return p; |
3315 | } |
3316 | |
3317 | static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) |
3318 | { |
	unsigned int h = niu_hash_rxaddr(rp, base);
3320 | |
3321 | page->index = base; |
3322 | niu_next_page(page) = rp->rxhash[h]; |
3323 | rp->rxhash[h] = page; |
3324 | } |
3325 | |
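/* Allocate and DMA-map one page, file it in the rxhash table keyed
 * by its DMA address, and carve it into rbr_blocks_per_page RBR
 * entries, taking one extra page reference per additional block.
 */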
3326 | static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, |
3327 | gfp_t mask, int start_index) |
3328 | { |
3329 | struct page *page; |
3330 | u64 addr; |
3331 | int i; |
3332 | |
3333 | page = alloc_page(mask); |
3334 | if (!page) |
3335 | return -ENOMEM; |
3336 | |
3337 | addr = np->ops->map_page(np->device, page, 0, |
3338 | PAGE_SIZE, DMA_FROM_DEVICE); |
3339 | if (!addr) { |
3340 | __free_page(page); |
3341 | return -ENOMEM; |
3342 | } |
3343 | |
	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		page_ref_add(page, rp->rbr_blocks_per_page - 1);
3347 | |
3348 | for (i = 0; i < rp->rbr_blocks_per_page; i++) { |
3349 | __le32 *rbr = &rp->rbr[start_index + i]; |
3350 | |
3351 | *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); |
3352 | addr += rp->rbr_block_size; |
3353 | } |
3354 | |
3355 | return 0; |
3356 | } |
3357 | |
3358 | static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) |
3359 | { |
3360 | int index = rp->rbr_index; |
3361 | |
3362 | rp->rbr_pending++; |
3363 | if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { |
		int err = niu_rbr_add_page(np, rp, mask, index);
3365 | |
3366 | if (unlikely(err)) { |
3367 | rp->rbr_pending--; |
3368 | return; |
3369 | } |
3370 | |
3371 | rp->rbr_index += rp->rbr_blocks_per_page; |
3372 | BUG_ON(rp->rbr_index > rp->rbr_table_size); |
3373 | if (rp->rbr_index == rp->rbr_table_size) |
3374 | rp->rbr_index = 0; |
3375 | |
3376 | if (rp->rbr_pending >= rp->rbr_kick_thresh) { |
3377 | nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); |
3378 | rp->rbr_pending = 0; |
3379 | } |
3380 | } |
3381 | } |
3382 | |
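/* Drop a packet without building an skb: walk its RCR entries
 * (chained via RCR_ENTRY_MULTI) and, once the last buffer of a page
 * has been consumed, unhash, unmap and free the page.
 */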
3383 | static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) |
3384 | { |
3385 | unsigned int index = rp->rcr_index; |
3386 | int num_rcr = 0; |
3387 | |
3388 | rp->rx_dropped++; |
3389 | while (1) { |
3390 | struct page *page, **link; |
3391 | u64 addr, val; |
3392 | u32 rcr_size; |
3393 | |
3394 | num_rcr++; |
3395 | |
		val = le64_to_cpup(&rp->rcr[index]);
3397 | addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << |
3398 | RCR_ENTRY_PKT_BUF_ADDR_SHIFT; |
		page = niu_find_rxpage(rp, addr, &link);
3400 | |
3401 | rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> |
3402 | RCR_ENTRY_PKTBUFSZ_SHIFT]; |
3403 | if ((page->index + PAGE_SIZE) - rcr_size == addr) { |
3404 | *link = niu_next_page(page); |
3405 | np->ops->unmap_page(np->device, page->index, |
3406 | PAGE_SIZE, DMA_FROM_DEVICE); |
3407 | page->index = 0; |
3408 | niu_next_page(page) = NULL; |
3409 | __free_page(page); |
3410 | rp->rbr_refill_pending++; |
3411 | } |
3412 | |
3413 | index = NEXT_RCR(rp, index); |
3414 | if (!(val & RCR_ENTRY_MULTI)) |
3415 | break; |
3416 | |
3417 | } |
3418 | rp->rcr_index = index; |
3419 | |
3420 | return num_rcr; |
3421 | } |
3422 | |
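/* Build an skb from every RCR entry belonging to one packet (entries
 * are chained via RCR_ENTRY_MULTI), attaching each buffer as a page
 * fragment, then pull the headers into the linear area and strip the
 * rx_pkt_hdr1 prefix.
 */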
3423 | static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, |
3424 | struct rx_ring_info *rp) |
3425 | { |
3426 | unsigned int index = rp->rcr_index; |
3427 | struct rx_pkt_hdr1 *rh; |
3428 | struct sk_buff *skb; |
3429 | int len, num_rcr; |
3430 | |
	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3432 | if (unlikely(!skb)) |
3433 | return niu_rx_pkt_ignore(np, rp); |
3434 | |
3435 | num_rcr = 0; |
3436 | while (1) { |
3437 | struct page *page, **link; |
3438 | u32 rcr_size, append_size; |
3439 | u64 addr, val, off; |
3440 | |
3441 | num_rcr++; |
3442 | |
		val = le64_to_cpup(&rp->rcr[index]);
3444 | |
3445 | len = (val & RCR_ENTRY_L2_LEN) >> |
3446 | RCR_ENTRY_L2_LEN_SHIFT; |
3447 | append_size = len + ETH_HLEN + ETH_FCS_LEN; |
3448 | |
3449 | addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << |
3450 | RCR_ENTRY_PKT_BUF_ADDR_SHIFT; |
		page = niu_find_rxpage(rp, addr, &link);
3452 | |
3453 | rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> |
3454 | RCR_ENTRY_PKTBUFSZ_SHIFT]; |
3455 | |
3456 | off = addr & ~PAGE_MASK; |
3457 | if (num_rcr == 1) { |
3458 | int ptype; |
3459 | |
3460 | ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); |
3461 | if ((ptype == RCR_PKT_TYPE_TCP || |
3462 | ptype == RCR_PKT_TYPE_UDP) && |
3463 | !(val & (RCR_ENTRY_NOPORT | |
3464 | RCR_ENTRY_ERROR))) |
3465 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3466 | else |
3467 | skb_checksum_none_assert(skb); |
3468 | } else if (!(val & RCR_ENTRY_MULTI)) |
3469 | append_size = append_size - skb->len; |
3470 | |
		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
3472 | if ((page->index + rp->rbr_block_size) - rcr_size == addr) { |
3473 | *link = niu_next_page(page); |
3474 | np->ops->unmap_page(np->device, page->index, |
3475 | PAGE_SIZE, DMA_FROM_DEVICE); |
3476 | page->index = 0; |
3477 | niu_next_page(page) = NULL; |
3478 | rp->rbr_refill_pending++; |
3479 | } else |
3480 | get_page(page); |
3481 | |
3482 | index = NEXT_RCR(rp, index); |
3483 | if (!(val & RCR_ENTRY_MULTI)) |
3484 | break; |
3485 | |
3486 | } |
3487 | rp->rcr_index = index; |
3488 | |
3489 | len += sizeof(*rh); |
3490 | len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); |
	__pskb_pull_tail(skb, len);
3492 | |
3493 | rh = (struct rx_pkt_hdr1 *) skb->data; |
3494 | if (np->dev->features & NETIF_F_RXHASH) |
		skb_set_hash(skb,
			     ((u32)rh->hashval2_0 << 24 |
			      (u32)rh->hashval2_1 << 16 |
			      (u32)rh->hashval1_1 << 8 |
			      (u32)rh->hashval1_2 << 0),
			     PKT_HASH_TYPE_L3);
	skb_pull(skb, sizeof(*rh));
3502 | |
3503 | rp->rx_packets++; |
3504 | rp->rx_bytes += skb->len; |
3505 | |
	skb->protocol = eth_type_trans(skb, np->dev);
	skb_record_rx_queue(skb, rp->rx_channel);
3508 | napi_gro_receive(napi, skb); |
3509 | |
3510 | return num_rcr; |
3511 | } |
3512 | |
3513 | static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) |
3514 | { |
3515 | int blocks_per_page = rp->rbr_blocks_per_page; |
3516 | int err, index = rp->rbr_index; |
3517 | |
3518 | err = 0; |
3519 | while (index < (rp->rbr_table_size - blocks_per_page)) { |
		err = niu_rbr_add_page(np, rp, mask, index);
3521 | if (unlikely(err)) |
3522 | break; |
3523 | |
3524 | index += blocks_per_page; |
3525 | } |
3526 | |
3527 | rp->rbr_index = index; |
3528 | return err; |
3529 | } |
3530 | |
3531 | static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) |
3532 | { |
3533 | int i; |
3534 | |
3535 | for (i = 0; i < MAX_RBR_RING_SIZE; i++) { |
3536 | struct page *page; |
3537 | |
3538 | page = rp->rxhash[i]; |
3539 | while (page) { |
3540 | struct page *next = niu_next_page(page); |
3541 | u64 base = page->index; |
3542 | |
3543 | np->ops->unmap_page(np->device, base, PAGE_SIZE, |
3544 | DMA_FROM_DEVICE); |
3545 | page->index = 0; |
3546 | niu_next_page(page) = NULL; |
3547 | |
3548 | __free_page(page); |
3549 | |
3550 | page = next; |
3551 | } |
3552 | } |
3553 | |
3554 | for (i = 0; i < rp->rbr_table_size; i++) |
3555 | rp->rbr[i] = cpu_to_le32(0); |
3556 | rp->rbr_index = 0; |
3557 | } |
3558 | |
3559 | static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) |
3560 | { |
3561 | struct tx_buff_info *tb = &rp->tx_buffs[idx]; |
3562 | struct sk_buff *skb = tb->skb; |
3563 | struct tx_pkt_hdr *tp; |
3564 | u64 tx_flags; |
3565 | int i, len; |
3566 | |
3567 | tp = (struct tx_pkt_hdr *) skb->data; |
	tx_flags = le64_to_cpup(&tp->flags);
3569 | |
3570 | rp->tx_packets++; |
3571 | rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - |
3572 | ((tx_flags & TXHDR_PAD) / 2)); |
3573 | |
3574 | len = skb_headlen(skb); |
3575 | np->ops->unmap_single(np->device, tb->mapping, |
3576 | len, DMA_TO_DEVICE); |
3577 | |
3578 | if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) |
3579 | rp->mark_pending--; |
3580 | |
3581 | tb->skb = NULL; |
3582 | do { |
3583 | idx = NEXT_TX(rp, idx); |
3584 | len -= MAX_TX_DESC_LEN; |
3585 | } while (len > 0); |
3586 | |
3587 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
3588 | tb = &rp->tx_buffs[idx]; |
3589 | BUG_ON(tb->skb != NULL); |
3590 | np->ops->unmap_page(np->device, tb->mapping, |
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
3592 | DMA_TO_DEVICE); |
3593 | idx = NEXT_TX(rp, idx); |
3594 | } |
3595 | |
3596 | dev_kfree_skb(skb); |
3597 | |
3598 | return idx; |
3599 | } |
3600 | |
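/* Wake a stopped TX queue only once at least a quarter of the ring
 * is free again, to avoid bouncing between the stopped and awake
 * states.
 */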
3601 | #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) |
3602 | |
3603 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) |
3604 | { |
3605 | struct netdev_queue *txq; |
3606 | u16 pkt_cnt, tmp; |
3607 | int cons, index; |
3608 | u64 cs; |
3609 | |
3610 | index = (rp - np->tx_rings); |
	txq = netdev_get_tx_queue(np->dev, index);
3612 | |
3613 | cs = rp->tx_cs; |
3614 | if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) |
3615 | goto out; |
3616 | |
3617 | tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; |
3618 | pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & |
3619 | (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); |
3620 | |
3621 | rp->last_pkt_cnt = tmp; |
3622 | |
3623 | cons = rp->cons; |
3624 | |
	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3627 | |
3628 | while (pkt_cnt--) |
		cons = release_tx_packet(np, rp, cons);
3630 | |
3631 | rp->cons = cons; |
3632 | smp_mb(); |
3633 | |
3634 | out: |
3635 | if (unlikely(netif_tx_queue_stopped(txq) && |
3636 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { |
3637 | __netif_tx_lock(txq, smp_processor_id()); |
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
3641 | __netif_tx_unlock(txq); |
3642 | } |
3643 | } |
3644 | |
3645 | static inline void niu_sync_rx_discard_stats(struct niu *np, |
3646 | struct rx_ring_info *rp, |
3647 | const int limit) |
3648 | { |
3649 | /* This elaborate scheme is needed for reading the RX discard |
3650 | * counters, as they are only 16-bit and can overflow quickly, |
3651 | * and because the overflow indication bit is not usable as |
3652 | * the counter value does not wrap, but remains at max value |
3653 | * 0xFFFF. |
3654 | * |
3655 | * In theory and in practice counters can be lost in between |
3656 | * reading nr64() and clearing the counter nw64(). For this |
3657 | * reason, the number of counter clearings nw64() is |
3658 | * limited/reduced though the limit parameter. |
3659 | */ |
3660 | int rx_channel = rp->rx_channel; |
3661 | u32 misc, wred; |
3662 | |
3663 | /* RXMISC (Receive Miscellaneous Discard Count), covers the |
3664 | * following discard events: IPP (Input Port Process), |
3665 | * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive |
3666 | * Block Ring) prefetch buffer is empty. |
3667 | */ |
3668 | misc = nr64(RXMISC(rx_channel)); |
3669 | if (unlikely((misc & RXMISC_COUNT) > limit)) { |
3670 | nw64(RXMISC(rx_channel), 0); |
3671 | rp->rx_errors += misc & RXMISC_COUNT; |
3672 | |
3673 | if (unlikely(misc & RXMISC_OFLOW)) |
			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
3675 | rx_channel); |
3676 | |
		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: MISC drop=%u over=%u\n",
			     rx_channel, misc, misc-limit);
3680 | } |
3681 | |
3682 | /* WRED (Weighted Random Early Discard) by hardware */ |
3683 | wred = nr64(RED_DIS_CNT(rx_channel)); |
3684 | if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { |
3685 | nw64(RED_DIS_CNT(rx_channel), 0); |
3686 | rp->rx_dropped += wred & RED_DIS_CNT_COUNT; |
3687 | |
3688 | if (unlikely(wred & RED_DIS_CNT_OFLOW)) |
			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
3690 | |
		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: WRED drop=%u over=%u\n",
			     rx_channel, wred, wred-limit);
3694 | } |
3695 | } |
3696 | |
3697 | static int niu_rx_work(struct napi_struct *napi, struct niu *np, |
3698 | struct rx_ring_info *rp, int budget) |
3699 | { |
3700 | int qlen, rcr_done = 0, work_done = 0; |
3701 | struct rxdma_mailbox *mbox = rp->mbox; |
3702 | u64 stat; |
3703 | |
3704 | #if 1 |
3705 | stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); |
3706 | qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; |
3707 | #else |
3708 | stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); |
3709 | qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); |
3710 | #endif |
3711 | mbox->rx_dma_ctl_stat = 0; |
3712 | mbox->rcrstat_a = 0; |
3713 | |
	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
3717 | |
3718 | rcr_done = work_done = 0; |
3719 | qlen = min(qlen, budget); |
3720 | while (work_done < qlen) { |
3721 | rcr_done += niu_process_rx_pkt(napi, np, rp); |
3722 | work_done++; |
3723 | } |
3724 | |
3725 | if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { |
3726 | unsigned int i; |
3727 | |
3728 | for (i = 0; i < rp->rbr_refill_pending; i++) |
3729 | niu_rbr_refill(np, rp, GFP_ATOMIC); |
3730 | rp->rbr_refill_pending = 0; |
3731 | } |
3732 | |
3733 | stat = (RX_DMA_CTL_STAT_MEX | |
3734 | ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | |
3735 | ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); |
3736 | |
3737 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); |
3738 | |
3739 | /* Only sync discards stats when qlen indicate potential for drops */ |
3740 | if (qlen > 10) |
		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3742 | |
3743 | return work_done; |
3744 | } |
3745 | |
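/* The LDG v0 status word packs TX LDN bits in its upper 32 bits and
 * RX LDN bits in the lower 32.  Service each flagged channel, then
 * write 0 to its LD_IM0 mask so it can raise interrupts again.
 */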
3746 | static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) |
3747 | { |
3748 | u64 v0 = lp->v0; |
3749 | u32 tx_vec = (v0 >> 32); |
3750 | u32 rx_vec = (v0 & 0xffffffff); |
3751 | int i, work_done = 0; |
3752 | |
3753 | netif_printk(np, intr, KERN_DEBUG, np->dev, |
		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
3755 | |
3756 | for (i = 0; i < np->num_tx_rings; i++) { |
3757 | struct tx_ring_info *rp = &np->tx_rings[i]; |
3758 | if (tx_vec & (1 << rp->tx_channel)) |
3759 | niu_tx_work(np, rp); |
3760 | nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); |
3761 | } |
3762 | |
3763 | for (i = 0; i < np->num_rx_rings; i++) { |
3764 | struct rx_ring_info *rp = &np->rx_rings[i]; |
3765 | |
3766 | if (rx_vec & (1 << rp->rx_channel)) { |
3767 | int this_work_done; |
3768 | |
			this_work_done = niu_rx_work(&lp->napi, np, rp,
						     budget);
3771 | |
3772 | budget -= this_work_done; |
3773 | work_done += this_work_done; |
3774 | } |
3775 | nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); |
3776 | } |
3777 | |
3778 | return work_done; |
3779 | } |
3780 | |
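/* NAPI poll callback.  If less than the full budget was consumed,
 * polling is complete: report it and re-arm the logical device
 * group's interrupt.
 */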
3781 | static int niu_poll(struct napi_struct *napi, int budget) |
3782 | { |
3783 | struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); |
3784 | struct niu *np = lp->np; |
3785 | int work_done; |
3786 | |
3787 | work_done = niu_poll_core(np, lp, budget); |
3788 | |
3789 | if (work_done < budget) { |
		napi_complete_done(napi, work_done);
		niu_ldg_rearm(np, lp, 1);
3792 | } |
3793 | return work_done; |
3794 | } |
3795 | |
3796 | static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, |
3797 | u64 stat) |
3798 | { |
	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
3800 | |
	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
		pr_cont("RBR_TMOUT ");
	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
		pr_cont("RSP_CNT ");
	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
		pr_cont("BYTE_EN_BUS ");
	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
		pr_cont("RSP_DAT ");
	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
		pr_cont("RCR_ACK ");
	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
		pr_cont("RCR_SHA_PAR ");
	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
		pr_cont("RBR_PRE_PAR ");
	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
		pr_cont("CONFIG ");
	if (stat & RX_DMA_CTL_STAT_RCRINCON)
		pr_cont("RCRINCON ");
	if (stat & RX_DMA_CTL_STAT_RCRFULL)
		pr_cont("RCRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRFULL)
		pr_cont("RBRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
		pr_cont("RBRLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
		pr_cont("CFIGLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
		pr_cont("DC_FIFO ");

	pr_cont(")\n");
3831 | } |
3832 | |
3833 | static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) |
3834 | { |
3835 | u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); |
3836 | int err = 0; |
3837 | |
3838 | |
3839 | if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | |
3840 | RX_DMA_CTL_STAT_PORT_FATAL)) |
3841 | err = -EINVAL; |
3842 | |
3843 | if (err) { |
		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
3845 | rp->rx_channel, |
3846 | (unsigned long long) stat); |
3847 | |
3848 | niu_log_rxchan_errors(np, rp, stat); |
3849 | } |
3850 | |
3851 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), |
3852 | stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); |
3853 | |
3854 | return err; |
3855 | } |
3856 | |
3857 | static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, |
3858 | u64 cs) |
3859 | { |
	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
3861 | |
	if (cs & TX_CS_MBOX_ERR)
		pr_cont("MBOX ");
	if (cs & TX_CS_PKT_SIZE_ERR)
		pr_cont("PKT_SIZE ");
	if (cs & TX_CS_TX_RING_OFLOW)
		pr_cont("TX_RING_OFLOW ");
	if (cs & TX_CS_PREF_BUF_PAR_ERR)
		pr_cont("PREF_BUF_PAR ");
	if (cs & TX_CS_NACK_PREF)
		pr_cont("NACK_PREF ");
	if (cs & TX_CS_NACK_PKT_RD)
		pr_cont("NACK_PKT_RD ");
	if (cs & TX_CS_CONF_PART_ERR)
		pr_cont("CONF_PART ");
	if (cs & TX_CS_PKT_PRT_ERR)
		pr_cont("PKT_PTR ");

	pr_cont(")\n");
3880 | } |
3881 | |
3882 | static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) |
3883 | { |
3884 | u64 cs, logh, logl; |
3885 | |
3886 | cs = nr64(TX_CS(rp->tx_channel)); |
3887 | logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); |
3888 | logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); |
3889 | |
	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3891 | rp->tx_channel, |
3892 | (unsigned long long)cs, |
3893 | (unsigned long long)logh, |
3894 | (unsigned long long)logl); |
3895 | |
3896 | niu_log_txchan_errors(np, rp, cs); |
3897 | |
3898 | return -ENODEV; |
3899 | } |
3900 | |
3901 | static int niu_mif_interrupt(struct niu *np) |
3902 | { |
3903 | u64 mif_status = nr64(MIF_STATUS); |
3904 | int phy_mdint = 0; |
3905 | |
3906 | if (np->flags & NIU_FLAGS_XMAC) { |
3907 | u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); |
3908 | |
3909 | if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) |
3910 | phy_mdint = 1; |
3911 | } |
3912 | |
	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3914 | (unsigned long long)mif_status, phy_mdint); |
3915 | |
3916 | return -ENODEV; |
3917 | } |
3918 | |
3919 | static void niu_xmac_interrupt(struct niu *np) |
3920 | { |
3921 | struct niu_xmac_stats *mp = &np->mac_stats.xmac; |
3922 | u64 val; |
3923 | |
3924 | val = nr64_mac(XTXMAC_STATUS); |
3925 | if (val & XTXMAC_STATUS_FRAME_CNT_EXP) |
3926 | mp->tx_frames += TXMAC_FRM_CNT_COUNT; |
3927 | if (val & XTXMAC_STATUS_BYTE_CNT_EXP) |
3928 | mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; |
3929 | if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) |
3930 | mp->tx_fifo_errors++; |
3931 | if (val & XTXMAC_STATUS_TXMAC_OFLOW) |
3932 | mp->tx_overflow_errors++; |
3933 | if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) |
3934 | mp->tx_max_pkt_size_errors++; |
3935 | if (val & XTXMAC_STATUS_TXMAC_UFLOW) |
3936 | mp->tx_underflow_errors++; |
3937 | |
3938 | val = nr64_mac(XRXMAC_STATUS); |
3939 | if (val & XRXMAC_STATUS_LCL_FLT_STATUS) |
3940 | mp->rx_local_faults++; |
3941 | if (val & XRXMAC_STATUS_RFLT_DET) |
3942 | mp->rx_remote_faults++; |
3943 | if (val & XRXMAC_STATUS_LFLT_CNT_EXP) |
3944 | mp->rx_link_faults += LINK_FAULT_CNT_COUNT; |
3945 | if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) |
3946 | mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; |
3947 | if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) |
3948 | mp->rx_frags += RXMAC_FRAG_CNT_COUNT; |
3949 | if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) |
3950 | mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; |
3951 | if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) |
3952 | mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; |
3953 | if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) |
3954 | mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; |
3955 | if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) |
3956 | mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; |
3957 | if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) |
3958 | mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; |
3959 | if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) |
3960 | mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; |
3961 | if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) |
3962 | mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; |
3963 | if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) |
3964 | mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; |
3965 | if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) |
3966 | mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; |
3967 | if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) |
3968 | mp->rx_octets += RXMAC_BT_CNT_COUNT; |
3969 | if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) |
3970 | mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; |
3971 | if (val & XRXMAC_STATUS_LENERR_CNT_EXP) |
3972 | mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; |
3973 | if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) |
3974 | mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; |
3975 | if (val & XRXMAC_STATUS_RXUFLOW) |
3976 | mp->rx_underflows++; |
3977 | if (val & XRXMAC_STATUS_RXOFLOW) |
3978 | mp->rx_overflows++; |
3979 | |
3980 | val = nr64_mac(XMAC_FC_STAT); |
3981 | if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) |
3982 | mp->pause_off_state++; |
3983 | if (val & XMAC_FC_STAT_TX_MAC_PAUSE) |
3984 | mp->pause_on_state++; |
3985 | if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) |
3986 | mp->pause_received++; |
3987 | } |
3988 | |
3989 | static void niu_bmac_interrupt(struct niu *np) |
3990 | { |
3991 | struct niu_bmac_stats *mp = &np->mac_stats.bmac; |
3992 | u64 val; |
3993 | |
3994 | val = nr64_mac(BTXMAC_STATUS); |
3995 | if (val & BTXMAC_STATUS_UNDERRUN) |
3996 | mp->tx_underflow_errors++; |
3997 | if (val & BTXMAC_STATUS_MAX_PKT_ERR) |
3998 | mp->tx_max_pkt_size_errors++; |
3999 | if (val & BTXMAC_STATUS_BYTE_CNT_EXP) |
4000 | mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; |
4001 | if (val & BTXMAC_STATUS_FRAME_CNT_EXP) |
4002 | mp->tx_frames += BTXMAC_FRM_CNT_COUNT; |
4003 | |
4004 | val = nr64_mac(BRXMAC_STATUS); |
4005 | if (val & BRXMAC_STATUS_OVERFLOW) |
4006 | mp->rx_overflows++; |
4007 | if (val & BRXMAC_STATUS_FRAME_CNT_EXP) |
4008 | mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; |
4009 | if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) |
4010 | mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; |
4011 | if (val & BRXMAC_STATUS_CRC_ERR_EXP) |
4012 | mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; |
4013 | if (val & BRXMAC_STATUS_LEN_ERR_EXP) |
4014 | mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; |
4015 | |
4016 | val = nr64_mac(BMAC_CTRL_STATUS); |
4017 | if (val & BMAC_CTRL_STATUS_NOPAUSE) |
4018 | mp->pause_off_state++; |
4019 | if (val & BMAC_CTRL_STATUS_PAUSE) |
4020 | mp->pause_on_state++; |
4021 | if (val & BMAC_CTRL_STATUS_PAUSE_RECV) |
4022 | mp->pause_received++; |
4023 | } |
4024 | |
4025 | static int niu_mac_interrupt(struct niu *np) |
4026 | { |
4027 | if (np->flags & NIU_FLAGS_XMAC) |
4028 | niu_xmac_interrupt(np); |
4029 | else |
4030 | niu_bmac_interrupt(np); |
4031 | |
4032 | return 0; |
4033 | } |
4034 | |
4035 | static void niu_log_device_error(struct niu *np, u64 stat) |
4036 | { |
	netdev_err(np->dev, "Core device errors ( ");
4038 | |
	if (stat & SYS_ERR_MASK_META2)
		pr_cont("META2 ");
	if (stat & SYS_ERR_MASK_META1)
		pr_cont("META1 ");
	if (stat & SYS_ERR_MASK_PEU)
		pr_cont("PEU ");
	if (stat & SYS_ERR_MASK_TXC)
		pr_cont("TXC ");
	if (stat & SYS_ERR_MASK_RDMC)
		pr_cont("RDMC ");
	if (stat & SYS_ERR_MASK_TDMC)
		pr_cont("TDMC ");
	if (stat & SYS_ERR_MASK_ZCP)
		pr_cont("ZCP ");
	if (stat & SYS_ERR_MASK_FFLP)
		pr_cont("FFLP ");
	if (stat & SYS_ERR_MASK_IPP)
		pr_cont("IPP ");
	if (stat & SYS_ERR_MASK_MAC)
		pr_cont("MAC ");
	if (stat & SYS_ERR_MASK_SMX)
		pr_cont("SMX ");

	pr_cont(")\n");
4063 | } |
4064 | |
4065 | static int niu_device_error(struct niu *np) |
4066 | { |
4067 | u64 stat = nr64(SYS_ERR_STAT); |
4068 | |
	netdev_err(np->dev, "Core device error, stat[%llx]\n",
4070 | (unsigned long long)stat); |
4071 | |
4072 | niu_log_device_error(np, stat); |
4073 | |
4074 | return -ENODEV; |
4075 | } |
4076 | |
4077 | static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, |
4078 | u64 v0, u64 v1, u64 v2) |
4079 | { |
	int i, err = 0;
4082 | |
4083 | lp->v0 = v0; |
4084 | lp->v1 = v1; |
4085 | lp->v2 = v2; |
4086 | |
4087 | if (v1 & 0x00000000ffffffffULL) { |
4088 | u32 rx_vec = (v1 & 0xffffffff); |
4089 | |
4090 | for (i = 0; i < np->num_rx_rings; i++) { |
4091 | struct rx_ring_info *rp = &np->rx_rings[i]; |
4092 | |
4093 | if (rx_vec & (1 << rp->rx_channel)) { |
4094 | int r = niu_rx_error(np, rp); |
4095 | if (r) { |
4096 | err = r; |
4097 | } else { |
4098 | if (!v0) |
4099 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), |
4100 | RX_DMA_CTL_STAT_MEX); |
4101 | } |
4102 | } |
4103 | } |
4104 | } |
4105 | if (v1 & 0x7fffffff00000000ULL) { |
4106 | u32 tx_vec = (v1 >> 32) & 0x7fffffff; |
4107 | |
4108 | for (i = 0; i < np->num_tx_rings; i++) { |
4109 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4110 | |
4111 | if (tx_vec & (1 << rp->tx_channel)) { |
4112 | int r = niu_tx_error(np, rp); |
4113 | if (r) |
4114 | err = r; |
4115 | } |
4116 | } |
4117 | } |
4118 | if ((v0 | v1) & 0x8000000000000000ULL) { |
4119 | int r = niu_mif_interrupt(np); |
4120 | if (r) |
4121 | err = r; |
4122 | } |
4123 | if (v2) { |
4124 | if (v2 & 0x01ef) { |
4125 | int r = niu_mac_interrupt(np); |
4126 | if (r) |
4127 | err = r; |
4128 | } |
4129 | if (v2 & 0x0210) { |
4130 | int r = niu_device_error(np); |
4131 | if (r) |
4132 | err = r; |
4133 | } |
4134 | } |
4135 | |
4136 | if (err) |
		niu_enable_interrupts(np, 0);
4138 | |
4139 | return err; |
4140 | } |
4141 | |
4142 | static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, |
4143 | int ldn) |
4144 | { |
4145 | struct rxdma_mailbox *mbox = rp->mbox; |
	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
4147 | |
4148 | stat_write = (RX_DMA_CTL_STAT_RCRTHRES | |
4149 | RX_DMA_CTL_STAT_RCRTO); |
4150 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); |
4151 | |
4152 | netif_printk(np, intr, KERN_DEBUG, np->dev, |
		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
4154 | } |
4155 | |
4156 | static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, |
4157 | int ldn) |
4158 | { |
4159 | rp->tx_cs = nr64(TX_CS(rp->tx_channel)); |
4160 | |
4161 | netif_printk(np, intr, KERN_DEBUG, np->dev, |
		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
4163 | } |
4164 | |
4165 | static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) |
4166 | { |
4167 | struct niu_parent *parent = np->parent; |
4168 | u32 rx_vec, tx_vec; |
4169 | int i; |
4170 | |
4171 | tx_vec = (v0 >> 32); |
4172 | rx_vec = (v0 & 0xffffffff); |
4173 | |
4174 | for (i = 0; i < np->num_rx_rings; i++) { |
4175 | struct rx_ring_info *rp = &np->rx_rings[i]; |
4176 | int ldn = LDN_RXDMA(rp->rx_channel); |
4177 | |
4178 | if (parent->ldg_map[ldn] != ldg) |
4179 | continue; |
4180 | |
4181 | nw64(LD_IM0(ldn), LD_IM0_MASK); |
4182 | if (rx_vec & (1 << rp->rx_channel)) |
4183 | niu_rxchan_intr(np, rp, ldn); |
4184 | } |
4185 | |
4186 | for (i = 0; i < np->num_tx_rings; i++) { |
4187 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4188 | int ldn = LDN_TXDMA(rp->tx_channel); |
4189 | |
4190 | if (parent->ldg_map[ldn] != ldg) |
4191 | continue; |
4192 | |
4193 | nw64(LD_IM0(ldn), LD_IM0_MASK); |
4194 | if (tx_vec & (1 << rp->tx_channel)) |
4195 | niu_txchan_intr(np, rp, ldn); |
4196 | } |
4197 | } |
4198 | |
4199 | static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, |
4200 | u64 v0, u64 v1, u64 v2) |
4201 | { |
4202 | if (likely(napi_schedule_prep(&lp->napi))) { |
4203 | lp->v0 = v0; |
4204 | lp->v1 = v1; |
4205 | lp->v2 = v2; |
		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
		__napi_schedule(&lp->napi);
4208 | } |
4209 | } |
4210 | |
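/* Interrupt handler for one logical device group.  The three LDSV
 * state vectors are sampled under np->lock; an all-zero sample
 * means the (possibly shared) line fired for someone else.  MIF
 * events and any v1/v2 bits take the slow path, everything else
 * is deferred to NAPI.
 */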
4211 | static irqreturn_t niu_interrupt(int irq, void *dev_id) |
4212 | { |
4213 | struct niu_ldg *lp = dev_id; |
4214 | struct niu *np = lp->np; |
4215 | int ldg = lp->ldg_num; |
4216 | unsigned long flags; |
4217 | u64 v0, v1, v2; |
4218 | |
4219 | if (netif_msg_intr(np)) |
		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
4221 | __func__, lp, ldg); |
4222 | |
4223 | spin_lock_irqsave(&np->lock, flags); |
4224 | |
4225 | v0 = nr64(LDSV0(ldg)); |
4226 | v1 = nr64(LDSV1(ldg)); |
4227 | v2 = nr64(LDSV2(ldg)); |
4228 | |
4229 | if (netif_msg_intr(np)) |
		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4231 | (unsigned long long) v0, |
4232 | (unsigned long long) v1, |
4233 | (unsigned long long) v2); |
4234 | |
4235 | if (unlikely(!v0 && !v1 && !v2)) { |
		spin_unlock_irqrestore(&np->lock, flags);
4237 | return IRQ_NONE; |
4238 | } |
4239 | |
4240 | if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { |
4241 | int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); |
4242 | if (err) |
4243 | goto out; |
4244 | } |
4245 | if (likely(v0 & ~((u64)1 << LDN_MIF))) |
4246 | niu_schedule_napi(np, lp, v0, v1, v2); |
4247 | else |
		niu_ldg_rearm(np, lp, 1);
out:
	spin_unlock_irqrestore(&np->lock, flags);
4251 | |
4252 | return IRQ_HANDLED; |
4253 | } |
4254 | |
4255 | static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) |
4256 | { |
4257 | if (rp->mbox) { |
4258 | np->ops->free_coherent(np->device, |
4259 | sizeof(struct rxdma_mailbox), |
4260 | rp->mbox, rp->mbox_dma); |
4261 | rp->mbox = NULL; |
4262 | } |
4263 | if (rp->rcr) { |
4264 | np->ops->free_coherent(np->device, |
4265 | MAX_RCR_RING_SIZE * sizeof(__le64), |
4266 | rp->rcr, rp->rcr_dma); |
4267 | rp->rcr = NULL; |
4268 | rp->rcr_table_size = 0; |
4269 | rp->rcr_index = 0; |
4270 | } |
4271 | if (rp->rbr) { |
4272 | niu_rbr_free(np, rp); |
4273 | |
4274 | np->ops->free_coherent(np->device, |
4275 | MAX_RBR_RING_SIZE * sizeof(__le32), |
4276 | rp->rbr, rp->rbr_dma); |
4277 | rp->rbr = NULL; |
4278 | rp->rbr_table_size = 0; |
4279 | rp->rbr_index = 0; |
4280 | } |
	kfree(rp->rxhash);
4282 | rp->rxhash = NULL; |
4283 | } |
4284 | |
4285 | static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) |
4286 | { |
4287 | if (rp->mbox) { |
4288 | np->ops->free_coherent(np->device, |
4289 | sizeof(struct txdma_mailbox), |
4290 | rp->mbox, rp->mbox_dma); |
4291 | rp->mbox = NULL; |
4292 | } |
4293 | if (rp->descr) { |
4294 | int i; |
4295 | |
4296 | for (i = 0; i < MAX_TX_RING_SIZE; i++) { |
4297 | if (rp->tx_buffs[i].skb) |
			(void) release_tx_packet(np, rp, i);
4299 | } |
4300 | |
4301 | np->ops->free_coherent(np->device, |
4302 | MAX_TX_RING_SIZE * sizeof(__le64), |
4303 | rp->descr, rp->descr_dma); |
4304 | rp->descr = NULL; |
4305 | rp->pending = 0; |
4306 | rp->prod = 0; |
4307 | rp->cons = 0; |
4308 | rp->wrap_bit = 0; |
4309 | } |
4310 | } |
4311 | |
4312 | static void niu_free_channels(struct niu *np) |
4313 | { |
4314 | int i; |
4315 | |
4316 | if (np->rx_rings) { |
4317 | for (i = 0; i < np->num_rx_rings; i++) { |
4318 | struct rx_ring_info *rp = &np->rx_rings[i]; |
4319 | |
4320 | niu_free_rx_ring_info(np, rp); |
4321 | } |
		kfree(np->rx_rings);
4323 | np->rx_rings = NULL; |
4324 | np->num_rx_rings = 0; |
4325 | } |
4326 | |
4327 | if (np->tx_rings) { |
4328 | for (i = 0; i < np->num_tx_rings; i++) { |
4329 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4330 | |
4331 | niu_free_tx_ring_info(np, rp); |
4332 | } |
		kfree(np->tx_rings);
4334 | np->tx_rings = NULL; |
4335 | np->num_tx_rings = 0; |
4336 | } |
4337 | } |
4338 | |
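/* All three DMA allocations below (mailbox, RCR table, RBR table)
 * must come back 64-byte aligned; a misaligned buffer from
 * alloc_coherent is treated as a fatal setup error rather than
 * worked around.
 */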
4339 | static int niu_alloc_rx_ring_info(struct niu *np, |
4340 | struct rx_ring_info *rp) |
4341 | { |
4342 | BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); |
4343 | |
	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
4345 | GFP_KERNEL); |
4346 | if (!rp->rxhash) |
4347 | return -ENOMEM; |
4348 | |
4349 | rp->mbox = np->ops->alloc_coherent(np->device, |
4350 | sizeof(struct rxdma_mailbox), |
4351 | &rp->mbox_dma, GFP_KERNEL); |
4352 | if (!rp->mbox) |
4353 | return -ENOMEM; |
4354 | if ((unsigned long)rp->mbox & (64UL - 1)) { |
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4356 | rp->mbox); |
4357 | return -EINVAL; |
4358 | } |
4359 | |
4360 | rp->rcr = np->ops->alloc_coherent(np->device, |
4361 | MAX_RCR_RING_SIZE * sizeof(__le64), |
4362 | &rp->rcr_dma, GFP_KERNEL); |
4363 | if (!rp->rcr) |
4364 | return -ENOMEM; |
4365 | if ((unsigned long)rp->rcr & (64UL - 1)) { |
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4367 | rp->rcr); |
4368 | return -EINVAL; |
4369 | } |
4370 | rp->rcr_table_size = MAX_RCR_RING_SIZE; |
4371 | rp->rcr_index = 0; |
4372 | |
4373 | rp->rbr = np->ops->alloc_coherent(np->device, |
4374 | MAX_RBR_RING_SIZE * sizeof(__le32), |
4375 | &rp->rbr_dma, GFP_KERNEL); |
4376 | if (!rp->rbr) |
4377 | return -ENOMEM; |
4378 | if ((unsigned long)rp->rbr & (64UL - 1)) { |
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4380 | rp->rbr); |
4381 | return -EINVAL; |
4382 | } |
4383 | rp->rbr_table_size = MAX_RBR_RING_SIZE; |
4384 | rp->rbr_index = 0; |
4385 | rp->rbr_pending = 0; |
4386 | |
4387 | return 0; |
4388 | } |
4389 | |
4390 | static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) |
4391 | { |
4392 | int mtu = np->dev->mtu; |
4393 | |
4394 | /* These values are recommended by the HW designers for fair |
4395 | * utilization of DRR amongst the rings. |
4396 | */ |
4397 | rp->max_burst = mtu + 32; |
4398 | if (rp->max_burst > 4096) |
4399 | rp->max_burst = 4096; |
4400 | } |
4401 | |
4402 | static int niu_alloc_tx_ring_info(struct niu *np, |
4403 | struct tx_ring_info *rp) |
4404 | { |
4405 | BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); |
4406 | |
4407 | rp->mbox = np->ops->alloc_coherent(np->device, |
4408 | sizeof(struct txdma_mailbox), |
4409 | &rp->mbox_dma, GFP_KERNEL); |
4410 | if (!rp->mbox) |
4411 | return -ENOMEM; |
4412 | if ((unsigned long)rp->mbox & (64UL - 1)) { |
		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4414 | rp->mbox); |
4415 | return -EINVAL; |
4416 | } |
4417 | |
4418 | rp->descr = np->ops->alloc_coherent(np->device, |
4419 | MAX_TX_RING_SIZE * sizeof(__le64), |
4420 | &rp->descr_dma, GFP_KERNEL); |
4421 | if (!rp->descr) |
4422 | return -ENOMEM; |
4423 | if ((unsigned long)rp->descr & (64UL - 1)) { |
		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4425 | rp->descr); |
4426 | return -EINVAL; |
4427 | } |
4428 | |
4429 | rp->pending = MAX_TX_RING_SIZE; |
4430 | rp->prod = 0; |
4431 | rp->cons = 0; |
4432 | rp->wrap_bit = 0; |
4433 | |
4434 | /* XXX make these configurable... XXX */ |
4435 | rp->mark_freq = rp->pending / 4; |
4436 | |
4437 | niu_set_max_burst(np, rp); |
4438 | |
4439 | return 0; |
4440 | } |
4441 | |
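/* Choose the four RX buffer sizes the chip may carve out of an
 * RBR block.  As a worked example, with 4KB pages and a standard
 * MTU this gives a 4096-byte block, one block per page, and a
 * buffer size ladder of 256, 1024, 2048 and 4096 bytes.
 */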
4442 | static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) |
4443 | { |
4444 | u16 bss; |
4445 | |
4446 | bss = min(PAGE_SHIFT, 15); |
4447 | |
4448 | rp->rbr_block_size = 1 << bss; |
4449 | rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); |
4450 | |
4451 | rp->rbr_sizes[0] = 256; |
4452 | rp->rbr_sizes[1] = 1024; |
4453 | if (np->dev->mtu > ETH_DATA_LEN) { |
4454 | switch (PAGE_SIZE) { |
4455 | case 4 * 1024: |
4456 | rp->rbr_sizes[2] = 4096; |
4457 | break; |
4458 | |
4459 | default: |
4460 | rp->rbr_sizes[2] = 8192; |
4461 | break; |
4462 | } |
4463 | } else { |
4464 | rp->rbr_sizes[2] = 2048; |
4465 | } |
4466 | rp->rbr_sizes[3] = rp->rbr_block_size; |
4467 | } |
4468 | |
4469 | static int niu_alloc_channels(struct niu *np) |
4470 | { |
4471 | struct niu_parent *parent = np->parent; |
4472 | int first_rx_channel, first_tx_channel; |
4473 | int num_rx_rings, num_tx_rings; |
4474 | struct rx_ring_info *rx_rings; |
4475 | struct tx_ring_info *tx_rings; |
4476 | int i, port, err; |
4477 | |
4478 | port = np->port; |
4479 | first_rx_channel = first_tx_channel = 0; |
4480 | for (i = 0; i < port; i++) { |
4481 | first_rx_channel += parent->rxchan_per_port[i]; |
4482 | first_tx_channel += parent->txchan_per_port[i]; |
4483 | } |
4484 | |
4485 | num_rx_rings = parent->rxchan_per_port[port]; |
4486 | num_tx_rings = parent->txchan_per_port[port]; |
4487 | |
	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4489 | GFP_KERNEL); |
4490 | err = -ENOMEM; |
4491 | if (!rx_rings) |
4492 | goto out_err; |
4493 | |
4494 | np->num_rx_rings = num_rx_rings; |
4495 | smp_wmb(); |
4496 | np->rx_rings = rx_rings; |
4497 | |
	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4499 | |
4500 | for (i = 0; i < np->num_rx_rings; i++) { |
4501 | struct rx_ring_info *rp = &np->rx_rings[i]; |
4502 | |
4503 | rp->np = np; |
4504 | rp->rx_channel = first_rx_channel + i; |
4505 | |
4506 | err = niu_alloc_rx_ring_info(np, rp); |
4507 | if (err) |
4508 | goto out_err; |
4509 | |
4510 | niu_size_rbr(np, rp); |
4511 | |
4512 | /* XXX better defaults, configurable, etc... XXX */ |
4513 | rp->nonsyn_window = 64; |
4514 | rp->nonsyn_threshold = rp->rcr_table_size - 64; |
4515 | rp->syn_window = 64; |
4516 | rp->syn_threshold = rp->rcr_table_size - 64; |
4517 | rp->rcr_pkt_threshold = 16; |
4518 | rp->rcr_timeout = 8; |
4519 | rp->rbr_kick_thresh = RBR_REFILL_MIN; |
4520 | if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) |
4521 | rp->rbr_kick_thresh = rp->rbr_blocks_per_page; |
4522 | |
4523 | err = niu_rbr_fill(np, rp, GFP_KERNEL); |
4524 | if (err) |
4525 | goto out_err; |
4526 | } |
4527 | |
	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4529 | GFP_KERNEL); |
4530 | err = -ENOMEM; |
4531 | if (!tx_rings) |
4532 | goto out_err; |
4533 | |
4534 | np->num_tx_rings = num_tx_rings; |
4535 | smp_wmb(); |
4536 | np->tx_rings = tx_rings; |
4537 | |
	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4539 | |
4540 | for (i = 0; i < np->num_tx_rings; i++) { |
4541 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4542 | |
4543 | rp->np = np; |
4544 | rp->tx_channel = first_tx_channel + i; |
4545 | |
4546 | err = niu_alloc_tx_ring_info(np, rp); |
4547 | if (err) |
4548 | goto out_err; |
4549 | } |
4550 | |
4551 | return 0; |
4552 | |
4553 | out_err: |
4554 | niu_free_channels(np); |
4555 | return err; |
4556 | } |
4557 | |
4558 | static int niu_tx_cs_sng_poll(struct niu *np, int channel) |
4559 | { |
4560 | int limit = 1000; |
4561 | |
4562 | while (--limit > 0) { |
4563 | u64 val = nr64(TX_CS(channel)); |
4564 | if (val & TX_CS_SNG_STATE) |
4565 | return 0; |
4566 | } |
4567 | return -ENODEV; |
4568 | } |
4569 | |
4570 | static int niu_tx_channel_stop(struct niu *np, int channel) |
4571 | { |
4572 | u64 val = nr64(TX_CS(channel)); |
4573 | |
4574 | val |= TX_CS_STOP_N_GO; |
4575 | nw64(TX_CS(channel), val); |
4576 | |
4577 | return niu_tx_cs_sng_poll(np, channel); |
4578 | } |
4579 | |
4580 | static int niu_tx_cs_reset_poll(struct niu *np, int channel) |
4581 | { |
4582 | int limit = 1000; |
4583 | |
4584 | while (--limit > 0) { |
4585 | u64 val = nr64(TX_CS(channel)); |
4586 | if (!(val & TX_CS_RST)) |
4587 | return 0; |
4588 | } |
4589 | return -ENODEV; |
4590 | } |
4591 | |
4592 | static int niu_tx_channel_reset(struct niu *np, int channel) |
4593 | { |
4594 | u64 val = nr64(TX_CS(channel)); |
4595 | int err; |
4596 | |
4597 | val |= TX_CS_RST; |
4598 | nw64(TX_CS(channel), val); |
4599 | |
4600 | err = niu_tx_cs_reset_poll(np, channel); |
4601 | if (!err) |
4602 | nw64(TX_RING_KICK(channel), 0); |
4603 | |
4604 | return err; |
4605 | } |
4606 | |
4607 | static int niu_tx_channel_lpage_init(struct niu *np, int channel) |
4608 | { |
4609 | u64 val; |
4610 | |
4611 | nw64(TX_LOG_MASK1(channel), 0); |
4612 | nw64(TX_LOG_VAL1(channel), 0); |
4613 | nw64(TX_LOG_MASK2(channel), 0); |
4614 | nw64(TX_LOG_VAL2(channel), 0); |
4615 | nw64(TX_LOG_PAGE_RELO1(channel), 0); |
4616 | nw64(TX_LOG_PAGE_RELO2(channel), 0); |
4617 | nw64(TX_LOG_PAGE_HDL(channel), 0); |
4618 | |
4619 | val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; |
4620 | val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); |
4621 | nw64(TX_LOG_PAGE_VLD(channel), val); |
4622 | |
4623 | /* XXX TXDMA 32bit mode? XXX */ |
4624 | |
4625 | return 0; |
4626 | } |
4627 | |
4628 | static void niu_txc_enable_port(struct niu *np, int on) |
4629 | { |
4630 | unsigned long flags; |
4631 | u64 val, mask; |
4632 | |
4633 | niu_lock_parent(np, flags); |
4634 | val = nr64(TXC_CONTROL); |
4635 | mask = (u64)1 << np->port; |
4636 | if (on) { |
4637 | val |= TXC_CONTROL_ENABLE | mask; |
4638 | } else { |
4639 | val &= ~mask; |
4640 | if ((val & ~TXC_CONTROL_ENABLE) == 0) |
4641 | val &= ~TXC_CONTROL_ENABLE; |
4642 | } |
4643 | nw64(TXC_CONTROL, val); |
4644 | niu_unlock_parent(np, flags); |
4645 | } |
4646 | |
4647 | static void niu_txc_set_imask(struct niu *np, u64 imask) |
4648 | { |
4649 | unsigned long flags; |
4650 | u64 val; |
4651 | |
4652 | niu_lock_parent(np, flags); |
	val = nr64(TXC_INT_MASK);
	val &= ~TXC_INT_MASK_VAL(np->port);
	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	nw64(TXC_INT_MASK, val);
	niu_unlock_parent(np, flags);
4657 | } |
4658 | |
4659 | static void niu_txc_port_dma_enable(struct niu *np, int on) |
4660 | { |
4661 | u64 val = 0; |
4662 | |
4663 | if (on) { |
4664 | int i; |
4665 | |
4666 | for (i = 0; i < np->num_tx_rings; i++) |
4667 | val |= (1 << np->tx_rings[i].tx_channel); |
4668 | } |
4669 | nw64(TXC_PORT_DMA(np->port), val); |
4670 | } |
4671 | |
4672 | static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) |
4673 | { |
4674 | int err, channel = rp->tx_channel; |
4675 | u64 val, ring_len; |
4676 | |
4677 | err = niu_tx_channel_stop(np, channel); |
4678 | if (err) |
4679 | return err; |
4680 | |
4681 | err = niu_tx_channel_reset(np, channel); |
4682 | if (err) |
4683 | return err; |
4684 | |
4685 | err = niu_tx_channel_lpage_init(np, channel); |
4686 | if (err) |
4687 | return err; |
4688 | |
4689 | nw64(TXC_DMA_MAX(channel), rp->max_burst); |
4690 | nw64(TX_ENT_MSK(channel), 0); |
4691 | |
4692 | if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | |
4693 | TX_RNG_CFIG_STADDR)) { |
		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4695 | channel, (unsigned long long)rp->descr_dma); |
4696 | return -EINVAL; |
4697 | } |
4698 | |
	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, each 8 bytes long, so dividing the descriptor
	 * count by 8 converts it into the 64-byte block count the
	 * chip wants: e.g. a 256-entry ring spans 2048 bytes, which
	 * is 32 blocks, and 256 / 8 == 32.
	 */
4704 | ring_len = (rp->pending / 8); |
4705 | |
4706 | val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | |
4707 | rp->descr_dma); |
4708 | nw64(TX_RNG_CFIG(channel), val); |
4709 | |
4710 | if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || |
4711 | ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { |
		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4713 | channel, (unsigned long long)rp->mbox_dma); |
4714 | return -EINVAL; |
4715 | } |
4716 | nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); |
4717 | nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); |
4718 | |
4719 | nw64(TX_CS(channel), 0); |
4720 | |
4721 | rp->last_pkt_cnt = 0; |
4722 | |
4723 | return 0; |
4724 | } |
4725 | |
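/* Program this port's RDC group tables.  Every table maps its
 * NIU_RDC_TABLE_SLOTS slots to RX DMA channels, and DEF_RDC holds
 * the port's default RDC selection.
 */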
4726 | static void niu_init_rdc_groups(struct niu *np) |
4727 | { |
4728 | struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; |
4729 | int i, first_table_num = tp->first_table_num; |
4730 | |
4731 | for (i = 0; i < tp->num_tables; i++) { |
4732 | struct rdc_table *tbl = &tp->tables[i]; |
4733 | int this_table = first_table_num + i; |
4734 | int slot; |
4735 | |
4736 | for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) |
4737 | nw64(RDC_TBL(this_table, slot), |
4738 | tbl->rxdma_channel[slot]); |
4739 | } |
4740 | |
4741 | nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); |
4742 | } |
4743 | |
4744 | static void niu_init_drr_weight(struct niu *np) |
4745 | { |
	int type = phy_decode(np->parent->port_phy, np->port);
4747 | u64 val; |
4748 | |
4749 | switch (type) { |
4750 | case PORT_TYPE_10G: |
4751 | val = PT_DRR_WEIGHT_DEFAULT_10G; |
4752 | break; |
4753 | |
4754 | case PORT_TYPE_1G: |
4755 | default: |
4756 | val = PT_DRR_WEIGHT_DEFAULT_1G; |
4757 | break; |
4758 | } |
4759 | nw64(PT_DRR_WT(np->port), val); |
4760 | } |
4761 | |
4762 | static int niu_init_hostinfo(struct niu *np) |
4763 | { |
4764 | struct niu_parent *parent = np->parent; |
4765 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; |
4766 | int i, err, num_alt = niu_num_alt_addr(np); |
4767 | int first_rdc_table = tp->first_table_num; |
4768 | |
	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4770 | if (err) |
4771 | return err; |
4772 | |
	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4774 | if (err) |
4775 | return err; |
4776 | |
4777 | for (i = 0; i < num_alt; i++) { |
		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4779 | if (err) |
4780 | return err; |
4781 | } |
4782 | |
4783 | return 0; |
4784 | } |
4785 | |
4786 | static int niu_rx_channel_reset(struct niu *np, int channel) |
4787 | { |
4788 | return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), |
4789 | RXDMA_CFIG1_RST, 1000, 10, |
				      "RXDMA_CFIG1");
4791 | } |
4792 | |
4793 | static int niu_rx_channel_lpage_init(struct niu *np, int channel) |
4794 | { |
4795 | u64 val; |
4796 | |
4797 | nw64(RX_LOG_MASK1(channel), 0); |
4798 | nw64(RX_LOG_VAL1(channel), 0); |
4799 | nw64(RX_LOG_MASK2(channel), 0); |
4800 | nw64(RX_LOG_VAL2(channel), 0); |
4801 | nw64(RX_LOG_PAGE_RELO1(channel), 0); |
4802 | nw64(RX_LOG_PAGE_RELO2(channel), 0); |
4803 | nw64(RX_LOG_PAGE_HDL(channel), 0); |
4804 | |
4805 | val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; |
4806 | val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); |
4807 | nw64(RX_LOG_PAGE_VLD(channel), val); |
4808 | |
4809 | return 0; |
4810 | } |
4811 | |
4812 | static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) |
4813 | { |
4814 | u64 val; |
4815 | |
4816 | val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | |
4817 | ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | |
4818 | ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | |
4819 | ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); |
4820 | nw64(RDC_RED_PARA(rp->rx_channel), val); |
4821 | } |
4822 | |
4823 | static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) |
4824 | { |
4825 | u64 val = 0; |
4826 | |
4827 | *ret = 0; |
4828 | switch (rp->rbr_block_size) { |
4829 | case 4 * 1024: |
4830 | val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); |
4831 | break; |
4832 | case 8 * 1024: |
4833 | val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); |
4834 | break; |
4835 | case 16 * 1024: |
4836 | val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); |
4837 | break; |
4838 | case 32 * 1024: |
4839 | val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); |
4840 | break; |
4841 | default: |
4842 | return -EINVAL; |
4843 | } |
4844 | val |= RBR_CFIG_B_VLD2; |
4845 | switch (rp->rbr_sizes[2]) { |
4846 | case 2 * 1024: |
4847 | val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); |
4848 | break; |
4849 | case 4 * 1024: |
4850 | val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); |
4851 | break; |
4852 | case 8 * 1024: |
4853 | val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); |
4854 | break; |
4855 | case 16 * 1024: |
4856 | val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); |
4857 | break; |
4858 | |
4859 | default: |
4860 | return -EINVAL; |
4861 | } |
4862 | val |= RBR_CFIG_B_VLD1; |
4863 | switch (rp->rbr_sizes[1]) { |
4864 | case 1 * 1024: |
4865 | val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); |
4866 | break; |
4867 | case 2 * 1024: |
4868 | val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); |
4869 | break; |
4870 | case 4 * 1024: |
4871 | val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); |
4872 | break; |
4873 | case 8 * 1024: |
4874 | val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); |
4875 | break; |
4876 | |
4877 | default: |
4878 | return -EINVAL; |
4879 | } |
4880 | val |= RBR_CFIG_B_VLD0; |
4881 | switch (rp->rbr_sizes[0]) { |
4882 | case 256: |
4883 | val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); |
4884 | break; |
4885 | case 512: |
4886 | val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); |
4887 | break; |
4888 | case 1 * 1024: |
4889 | val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); |
4890 | break; |
4891 | case 2 * 1024: |
4892 | val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); |
4893 | break; |
4894 | |
4895 | default: |
4896 | return -EINVAL; |
4897 | } |
4898 | |
4899 | *ret = val; |
4900 | return 0; |
4901 | } |
4902 | |
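/* Set or clear the RXDMA enable bit for the channel, then poll
 * (up to 1000 reads, 10us apart) until the channel reports QST,
 * which appears to be its quiescent-state indication.
 */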
4903 | static int niu_enable_rx_channel(struct niu *np, int channel, int on) |
4904 | { |
4905 | u64 val = nr64(RXDMA_CFIG1(channel)); |
4906 | int limit; |
4907 | |
4908 | if (on) |
4909 | val |= RXDMA_CFIG1_EN; |
4910 | else |
4911 | val &= ~RXDMA_CFIG1_EN; |
4912 | nw64(RXDMA_CFIG1(channel), val); |
4913 | |
4914 | limit = 1000; |
4915 | while (--limit > 0) { |
4916 | if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) |
4917 | break; |
4918 | udelay(10); |
4919 | } |
4920 | if (limit <= 0) |
4921 | return -ENODEV; |
4922 | return 0; |
4923 | } |
4924 | |
4925 | static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) |
4926 | { |
4927 | int err, channel = rp->rx_channel; |
4928 | u64 val; |
4929 | |
4930 | err = niu_rx_channel_reset(np, channel); |
4931 | if (err) |
4932 | return err; |
4933 | |
4934 | err = niu_rx_channel_lpage_init(np, channel); |
4935 | if (err) |
4936 | return err; |
4937 | |
4938 | niu_rx_channel_wred_init(np, rp); |
4939 | |
4940 | nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); |
4941 | nw64(RX_DMA_CTL_STAT(channel), |
4942 | (RX_DMA_CTL_STAT_MEX | |
4943 | RX_DMA_CTL_STAT_RCRTHRES | |
4944 | RX_DMA_CTL_STAT_RCRTO | |
4945 | RX_DMA_CTL_STAT_RBR_EMPTY)); |
4946 | nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); |
4947 | nw64(RXDMA_CFIG2(channel), |
4948 | ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | |
4949 | RXDMA_CFIG2_FULL_HDR)); |
4950 | nw64(RBR_CFIG_A(channel), |
4951 | ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | |
4952 | (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); |
	err = niu_compute_rbr_cfig_b(rp, &val);
4954 | if (err) |
4955 | return err; |
4956 | nw64(RBR_CFIG_B(channel), val); |
4957 | nw64(RCRCFIG_A(channel), |
4958 | ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | |
4959 | (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); |
4960 | nw64(RCRCFIG_B(channel), |
4961 | ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | |
4962 | RCRCFIG_B_ENTOUT | |
4963 | ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); |
4964 | |
	err = niu_enable_rx_channel(np, channel, 1);
4966 | if (err) |
4967 | return err; |
4968 | |
4969 | nw64(RBR_KICK(channel), rp->rbr_index); |
4970 | |
4971 | val = nr64(RX_DMA_CTL_STAT(channel)); |
4972 | val |= RX_DMA_CTL_STAT_RBR_EMPTY; |
4973 | nw64(RX_DMA_CTL_STAT(channel), val); |
4974 | |
4975 | return 0; |
4976 | } |
4977 | |
4978 | static int niu_init_rx_channels(struct niu *np) |
4979 | { |
4980 | unsigned long flags; |
4981 | u64 seed = jiffies_64; |
4982 | int err, i; |
4983 | |
4984 | niu_lock_parent(np, flags); |
4985 | nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); |
4986 | nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); |
4987 | niu_unlock_parent(np, flags); |
4988 | |
4989 | /* XXX RXDMA 32bit mode? XXX */ |
4990 | |
4991 | niu_init_rdc_groups(np); |
4992 | niu_init_drr_weight(np); |
4993 | |
4994 | err = niu_init_hostinfo(np); |
4995 | if (err) |
4996 | return err; |
4997 | |
4998 | for (i = 0; i < np->num_rx_rings; i++) { |
4999 | struct rx_ring_info *rp = &np->rx_rings[i]; |
5000 | |
5001 | err = niu_init_one_rx_channel(np, rp); |
5002 | if (err) |
5003 | return err; |
5004 | } |
5005 | |
5006 | return 0; |
5007 | } |
5008 | |
5009 | static int niu_set_ip_frag_rule(struct niu *np) |
5010 | { |
5011 | struct niu_parent *parent = np->parent; |
5012 | struct niu_classifier *cp = &np->clas; |
5013 | struct niu_tcam_entry *tp; |
5014 | int index, err; |
5015 | |
5016 | index = cp->tcam_top; |
5017 | tp = &parent->tcam[index]; |
5018 | |
5019 | /* Note that the noport bit is the same in both ipv4 and |
5020 | * ipv6 format TCAM entries. |
5021 | */ |
5022 | memset(tp, 0, sizeof(*tp)); |
5023 | tp->key[1] = TCAM_V4KEY1_NOPORT; |
5024 | tp->key_mask[1] = TCAM_V4KEY1_NOPORT; |
5025 | tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | |
5026 | ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); |
	err = tcam_write(np, index, tp->key, tp->key_mask);
5028 | if (err) |
5029 | return err; |
	err = tcam_assoc_write(np, index, tp->assoc_data);
5031 | if (err) |
5032 | return err; |
5033 | tp->valid = 1; |
5034 | cp->tcam_valid_entries++; |
5035 | |
5036 | return 0; |
5037 | } |
5038 | |
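/* Bring up the classifier: seed the H1/H2 hash polynomials,
 * program the host info and VLAN tables, install the alternate
 * MAC mappings and the per-class TCAM and flow keys, add the IP
 * fragment rule, and finally enable TCAM lookups.
 */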
5039 | static int niu_init_classifier_hw(struct niu *np) |
5040 | { |
5041 | struct niu_parent *parent = np->parent; |
5042 | struct niu_classifier *cp = &np->clas; |
5043 | int i, err; |
5044 | |
5045 | nw64(H1POLY, cp->h1_init); |
5046 | nw64(H2POLY, cp->h2_init); |
5047 | |
5048 | err = niu_init_hostinfo(np); |
5049 | if (err) |
5050 | return err; |
5051 | |
5052 | for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { |
5053 | struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; |
5054 | |
		vlan_tbl_write(np, i, np->port,
			       vp->vlan_pref, vp->rdc_num);
5057 | } |
5058 | |
5059 | for (i = 0; i < cp->num_alt_mac_mappings; i++) { |
5060 | struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; |
5061 | |
		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
						ap->rdc_num, ap->mac_pref);
5064 | if (err) |
5065 | return err; |
5066 | } |
5067 | |
5068 | for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { |
5069 | int index = i - CLASS_CODE_USER_PROG1; |
5070 | |
		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
5072 | if (err) |
5073 | return err; |
		err = niu_set_flow_key(np, i, parent->flow_key[index]);
5075 | if (err) |
5076 | return err; |
5077 | } |
5078 | |
5079 | err = niu_set_ip_frag_rule(np); |
5080 | if (err) |
5081 | return err; |
5082 | |
	tcam_enable(np, 1);
5084 | |
5085 | return 0; |
5086 | } |
5087 | |
5088 | static int niu_zcp_write(struct niu *np, int index, u64 *data) |
5089 | { |
5090 | nw64(ZCP_RAM_DATA0, data[0]); |
5091 | nw64(ZCP_RAM_DATA1, data[1]); |
5092 | nw64(ZCP_RAM_DATA2, data[2]); |
5093 | nw64(ZCP_RAM_DATA3, data[3]); |
5094 | nw64(ZCP_RAM_DATA4, data[4]); |
5095 | nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); |
5096 | nw64(ZCP_RAM_ACC, |
5097 | (ZCP_RAM_ACC_WRITE | |
5098 | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | |
5099 | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); |
5100 | |
5101 | return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, |
5102 | 1000, 100); |
5103 | } |
5104 | |
5105 | static int niu_zcp_read(struct niu *np, int index, u64 *data) |
5106 | { |
5107 | int err; |
5108 | |
5109 | err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, |
5110 | 1000, 100); |
5111 | if (err) { |
		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5113 | (unsigned long long)nr64(ZCP_RAM_ACC)); |
5114 | return err; |
5115 | } |
5116 | |
5117 | nw64(ZCP_RAM_ACC, |
5118 | (ZCP_RAM_ACC_READ | |
5119 | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | |
5120 | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); |
5121 | |
5122 | err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, |
5123 | 1000, 100); |
5124 | if (err) { |
		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5126 | (unsigned long long)nr64(ZCP_RAM_ACC)); |
5127 | return err; |
5128 | } |
5129 | |
5130 | data[0] = nr64(ZCP_RAM_DATA0); |
5131 | data[1] = nr64(ZCP_RAM_DATA1); |
5132 | data[2] = nr64(ZCP_RAM_DATA2); |
5133 | data[3] = nr64(ZCP_RAM_DATA3); |
5134 | data[4] = nr64(ZCP_RAM_DATA4); |
5135 | |
5136 | return 0; |
5137 | } |
5138 | |
5139 | static void niu_zcp_cfifo_reset(struct niu *np) |
5140 | { |
5141 | u64 val = nr64(RESET_CFIFO); |
5142 | |
5143 | val |= RESET_CFIFO_RST(np->port); |
5144 | nw64(RESET_CFIFO, val); |
5145 | udelay(10); |
5146 | |
5147 | val &= ~RESET_CFIFO_RST(np->port); |
5148 | nw64(RESET_CFIFO, val); |
5149 | } |
5150 | |
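/* Scrub the ZCP CFIFO by writing zeros to every entry and reading
 * them back, then reset the CFIFO, clear the latched ZCP interrupt
 * status and mask all ZCP interrupts.
 */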
5151 | static int niu_init_zcp(struct niu *np) |
5152 | { |
5153 | u64 data[5], rbuf[5]; |
5154 | int i, max, err; |
5155 | |
5156 | if (np->parent->plat_type != PLAT_TYPE_NIU) { |
5157 | if (np->port == 0 || np->port == 1) |
5158 | max = ATLAS_P0_P1_CFIFO_ENTRIES; |
5159 | else |
5160 | max = ATLAS_P2_P3_CFIFO_ENTRIES; |
5161 | } else |
5162 | max = NIU_CFIFO_ENTRIES; |
5163 | |
5164 | data[0] = 0; |
5165 | data[1] = 0; |
5166 | data[2] = 0; |
5167 | data[3] = 0; |
5168 | data[4] = 0; |
5169 | |
5170 | for (i = 0; i < max; i++) { |
		err = niu_zcp_write(np, i, data);
5172 | if (err) |
5173 | return err; |
		err = niu_zcp_read(np, i, rbuf);
5175 | if (err) |
5176 | return err; |
5177 | } |
5178 | |
5179 | niu_zcp_cfifo_reset(np); |
5180 | nw64(CFIFO_ECC(np->port), 0); |
5181 | nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); |
5182 | (void) nr64(ZCP_INT_STAT); |
5183 | nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); |
5184 | |
5185 | return 0; |
5186 | } |
5187 | |
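/* The IPP DFIFO is reached through what looks like a PIO access
 * window: setting IPP_CFIG_DFIFO_PIO_W in IPP_CFIG opens the
 * window for writes at the pointer in IPP_DFIFO_WR_PTR, and
 * clearing it closes the window again.
 */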
5188 | static void niu_ipp_write(struct niu *np, int index, u64 *data) |
5189 | { |
5190 | u64 val = nr64_ipp(IPP_CFIG); |
5191 | |
5192 | nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); |
5193 | nw64_ipp(IPP_DFIFO_WR_PTR, index); |
5194 | nw64_ipp(IPP_DFIFO_WR0, data[0]); |
5195 | nw64_ipp(IPP_DFIFO_WR1, data[1]); |
5196 | nw64_ipp(IPP_DFIFO_WR2, data[2]); |
5197 | nw64_ipp(IPP_DFIFO_WR3, data[3]); |
5198 | nw64_ipp(IPP_DFIFO_WR4, data[4]); |
5199 | nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); |
5200 | } |
5201 | |
5202 | static void niu_ipp_read(struct niu *np, int index, u64 *data) |
5203 | { |
5204 | nw64_ipp(IPP_DFIFO_RD_PTR, index); |
5205 | data[0] = nr64_ipp(IPP_DFIFO_RD0); |
5206 | data[1] = nr64_ipp(IPP_DFIFO_RD1); |
5207 | data[2] = nr64_ipp(IPP_DFIFO_RD2); |
5208 | data[3] = nr64_ipp(IPP_DFIFO_RD3); |
5209 | data[4] = nr64_ipp(IPP_DFIFO_RD4); |
5210 | } |
5211 | |
5212 | static int niu_ipp_reset(struct niu *np) |
5213 | { |
5214 | return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, |
					  1000, 100, "IPP_CFIG");
5216 | } |
5217 | |
5218 | static int niu_init_ipp(struct niu *np) |
5219 | { |
5220 | u64 data[5], rbuf[5], val; |
5221 | int i, max, err; |
5222 | |
5223 | if (np->parent->plat_type != PLAT_TYPE_NIU) { |
5224 | if (np->port == 0 || np->port == 1) |
5225 | max = ATLAS_P0_P1_DFIFO_ENTRIES; |
5226 | else |
5227 | max = ATLAS_P2_P3_DFIFO_ENTRIES; |
5228 | } else |
5229 | max = NIU_DFIFO_ENTRIES; |
5230 | |
5231 | data[0] = 0; |
5232 | data[1] = 0; |
5233 | data[2] = 0; |
5234 | data[3] = 0; |
5235 | data[4] = 0; |
5236 | |
5237 | for (i = 0; i < max; i++) { |
		niu_ipp_write(np, i, data);
		niu_ipp_read(np, i, rbuf);
5240 | } |
5241 | |
5242 | (void) nr64_ipp(IPP_INT_STAT); |
5243 | (void) nr64_ipp(IPP_INT_STAT); |
5244 | |
5245 | err = niu_ipp_reset(np); |
5246 | if (err) |
5247 | return err; |
5248 | |
5249 | (void) nr64_ipp(IPP_PKT_DIS); |
5250 | (void) nr64_ipp(IPP_BAD_CS_CNT); |
5251 | (void) nr64_ipp(IPP_ECC); |
5252 | |
5253 | (void) nr64_ipp(IPP_INT_STAT); |
5254 | |
5255 | nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); |
5256 | |
5257 | val = nr64_ipp(IPP_CFIG); |
5258 | val &= ~IPP_CFIG_IP_MAX_PKT; |
5259 | val |= (IPP_CFIG_IPP_ENABLE | |
5260 | IPP_CFIG_DFIFO_ECC_EN | |
5261 | IPP_CFIG_DROP_BAD_CRC | |
5262 | IPP_CFIG_CKSUM_EN | |
5263 | (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); |
5264 | nw64_ipp(IPP_CFIG, val); |
5265 | |
5266 | return 0; |
5267 | } |
5268 | |
5269 | static void niu_handle_led(struct niu *np, int status) |
5270 | { |
5271 | u64 val; |
5272 | val = nr64_mac(XMAC_CONFIG); |
5273 | |
5274 | if ((np->flags & NIU_FLAGS_10G) != 0 && |
5275 | (np->flags & NIU_FLAGS_FIBER) != 0) { |
5276 | if (status) { |
5277 | val |= XMAC_CONFIG_LED_POLARITY; |
5278 | val &= ~XMAC_CONFIG_FORCE_LED_ON; |
5279 | } else { |
5280 | val |= XMAC_CONFIG_FORCE_LED_ON; |
5281 | val &= ~XMAC_CONFIG_LED_POLARITY; |
5282 | } |
5283 | } |
5284 | |
5285 | nw64_mac(XMAC_CONFIG, val); |
5286 | } |
5287 | |
5288 | static void niu_init_xif_xmac(struct niu *np) |
5289 | { |
5290 | struct niu_link_config *lp = &np->link_config; |
5291 | u64 val; |
5292 | |
5293 | if (np->flags & NIU_FLAGS_XCVR_SERDES) { |
5294 | val = nr64(MIF_CONFIG); |
5295 | val |= MIF_CONFIG_ATCA_GE; |
5296 | nw64(MIF_CONFIG, val); |
5297 | } |
5298 | |
5299 | val = nr64_mac(XMAC_CONFIG); |
5300 | val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; |
5301 | |
5302 | val |= XMAC_CONFIG_TX_OUTPUT_EN; |
5303 | |
5304 | if (lp->loopback_mode == LOOPBACK_MAC) { |
5305 | val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; |
5306 | val |= XMAC_CONFIG_LOOPBACK; |
5307 | } else { |
5308 | val &= ~XMAC_CONFIG_LOOPBACK; |
5309 | } |
5310 | |
5311 | if (np->flags & NIU_FLAGS_10G) { |
5312 | val &= ~XMAC_CONFIG_LFS_DISABLE; |
5313 | } else { |
5314 | val |= XMAC_CONFIG_LFS_DISABLE; |
5315 | if (!(np->flags & NIU_FLAGS_FIBER) && |
5316 | !(np->flags & NIU_FLAGS_XCVR_SERDES)) |
5317 | val |= XMAC_CONFIG_1G_PCS_BYPASS; |
5318 | else |
5319 | val &= ~XMAC_CONFIG_1G_PCS_BYPASS; |
5320 | } |
5321 | |
5322 | val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; |
5323 | |
5324 | if (lp->active_speed == SPEED_100) |
5325 | val |= XMAC_CONFIG_SEL_CLK_25MHZ; |
5326 | else |
5327 | val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; |
5328 | |
5329 | nw64_mac(XMAC_CONFIG, val); |
5330 | |
5331 | val = nr64_mac(XMAC_CONFIG); |
5332 | val &= ~XMAC_CONFIG_MODE_MASK; |
5333 | if (np->flags & NIU_FLAGS_10G) { |
5334 | val |= XMAC_CONFIG_MODE_XGMII; |
5335 | } else { |
5336 | if (lp->active_speed == SPEED_1000) |
5337 | val |= XMAC_CONFIG_MODE_GMII; |
5338 | else |
5339 | val |= XMAC_CONFIG_MODE_MII; |
5340 | } |
5341 | |
5342 | nw64_mac(XMAC_CONFIG, val); |
5343 | } |
5344 | |
5345 | static void niu_init_xif_bmac(struct niu *np) |
5346 | { |
5347 | struct niu_link_config *lp = &np->link_config; |
5348 | u64 val; |
5349 | |
5350 | val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; |
5351 | |
5352 | if (lp->loopback_mode == LOOPBACK_MAC) |
5353 | val |= BMAC_XIF_CONFIG_MII_LOOPBACK; |
5354 | else |
5355 | val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; |
5356 | |
5357 | if (lp->active_speed == SPEED_1000) |
5358 | val |= BMAC_XIF_CONFIG_GMII_MODE; |
5359 | else |
5360 | val &= ~BMAC_XIF_CONFIG_GMII_MODE; |
5361 | |
5362 | val &= ~(BMAC_XIF_CONFIG_LINK_LED | |
5363 | BMAC_XIF_CONFIG_LED_POLARITY); |
5364 | |
5365 | if (!(np->flags & NIU_FLAGS_10G) && |
5366 | !(np->flags & NIU_FLAGS_FIBER) && |
5367 | lp->active_speed == SPEED_100) |
5368 | val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; |
5369 | else |
5370 | val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; |
5371 | |
5372 | nw64_mac(BMAC_XIF_CONFIG, val); |
5373 | } |
5374 | |
5375 | static void niu_init_xif(struct niu *np) |
5376 | { |
5377 | if (np->flags & NIU_FLAGS_XMAC) |
5378 | niu_init_xif_xmac(np); |
5379 | else |
5380 | niu_init_xif_bmac(np); |
5381 | } |
5382 | |
5383 | static void niu_pcs_mii_reset(struct niu *np) |
5384 | { |
5385 | int limit = 1000; |
5386 | u64 val = nr64_pcs(PCS_MII_CTL); |
5387 | val |= PCS_MII_CTL_RST; |
5388 | nw64_pcs(PCS_MII_CTL, val); |
5389 | while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { |
5390 | udelay(100); |
5391 | val = nr64_pcs(PCS_MII_CTL); |
5392 | } |
5393 | } |
5394 | |
5395 | static void niu_xpcs_reset(struct niu *np) |
5396 | { |
5397 | int limit = 1000; |
5398 | u64 val = nr64_xpcs(XPCS_CONTROL1); |
5399 | val |= XPCS_CONTROL1_RESET; |
5400 | nw64_xpcs(XPCS_CONTROL1, val); |
5401 | while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { |
5402 | udelay(100); |
5403 | val = nr64_xpcs(XPCS_CONTROL1); |
5404 | } |
5405 | } |
5406 | |
5407 | static int niu_init_pcs(struct niu *np) |
5408 | { |
5409 | struct niu_link_config *lp = &np->link_config; |
5410 | u64 val; |
5411 | |
5412 | switch (np->flags & (NIU_FLAGS_10G | |
5413 | NIU_FLAGS_FIBER | |
5414 | NIU_FLAGS_XCVR_SERDES)) { |
5415 | case NIU_FLAGS_FIBER: |
5416 | /* 1G fiber */ |
5417 | nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); |
5418 | nw64_pcs(PCS_DPATH_MODE, 0); |
5419 | niu_pcs_mii_reset(np); |
5420 | break; |
5421 | |
5422 | case NIU_FLAGS_10G: |
5423 | case NIU_FLAGS_10G | NIU_FLAGS_FIBER: |
5424 | case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: |
5425 | /* 10G SERDES */ |
5426 | if (!(np->flags & NIU_FLAGS_XMAC)) |
5427 | return -EINVAL; |
5428 | |
5429 | /* 10G copper or fiber */ |
5430 | val = nr64_mac(XMAC_CONFIG); |
5431 | val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; |
5432 | nw64_mac(XMAC_CONFIG, val); |
5433 | |
5434 | niu_xpcs_reset(np); |
5435 | |
5436 | val = nr64_xpcs(XPCS_CONTROL1); |
5437 | if (lp->loopback_mode == LOOPBACK_PHY) |
5438 | val |= XPCS_CONTROL1_LOOPBACK; |
5439 | else |
5440 | val &= ~XPCS_CONTROL1_LOOPBACK; |
5441 | nw64_xpcs(XPCS_CONTROL1, val); |
5442 | |
5443 | nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); |
5444 | (void) nr64_xpcs(XPCS_SYMERR_CNT01); |
5445 | (void) nr64_xpcs(XPCS_SYMERR_CNT23); |
5446 | break; |
5447 | |
5448 | |
5449 | case NIU_FLAGS_XCVR_SERDES: |
5450 | /* 1G SERDES */ |
5451 | niu_pcs_mii_reset(np); |
5452 | nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); |
5453 | nw64_pcs(PCS_DPATH_MODE, 0); |
5454 | break; |
5455 | |
5456 | case 0: |
5457 | /* 1G copper */ |
5458 | case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: |
5459 | /* 1G RGMII FIBER */ |
5460 | nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); |
5461 | niu_pcs_mii_reset(np); |
5462 | break; |
5463 | |
5464 | default: |
5465 | return -EINVAL; |
5466 | } |
5467 | |
5468 | return 0; |
5469 | } |
5470 | |
5471 | static int niu_reset_tx_xmac(struct niu *np) |
5472 | { |
5473 | return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, |
5474 | (XTXMAC_SW_RST_REG_RS | |
5475 | XTXMAC_SW_RST_SOFT_RST), |
					  1000, 100, "XTXMAC_SW_RST");
5477 | } |
5478 | |
5479 | static int niu_reset_tx_bmac(struct niu *np) |
5480 | { |
5481 | int limit; |
5482 | |
5483 | nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); |
5484 | limit = 1000; |
5485 | while (--limit >= 0) { |
5486 | if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) |
5487 | break; |
5488 | udelay(100); |
5489 | } |
5490 | if (limit < 0) { |
		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5492 | np->port, |
5493 | (unsigned long long) nr64_mac(BTXMAC_SW_RST)); |
5494 | return -ENODEV; |
5495 | } |
5496 | |
5497 | return 0; |
5498 | } |
5499 | |
5500 | static int niu_reset_tx_mac(struct niu *np) |
5501 | { |
5502 | if (np->flags & NIU_FLAGS_XMAC) |
5503 | return niu_reset_tx_xmac(np); |
5504 | else |
5505 | return niu_reset_tx_bmac(np); |
5506 | } |
5507 | |
5508 | static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) |
5509 | { |
5510 | u64 val; |
5511 | |
5512 | val = nr64_mac(XMAC_MIN); |
5513 | val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | |
5514 | XMAC_MIN_RX_MIN_PKT_SIZE); |
5515 | val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); |
5516 | val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); |
5517 | nw64_mac(XMAC_MIN, val); |
5518 | |
5519 | nw64_mac(XMAC_MAX, max); |
5520 | |
5521 | nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); |
5522 | |
5523 | val = nr64_mac(XMAC_IPG); |
5524 | if (np->flags & NIU_FLAGS_10G) { |
5525 | val &= ~XMAC_IPG_IPG_XGMII; |
5526 | val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); |
5527 | } else { |
5528 | val &= ~XMAC_IPG_IPG_MII_GMII; |
5529 | val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); |
5530 | } |
5531 | nw64_mac(XMAC_IPG, val); |
5532 | |
5533 | val = nr64_mac(XMAC_CONFIG); |
5534 | val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | |
5535 | XMAC_CONFIG_STRETCH_MODE | |
5536 | XMAC_CONFIG_VAR_MIN_IPG_EN | |
5537 | XMAC_CONFIG_TX_ENABLE); |
5538 | nw64_mac(XMAC_CONFIG, val); |
5539 | |
5540 | nw64_mac(TXMAC_FRM_CNT, 0); |
5541 | nw64_mac(TXMAC_BYTE_CNT, 0); |
5542 | } |
5543 | |
5544 | static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) |
5545 | { |
5546 | u64 val; |
5547 | |
5548 | nw64_mac(BMAC_MIN_FRAME, min); |
5549 | nw64_mac(BMAC_MAX_FRAME, max); |
5550 | |
5551 | nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); |
5552 | nw64_mac(BMAC_CTRL_TYPE, 0x8808); |
5553 | nw64_mac(BMAC_PREAMBLE_SIZE, 7); |
5554 | |
5555 | val = nr64_mac(BTXMAC_CONFIG); |
5556 | val &= ~(BTXMAC_CONFIG_FCS_DISABLE | |
5557 | BTXMAC_CONFIG_ENABLE); |
5558 | nw64_mac(BTXMAC_CONFIG, val); |
5559 | } |
5560 | |
5561 | static void niu_init_tx_mac(struct niu *np) |
5562 | { |
5563 | u64 min, max; |
5564 | |
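	/* min must remain a multiple of 8 (see the BUG_ON below);
	 * 1522 covers a max-size VLAN-tagged Ethernet frame
	 * (1518 + 4 bytes), while 9216 leaves room for jumbo frames.
	 */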
5565 | min = 64; |
5566 | if (np->dev->mtu > ETH_DATA_LEN) |
5567 | max = 9216; |
5568 | else |
5569 | max = 1522; |
5570 | |
5571 | /* The XMAC_MIN register only accepts values for TX min which |
5572 | * have the low 3 bits cleared. |
5573 | */ |
5574 | BUG_ON(min & 0x7); |
5575 | |
5576 | if (np->flags & NIU_FLAGS_XMAC) |
5577 | niu_init_tx_xmac(np, min, max); |
5578 | else |
5579 | niu_init_tx_bmac(np, min, max); |
5580 | } |
5581 | |
5582 | static int niu_reset_rx_xmac(struct niu *np) |
5583 | { |
5584 | int limit; |
5585 | |
5586 | nw64_mac(XRXMAC_SW_RST, |
5587 | XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); |
5588 | limit = 1000; |
5589 | while (--limit >= 0) { |
5590 | if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | |
5591 | XRXMAC_SW_RST_SOFT_RST))) |
5592 | break; |
5593 | udelay(100); |
5594 | } |
5595 | if (limit < 0) { |
		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5597 | np->port, |
5598 | (unsigned long long) nr64_mac(XRXMAC_SW_RST)); |
5599 | return -ENODEV; |
5600 | } |
5601 | |
5602 | return 0; |
5603 | } |
5604 | |
5605 | static int niu_reset_rx_bmac(struct niu *np) |
5606 | { |
5607 | int limit; |
5608 | |
5609 | nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); |
5610 | limit = 1000; |
5611 | while (--limit >= 0) { |
5612 | if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) |
5613 | break; |
5614 | udelay(100); |
5615 | } |
5616 | if (limit < 0) { |
		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5618 | np->port, |
5619 | (unsigned long long) nr64_mac(BRXMAC_SW_RST)); |
5620 | return -ENODEV; |
5621 | } |
5622 | |
5623 | return 0; |
5624 | } |
5625 | |
5626 | static int niu_reset_rx_mac(struct niu *np) |
5627 | { |
5628 | if (np->flags & NIU_FLAGS_XMAC) |
5629 | return niu_reset_rx_xmac(np); |
5630 | else |
5631 | return niu_reset_rx_bmac(np); |
5632 | } |
5633 | |
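/* Bring the XMAC receive side to a known state: clear the address
 * filters and multicast hash table, point both the primary and
 * multicast MACs at this port's RDC table group so classified frames
 * land on the right RX DMA channels, and zero the statistics counters.
 */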
5634 | static void niu_init_rx_xmac(struct niu *np) |
5635 | { |
5636 | struct niu_parent *parent = np->parent; |
5637 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; |
5638 | int first_rdc_table = tp->first_table_num; |
5639 | unsigned long i; |
5640 | u64 val; |
5641 | |
5642 | nw64_mac(XMAC_ADD_FILT0, 0); |
5643 | nw64_mac(XMAC_ADD_FILT1, 0); |
5644 | nw64_mac(XMAC_ADD_FILT2, 0); |
5645 | nw64_mac(XMAC_ADD_FILT12_MASK, 0); |
5646 | nw64_mac(XMAC_ADD_FILT00_MASK, 0); |
5647 | for (i = 0; i < MAC_NUM_HASH; i++) |
5648 | nw64_mac(XMAC_HASH_TBL(i), 0); |
5649 | nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); |
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5652 | |
5653 | val = nr64_mac(XMAC_CONFIG); |
5654 | val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | |
5655 | XMAC_CONFIG_PROMISCUOUS | |
5656 | XMAC_CONFIG_PROMISC_GROUP | |
5657 | XMAC_CONFIG_ERR_CHK_DIS | |
5658 | XMAC_CONFIG_RX_CRC_CHK_DIS | |
5659 | XMAC_CONFIG_RESERVED_MULTICAST | |
5660 | XMAC_CONFIG_RX_CODEV_CHK_DIS | |
5661 | XMAC_CONFIG_ADDR_FILTER_EN | |
5662 | XMAC_CONFIG_RCV_PAUSE_ENABLE | |
5663 | XMAC_CONFIG_STRIP_CRC | |
5664 | XMAC_CONFIG_PASS_FLOW_CTRL | |
5665 | XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); |
5666 | val |= (XMAC_CONFIG_HASH_FILTER_EN); |
5667 | nw64_mac(XMAC_CONFIG, val); |
5668 | |
5669 | nw64_mac(RXMAC_BT_CNT, 0); |
5670 | nw64_mac(RXMAC_BC_FRM_CNT, 0); |
5671 | nw64_mac(RXMAC_MC_FRM_CNT, 0); |
5672 | nw64_mac(RXMAC_FRAG_CNT, 0); |
5673 | nw64_mac(RXMAC_HIST_CNT1, 0); |
5674 | nw64_mac(RXMAC_HIST_CNT2, 0); |
5675 | nw64_mac(RXMAC_HIST_CNT3, 0); |
5676 | nw64_mac(RXMAC_HIST_CNT4, 0); |
5677 | nw64_mac(RXMAC_HIST_CNT5, 0); |
5678 | nw64_mac(RXMAC_HIST_CNT6, 0); |
5679 | nw64_mac(RXMAC_HIST_CNT7, 0); |
5680 | nw64_mac(RXMAC_MPSZER_CNT, 0); |
5681 | nw64_mac(RXMAC_CRC_ER_CNT, 0); |
5682 | nw64_mac(RXMAC_CD_VIO_CNT, 0); |
5683 | nw64_mac(LINK_FAULT_CNT, 0); |
5684 | } |
5685 | |
5686 | static void niu_init_rx_bmac(struct niu *np) |
5687 | { |
5688 | struct niu_parent *parent = np->parent; |
5689 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; |
5690 | int first_rdc_table = tp->first_table_num; |
5691 | unsigned long i; |
5692 | u64 val; |
5693 | |
5694 | nw64_mac(BMAC_ADD_FILT0, 0); |
5695 | nw64_mac(BMAC_ADD_FILT1, 0); |
5696 | nw64_mac(BMAC_ADD_FILT2, 0); |
5697 | nw64_mac(BMAC_ADD_FILT12_MASK, 0); |
5698 | nw64_mac(BMAC_ADD_FILT00_MASK, 0); |
5699 | for (i = 0; i < MAC_NUM_HASH; i++) |
5700 | nw64_mac(BMAC_HASH_TBL(i), 0); |
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5703 | nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); |
5704 | |
5705 | val = nr64_mac(BRXMAC_CONFIG); |
5706 | val &= ~(BRXMAC_CONFIG_ENABLE | |
5707 | BRXMAC_CONFIG_STRIP_PAD | |
5708 | BRXMAC_CONFIG_STRIP_FCS | |
5709 | BRXMAC_CONFIG_PROMISC | |
5710 | BRXMAC_CONFIG_PROMISC_GRP | |
5711 | BRXMAC_CONFIG_ADDR_FILT_EN | |
5712 | BRXMAC_CONFIG_DISCARD_DIS); |
5713 | val |= (BRXMAC_CONFIG_HASH_FILT_EN); |
5714 | nw64_mac(BRXMAC_CONFIG, val); |
5715 | |
5716 | val = nr64_mac(BMAC_ADDR_CMPEN); |
5717 | val |= BMAC_ADDR_CMPEN_EN0; |
5718 | nw64_mac(BMAC_ADDR_CMPEN, val); |
5719 | } |
5720 | |
5721 | static void niu_init_rx_mac(struct niu *np) |
5722 | { |
	niu_set_primary_mac(np, np->dev->dev_addr);
5724 | |
5725 | if (np->flags & NIU_FLAGS_XMAC) |
5726 | niu_init_rx_xmac(np); |
5727 | else |
5728 | niu_init_rx_bmac(np); |
5729 | } |
5730 | |
5731 | static void niu_enable_tx_xmac(struct niu *np, int on) |
5732 | { |
5733 | u64 val = nr64_mac(XMAC_CONFIG); |
5734 | |
5735 | if (on) |
5736 | val |= XMAC_CONFIG_TX_ENABLE; |
5737 | else |
5738 | val &= ~XMAC_CONFIG_TX_ENABLE; |
5739 | nw64_mac(XMAC_CONFIG, val); |
5740 | } |
5741 | |
5742 | static void niu_enable_tx_bmac(struct niu *np, int on) |
5743 | { |
5744 | u64 val = nr64_mac(BTXMAC_CONFIG); |
5745 | |
5746 | if (on) |
5747 | val |= BTXMAC_CONFIG_ENABLE; |
5748 | else |
5749 | val &= ~BTXMAC_CONFIG_ENABLE; |
5750 | nw64_mac(BTXMAC_CONFIG, val); |
5751 | } |
5752 | |
5753 | static void niu_enable_tx_mac(struct niu *np, int on) |
5754 | { |
5755 | if (np->flags & NIU_FLAGS_XMAC) |
5756 | niu_enable_tx_xmac(np, on); |
5757 | else |
5758 | niu_enable_tx_bmac(np, on); |
5759 | } |
5760 | |
5761 | static void niu_enable_rx_xmac(struct niu *np, int on) |
5762 | { |
5763 | u64 val = nr64_mac(XMAC_CONFIG); |
5764 | |
5765 | val &= ~(XMAC_CONFIG_HASH_FILTER_EN | |
5766 | XMAC_CONFIG_PROMISCUOUS); |
5767 | |
5768 | if (np->flags & NIU_FLAGS_MCAST) |
5769 | val |= XMAC_CONFIG_HASH_FILTER_EN; |
5770 | if (np->flags & NIU_FLAGS_PROMISC) |
5771 | val |= XMAC_CONFIG_PROMISCUOUS; |
5772 | |
5773 | if (on) |
5774 | val |= XMAC_CONFIG_RX_MAC_ENABLE; |
5775 | else |
5776 | val &= ~XMAC_CONFIG_RX_MAC_ENABLE; |
5777 | nw64_mac(XMAC_CONFIG, val); |
5778 | } |
5779 | |
5780 | static void niu_enable_rx_bmac(struct niu *np, int on) |
5781 | { |
5782 | u64 val = nr64_mac(BRXMAC_CONFIG); |
5783 | |
5784 | val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | |
5785 | BRXMAC_CONFIG_PROMISC); |
5786 | |
5787 | if (np->flags & NIU_FLAGS_MCAST) |
5788 | val |= BRXMAC_CONFIG_HASH_FILT_EN; |
5789 | if (np->flags & NIU_FLAGS_PROMISC) |
5790 | val |= BRXMAC_CONFIG_PROMISC; |
5791 | |
5792 | if (on) |
5793 | val |= BRXMAC_CONFIG_ENABLE; |
5794 | else |
5795 | val &= ~BRXMAC_CONFIG_ENABLE; |
5796 | nw64_mac(BRXMAC_CONFIG, val); |
5797 | } |
5798 | |
5799 | static void niu_enable_rx_mac(struct niu *np, int on) |
5800 | { |
5801 | if (np->flags & NIU_FLAGS_XMAC) |
5802 | niu_enable_rx_xmac(np, on); |
5803 | else |
5804 | niu_enable_rx_bmac(np, on); |
5805 | } |
5806 | |
5807 | static int niu_init_mac(struct niu *np) |
5808 | { |
5809 | int err; |
5810 | |
5811 | niu_init_xif(np); |
5812 | err = niu_init_pcs(np); |
5813 | if (err) |
5814 | return err; |
5815 | |
5816 | err = niu_reset_tx_mac(np); |
5817 | if (err) |
5818 | return err; |
5819 | niu_init_tx_mac(np); |
5820 | err = niu_reset_rx_mac(np); |
5821 | if (err) |
5822 | return err; |
5823 | niu_init_rx_mac(np); |
5824 | |
	/* This looks hokey but the RX MAC reset we just did will
	 * undo some of the state we set up in niu_init_tx_mac() so we
	 * have to call it again. In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);
5834 | |
5835 | return 0; |
5836 | } |
5837 | |
5838 | static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) |
5839 | { |
	(void) niu_tx_channel_stop(np, rp->tx_channel);
5841 | } |
5842 | |
5843 | static void niu_stop_tx_channels(struct niu *np) |
5844 | { |
5845 | int i; |
5846 | |
5847 | for (i = 0; i < np->num_tx_rings; i++) { |
5848 | struct tx_ring_info *rp = &np->tx_rings[i]; |
5849 | |
5850 | niu_stop_one_tx_channel(np, rp); |
5851 | } |
5852 | } |
5853 | |
5854 | static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) |
5855 | { |
	(void) niu_tx_channel_reset(np, rp->tx_channel);
5857 | } |
5858 | |
5859 | static void niu_reset_tx_channels(struct niu *np) |
5860 | { |
5861 | int i; |
5862 | |
5863 | for (i = 0; i < np->num_tx_rings; i++) { |
5864 | struct tx_ring_info *rp = &np->tx_rings[i]; |
5865 | |
5866 | niu_reset_one_tx_channel(np, rp); |
5867 | } |
5868 | } |
5869 | |
5870 | static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) |
5871 | { |
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5873 | } |
5874 | |
5875 | static void niu_stop_rx_channels(struct niu *np) |
5876 | { |
5877 | int i; |
5878 | |
5879 | for (i = 0; i < np->num_rx_rings; i++) { |
5880 | struct rx_ring_info *rp = &np->rx_rings[i]; |
5881 | |
5882 | niu_stop_one_rx_channel(np, rp); |
5883 | } |
5884 | } |
5885 | |
5886 | static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) |
5887 | { |
5888 | int channel = rp->rx_channel; |
5889 | |
5890 | (void) niu_rx_channel_reset(np, channel); |
5891 | nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); |
5892 | nw64(RX_DMA_CTL_STAT(channel), 0); |
	(void) niu_enable_rx_channel(np, channel, 0);
5894 | } |
5895 | |
5896 | static void niu_reset_rx_channels(struct niu *np) |
5897 | { |
5898 | int i; |
5899 | |
5900 | for (i = 0; i < np->num_rx_rings; i++) { |
5901 | struct rx_ring_info *rp = &np->rx_rings[i]; |
5902 | |
5903 | niu_reset_one_rx_channel(np, rp); |
5904 | } |
5905 | } |
5906 | |
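/* Before turning the IPP block off, wait for its data FIFO to drain by
 * re-reading the read/write pointers (up to 100 times) until they match.
 * The error below is only reported when the pointers look genuinely
 * stuck; the rd == 0 / wr == 1 case appears to be an acceptable
 * post-drain state of the hardware.
 */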
5907 | static void niu_disable_ipp(struct niu *np) |
5908 | { |
5909 | u64 rd, wr, val; |
5910 | int limit; |
5911 | |
5912 | rd = nr64_ipp(IPP_DFIFO_RD_PTR); |
5913 | wr = nr64_ipp(IPP_DFIFO_WR_PTR); |
5914 | limit = 100; |
5915 | while (--limit >= 0 && (rd != wr)) { |
5916 | rd = nr64_ipp(IPP_DFIFO_RD_PTR); |
5917 | wr = nr64_ipp(IPP_DFIFO_WR_PTR); |
5918 | } |
5919 | if (limit < 0 && |
5920 | (rd != 0 && wr != 1)) { |
		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
5924 | } |
5925 | |
5926 | val = nr64_ipp(IPP_CFIG); |
5927 | val &= ~(IPP_CFIG_IPP_ENABLE | |
5928 | IPP_CFIG_DFIFO_ECC_EN | |
5929 | IPP_CFIG_DROP_BAD_CRC | |
5930 | IPP_CFIG_CKSUM_EN); |
5931 | nw64_ipp(IPP_CFIG, val); |
5932 | |
5933 | (void) niu_ipp_reset(np); |
5934 | } |
5935 | |
5936 | static int niu_init_hw(struct niu *np) |
5937 | { |
5938 | int i, err; |
5939 | |
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
	niu_disable_ipp(np);

out_uninit_rx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);
5994 | |
5995 | return err; |
5996 | } |
5997 | |
5998 | static void niu_stop_hw(struct niu *np) |
5999 | { |
	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
	niu_enable_interrupts(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
	niu_enable_rx_mac(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
	niu_disable_ipp(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
	niu_stop_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
	niu_stop_rx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
	niu_reset_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
	niu_reset_rx_channels(np);
6020 | } |
6021 | |
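/* Build the per-vector IRQ names.  Vector 0 always carries the MAC
 * group; on port 0, vectors 1 and 2 additionally carry the MIF and
 * SYSERR groups.  The remaining vectors are named after RX rings first
 * and TX rings second, which presumably mirrors the logical-device-group
 * assignment made at probe time.
 */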
6022 | static void niu_set_irq_name(struct niu *np) |
6023 | { |
6024 | int port = np->port; |
6025 | int i, j = 1; |
6026 | |
	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
6028 | |
6029 | if (port == 0) { |
		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
6032 | j = 3; |
6033 | } |
6034 | |
6035 | for (i = 0; i < np->num_ldg - j; i++) { |
6036 | if (i < np->num_rx_rings) |
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
6042 | } |
6043 | } |
6044 | |
6045 | static int niu_request_irq(struct niu *np) |
6046 | { |
6047 | int i, j, err; |
6048 | |
6049 | niu_set_irq_name(np); |
6050 | |
6051 | err = 0; |
6052 | for (i = 0; i < np->num_ldg; i++) { |
6053 | struct niu_ldg *lp = &np->ldg[i]; |
6054 | |
		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
				  np->irq_name[i], lp);
6057 | if (err) |
6058 | goto out_free_irqs; |
6059 | |
6060 | } |
6061 | |
6062 | return 0; |
6063 | |
6064 | out_free_irqs: |
6065 | for (j = 0; j < i; j++) { |
6066 | struct niu_ldg *lp = &np->ldg[j]; |
6067 | |
6068 | free_irq(lp->irq, lp); |
6069 | } |
6070 | return err; |
6071 | } |
6072 | |
6073 | static void niu_free_irq(struct niu *np) |
6074 | { |
6075 | int i; |
6076 | |
6077 | for (i = 0; i < np->num_ldg; i++) { |
6078 | struct niu_ldg *lp = &np->ldg[i]; |
6079 | |
6080 | free_irq(lp->irq, lp); |
6081 | } |
6082 | } |
6083 | |
6084 | static void niu_enable_napi(struct niu *np) |
6085 | { |
6086 | int i; |
6087 | |
6088 | for (i = 0; i < np->num_ldg; i++) |
		napi_enable(&np->ldg[i].napi);
6090 | } |
6091 | |
6092 | static void niu_disable_napi(struct niu *np) |
6093 | { |
6094 | int i; |
6095 | |
6096 | for (i = 0; i < np->num_ldg; i++) |
		napi_disable(&np->ldg[i].napi);
6098 | } |
6099 | |
6100 | static int niu_open(struct net_device *dev) |
6101 | { |
6102 | struct niu *np = netdev_priv(dev); |
6103 | int err; |
6104 | |
6105 | netif_carrier_off(dev); |
6106 | |
6107 | err = niu_alloc_channels(np); |
6108 | if (err) |
6109 | goto out_err; |
6110 | |
	err = niu_enable_interrupts(np, 0);
6112 | if (err) |
6113 | goto out_free_channels; |
6114 | |
6115 | err = niu_request_irq(np); |
6116 | if (err) |
6117 | goto out_free_channels; |
6118 | |
6119 | niu_enable_napi(np); |
6120 | |
	spin_lock_irq(&np->lock);
6122 | |
6123 | err = niu_init_hw(np); |
6124 | if (!err) { |
6125 | timer_setup(&np->timer, niu_timer, 0); |
6126 | np->timer.expires = jiffies + HZ; |
6127 | |
		err = niu_enable_interrupts(np, 1);
6129 | if (err) |
6130 | niu_stop_hw(np); |
6131 | } |
6132 | |
	spin_unlock_irq(&np->lock);
6134 | |
6135 | if (err) { |
6136 | niu_disable_napi(np); |
6137 | goto out_free_irq; |
6138 | } |
6139 | |
6140 | netif_tx_start_all_queues(dev); |
6141 | |
6142 | if (np->link_config.loopback_mode != LOOPBACK_DISABLED) |
6143 | netif_carrier_on(dev); |
6144 | |
	add_timer(&np->timer);
6146 | |
6147 | return 0; |
6148 | |
6149 | out_free_irq: |
6150 | niu_free_irq(np); |
6151 | |
6152 | out_free_channels: |
6153 | niu_free_channels(np); |
6154 | |
6155 | out_err: |
6156 | return err; |
6157 | } |
6158 | |
6159 | static void niu_full_shutdown(struct niu *np, struct net_device *dev) |
6160 | { |
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
6173 | } |
6174 | |
6175 | static int niu_close(struct net_device *dev) |
6176 | { |
6177 | struct niu *np = netdev_priv(dev); |
6178 | |
6179 | niu_full_shutdown(np, dev); |
6180 | |
6181 | niu_free_irq(np); |
6182 | |
6183 | niu_free_channels(np); |
6184 | |
	niu_handle_led(np, 0);
6186 | |
6187 | return 0; |
6188 | } |
6189 | |
6190 | static void niu_sync_xmac_stats(struct niu *np) |
6191 | { |
6192 | struct niu_xmac_stats *mp = &np->mac_stats.xmac; |
6193 | |
6194 | mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); |
6195 | mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); |
6196 | |
6197 | mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); |
6198 | mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); |
6199 | mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); |
6200 | mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); |
6201 | mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); |
6202 | mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); |
6203 | mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); |
6204 | mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); |
6205 | mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); |
6206 | mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); |
6207 | mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); |
6208 | mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); |
6209 | mp->rx_octets += nr64_mac(RXMAC_BT_CNT); |
6210 | mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); |
6211 | mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); |
6212 | mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); |
6213 | } |
6214 | |
6215 | static void niu_sync_bmac_stats(struct niu *np) |
6216 | { |
6217 | struct niu_bmac_stats *mp = &np->mac_stats.bmac; |
6218 | |
6219 | mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); |
6220 | mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); |
6221 | |
6222 | mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); |
6223 | mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); |
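	/* Note: rx_crc_errors accumulates the same BRXMAC_ALIGN_ERR_CNT
	 * register as rx_align_errors above; no separate CRC error
	 * counter is read here.
	 */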
6224 | mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); |
6225 | mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); |
6226 | } |
6227 | |
6228 | static void niu_sync_mac_stats(struct niu *np) |
6229 | { |
6230 | if (np->flags & NIU_FLAGS_XMAC) |
6231 | niu_sync_xmac_stats(np); |
6232 | else |
6233 | niu_sync_bmac_stats(np); |
6234 | } |
6235 | |
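/* Sample np->rx_rings once with READ_ONCE(): the ring array can be
 * torn down concurrently (see niu_free_channels()), so take the pointer
 * a single time and bail out if it is NULL rather than re-reading it.
 */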
6236 | static void niu_get_rx_stats(struct niu *np, |
6237 | struct rtnl_link_stats64 *stats) |
6238 | { |
6239 | u64 pkts, dropped, errors, bytes; |
6240 | struct rx_ring_info *rx_rings; |
6241 | int i; |
6242 | |
6243 | pkts = dropped = errors = bytes = 0; |
6244 | |
6245 | rx_rings = READ_ONCE(np->rx_rings); |
6246 | if (!rx_rings) |
6247 | goto no_rings; |
6248 | |
6249 | for (i = 0; i < np->num_rx_rings; i++) { |
6250 | struct rx_ring_info *rp = &rx_rings[i]; |
6251 | |
		niu_sync_rx_discard_stats(np, rp, 0);
6253 | |
6254 | pkts += rp->rx_packets; |
6255 | bytes += rp->rx_bytes; |
6256 | dropped += rp->rx_dropped; |
6257 | errors += rp->rx_errors; |
6258 | } |
6259 | |
6260 | no_rings: |
6261 | stats->rx_packets = pkts; |
6262 | stats->rx_bytes = bytes; |
6263 | stats->rx_dropped = dropped; |
6264 | stats->rx_errors = errors; |
6265 | } |
6266 | |
6267 | static void niu_get_tx_stats(struct niu *np, |
6268 | struct rtnl_link_stats64 *stats) |
6269 | { |
6270 | u64 pkts, errors, bytes; |
6271 | struct tx_ring_info *tx_rings; |
6272 | int i; |
6273 | |
6274 | pkts = errors = bytes = 0; |
6275 | |
6276 | tx_rings = READ_ONCE(np->tx_rings); |
6277 | if (!tx_rings) |
6278 | goto no_rings; |
6279 | |
6280 | for (i = 0; i < np->num_tx_rings; i++) { |
6281 | struct tx_ring_info *rp = &tx_rings[i]; |
6282 | |
6283 | pkts += rp->tx_packets; |
6284 | bytes += rp->tx_bytes; |
6285 | errors += rp->tx_errors; |
6286 | } |
6287 | |
6288 | no_rings: |
6289 | stats->tx_packets = pkts; |
6290 | stats->tx_bytes = bytes; |
6291 | stats->tx_errors = errors; |
6292 | } |
6293 | |
6294 | static void niu_get_stats(struct net_device *dev, |
6295 | struct rtnl_link_stats64 *stats) |
6296 | { |
6297 | struct niu *np = netdev_priv(dev); |
6298 | |
6299 | if (netif_running(dev)) { |
6300 | niu_get_rx_stats(np, stats); |
6301 | niu_get_tx_stats(np, stats); |
6302 | } |
6303 | } |
6304 | |
6305 | static void niu_load_hash_xmac(struct niu *np, u16 *hash) |
6306 | { |
6307 | int i; |
6308 | |
6309 | for (i = 0; i < 16; i++) |
6310 | nw64_mac(XMAC_HASH_TBL(i), hash[i]); |
6311 | } |
6312 | |
6313 | static void niu_load_hash_bmac(struct niu *np, u16 *hash) |
6314 | { |
6315 | int i; |
6316 | |
6317 | for (i = 0; i < 16; i++) |
6318 | nw64_mac(BMAC_HASH_TBL(i), hash[i]); |
6319 | } |
6320 | |
6321 | static void niu_load_hash(struct niu *np, u16 *hash) |
6322 | { |
6323 | if (np->flags & NIU_FLAGS_XMAC) |
6324 | niu_load_hash_xmac(np, hash); |
6325 | else |
6326 | niu_load_hash_bmac(np, hash); |
6327 | } |
6328 | |
6329 | static void niu_set_rx_mode(struct net_device *dev) |
6330 | { |
6331 | struct niu *np = netdev_priv(dev); |
6332 | int i, alt_cnt, err; |
6333 | struct netdev_hw_addr *ha; |
6334 | unsigned long flags; |
6335 | u16 hash[16] = { 0, }; |
6336 | |
6337 | spin_lock_irqsave(&np->lock, flags); |
	niu_enable_rx_mac(np, 0);
6339 | |
6340 | np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); |
6341 | if (dev->flags & IFF_PROMISC) |
6342 | np->flags |= NIU_FLAGS_PROMISC; |
6343 | if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) |
6344 | np->flags |= NIU_FLAGS_MCAST; |
6345 | |
6346 | alt_cnt = netdev_uc_count(dev); |
6347 | if (alt_cnt > niu_num_alt_addr(np)) { |
6348 | alt_cnt = 0; |
6349 | np->flags |= NIU_FLAGS_PROMISC; |
6350 | } |
6351 | |
6352 | if (alt_cnt) { |
6353 | int index = 0; |
6354 | |
6355 | netdev_for_each_uc_addr(ha, dev) { |
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				netdev_warn(dev, "Error %d adding alt mac %d\n",
					    err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				netdev_warn(dev, "Error %d enabling alt mac %d\n",
					    err, index);
6364 | |
6365 | index++; |
6366 | } |
6367 | } else { |
6368 | int alt_start; |
6369 | if (np->flags & NIU_FLAGS_XMAC) |
6370 | alt_start = 0; |
6371 | else |
6372 | alt_start = 1; |
6373 | for (i = alt_start; i < niu_num_alt_addr(np); i++) { |
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				netdev_warn(dev, "Error %d disabling alt mac %d\n",
					    err, i);
6378 | } |
6379 | } |
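	/* The multicast filter is a 256-bit hash table spread across
	 * sixteen 16-bit registers.  The top 8 bits of the little-endian
	 * CRC of the destination address select a bit: the upper 4 bits
	 * pick the register and the lower 4 bits pick the bit within it,
	 * MSB first.
	 */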
6380 | if (dev->flags & IFF_ALLMULTI) { |
6381 | for (i = 0; i < 16; i++) |
6382 | hash[i] = 0xffff; |
6383 | } else if (!netdev_mc_empty(dev)) { |
6384 | netdev_for_each_mc_addr(ha, dev) { |
6385 | u32 crc = ether_crc_le(ETH_ALEN, ha->addr); |
6386 | |
6387 | crc >>= 24; |
6388 | hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); |
6389 | } |
6390 | } |
6391 | |
6392 | if (np->flags & NIU_FLAGS_MCAST) |
6393 | niu_load_hash(np, hash); |
6394 | |
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
6397 | } |
6398 | |
6399 | static int niu_set_mac_addr(struct net_device *dev, void *p) |
6400 | { |
6401 | struct niu *np = netdev_priv(dev); |
6402 | struct sockaddr *addr = p; |
6403 | unsigned long flags; |
6404 | |
	if (!is_valid_ether_addr(addr->sa_data))
6406 | return -EADDRNOTAVAIL; |
6407 | |
	eth_hw_addr_set(dev, addr->sa_data);
6409 | |
6410 | if (!netif_running(dev)) |
6411 | return 0; |
6412 | |
6413 | spin_lock_irqsave(&np->lock, flags); |
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
6418 | |
6419 | return 0; |
6420 | } |
6421 | |
6422 | static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
6423 | { |
6424 | return -EOPNOTSUPP; |
6425 | } |
6426 | |
6427 | static void niu_netif_stop(struct niu *np) |
6428 | { |
	netif_trans_update(np->dev);	/* prevent tx timeout */
6430 | |
6431 | niu_disable_napi(np); |
6432 | |
	netif_tx_disable(np->dev);
6434 | } |
6435 | |
6436 | static void niu_netif_start(struct niu *np) |
6437 | { |
6438 | /* NOTE: unconditional netif_wake_queue is only appropriate |
6439 | * so long as all callers are assured to have free tx slots |
6440 | * (such as after niu_init_hw). |
6441 | */ |
	netif_tx_wake_all_queues(np->dev);
6443 | |
6444 | niu_enable_napi(np); |
6445 | |
	niu_enable_interrupts(np, 1);
6447 | } |
6448 | |
6449 | static void niu_reset_buffers(struct niu *np) |
6450 | { |
6451 | int i, j, k, err; |
6452 | |
6453 | if (np->rx_rings) { |
6454 | for (i = 0; i < np->num_rx_rings; i++) { |
6455 | struct rx_ring_info *rp = &np->rx_rings[i]; |
6456 | |
6457 | for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { |
6458 | struct page *page; |
6459 | |
6460 | page = rp->rxhash[j]; |
6461 | while (page) { |
6462 | struct page *next = niu_next_page(page); |
6463 | u64 base = page->index; |
6464 | base = base >> RBR_DESCR_ADDR_SHIFT; |
6465 | rp->rbr[k++] = cpu_to_le32(base); |
6466 | page = next; |
6467 | } |
6468 | } |
6469 | for (; k < MAX_RBR_RING_SIZE; k++) { |
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6471 | if (unlikely(err)) |
6472 | break; |
6473 | } |
6474 | |
6475 | rp->rbr_index = rp->rbr_table_size - 1; |
6476 | rp->rcr_index = 0; |
6477 | rp->rbr_pending = 0; |
6478 | rp->rbr_refill_pending = 0; |
6479 | } |
6480 | } |
6481 | if (np->tx_rings) { |
6482 | for (i = 0; i < np->num_tx_rings; i++) { |
6483 | struct tx_ring_info *rp = &np->tx_rings[i]; |
6484 | |
6485 | for (j = 0; j < MAX_TX_RING_SIZE; j++) { |
6486 | if (rp->tx_buffs[j].skb) |
					(void) release_tx_packet(np, rp, j);
6488 | } |
6489 | |
6490 | rp->pending = MAX_TX_RING_SIZE; |
6491 | rp->prod = 0; |
6492 | rp->cons = 0; |
6493 | rp->wrap_bit = 0; |
6494 | } |
6495 | } |
6496 | } |
6497 | |
6498 | static void niu_reset_task(struct work_struct *work) |
6499 | { |
6500 | struct niu *np = container_of(work, struct niu, reset_task); |
6501 | unsigned long flags; |
6502 | int err; |
6503 | |
6504 | spin_lock_irqsave(&np->lock, flags); |
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);
6513 | |
6514 | niu_netif_stop(np); |
6515 | |
6516 | spin_lock_irqsave(&np->lock, flags); |
6517 | |
6518 | niu_stop_hw(np); |
6519 | |
	spin_unlock_irqrestore(&np->lock, flags);
6521 | |
6522 | niu_reset_buffers(np); |
6523 | |
6524 | spin_lock_irqsave(&np->lock, flags); |
6525 | |
6526 | err = niu_init_hw(np); |
6527 | if (!err) { |
6528 | np->timer.expires = jiffies + HZ; |
		add_timer(&np->timer);
6530 | niu_netif_start(np); |
6531 | } |
6532 | |
	spin_unlock_irqrestore(&np->lock, flags);
6534 | } |
6535 | |
6536 | static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue) |
6537 | { |
6538 | struct niu *np = netdev_priv(dev); |
6539 | |
	dev_err(np->device, "%s: Transmit timed out, resetting\n",
6541 | dev->name); |
6542 | |
	schedule_work(&np->reset_task);
6544 | } |
6545 | |
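/* Pack one TX descriptor: a single little-endian 64-bit word holding
 * the SOP/MARK bits, the descriptor count for the packet, the buffer
 * length, and the DMA address masked to the TX_DESC_SAD field.  The
 * transmit path zeroes the mark and count on all but the first
 * descriptor of a packet.
 */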
6546 | static void niu_set_txd(struct tx_ring_info *rp, int index, |
6547 | u64 mapping, u64 len, u64 mark, |
6548 | u64 n_frags) |
6549 | { |
6550 | __le64 *desc = &rp->descr[index]; |
6551 | |
6552 | *desc = cpu_to_le64(mark | |
6553 | (n_frags << TX_DESC_NUM_PTR_SHIFT) | |
6554 | (len << TX_DESC_TR_LEN_SHIFT) | |
6555 | (mapping & TX_DESC_SAD)); |
6556 | } |
6557 | |
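/* Compute the flags word of the software tx_pkt_hdr prepended to every
 * outgoing frame.  The L3/L4 offsets the hardware wants are expressed
 * in 2-byte units, measured from the end of the pad bytes plus the
 * tx_pkt_hdr itself; hence the "/ 2" and the offset subtractions below.
 */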
6558 | static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, |
6559 | u64 pad_bytes, u64 len) |
6560 | { |
6561 | u16 eth_proto, eth_proto_inner; |
6562 | u64 csum_bits, l3off, ihl, ret; |
6563 | u8 ip_proto; |
6564 | int ipv6; |
6565 | |
6566 | eth_proto = be16_to_cpu(ehdr->h_proto); |
6567 | eth_proto_inner = eth_proto; |
6568 | if (eth_proto == ETH_P_8021Q) { |
6569 | struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; |
6570 | __be16 val = vp->h_vlan_encapsulated_proto; |
6571 | |
6572 | eth_proto_inner = be16_to_cpu(val); |
6573 | } |
6574 | |
6575 | ipv6 = ihl = 0; |
6576 | switch (skb->protocol) { |
6577 | case cpu_to_be16(ETH_P_IP): |
6578 | ip_proto = ip_hdr(skb)->protocol; |
6579 | ihl = ip_hdr(skb)->ihl; |
6580 | break; |
6581 | case cpu_to_be16(ETH_P_IPV6): |
6582 | ip_proto = ipv6_hdr(skb)->nexthdr; |
6583 | ihl = (40 >> 2); |
6584 | ipv6 = 1; |
6585 | break; |
6586 | default: |
6587 | ip_proto = ihl = 0; |
6588 | break; |
6589 | } |
6590 | |
6591 | csum_bits = TXHDR_CSUM_NONE; |
6592 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
6593 | u64 start, stuff; |
6594 | |
6595 | csum_bits = (ip_proto == IPPROTO_TCP ? |
6596 | TXHDR_CSUM_TCP : |
6597 | (ip_proto == IPPROTO_UDP ? |
6598 | TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); |
6599 | |
6600 | start = skb_checksum_start_offset(skb) - |
6601 | (pad_bytes + sizeof(struct tx_pkt_hdr)); |
6602 | stuff = start + skb->csum_offset; |
6603 | |
6604 | csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; |
6605 | csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; |
6606 | } |
6607 | |
6608 | l3off = skb_network_offset(skb) - |
6609 | (pad_bytes + sizeof(struct tx_pkt_hdr)); |
6610 | |
6611 | ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | |
6612 | (len << TXHDR_LEN_SHIFT) | |
6613 | ((l3off / 2) << TXHDR_L3START_SHIFT) | |
6614 | (ihl << TXHDR_IHL_SHIFT) | |
6615 | ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) | |
6616 | ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | |
6617 | (ipv6 ? TXHDR_IP_VER : 0) | |
6618 | csum_bits); |
6619 | |
6620 | return ret; |
6621 | } |
6622 | |
6623 | static netdev_tx_t niu_start_xmit(struct sk_buff *skb, |
6624 | struct net_device *dev) |
6625 | { |
6626 | struct niu *np = netdev_priv(dev); |
6627 | unsigned long align, headroom; |
6628 | struct netdev_queue *txq; |
6629 | struct tx_ring_info *rp; |
6630 | struct tx_pkt_hdr *tp; |
6631 | unsigned int len, nfg; |
6632 | struct ethhdr *ehdr; |
6633 | int prod, i, tlen; |
6634 | u64 mapping, mrk; |
6635 | |
6636 | i = skb_get_queue_mapping(skb); |
6637 | rp = &np->tx_rings[i]; |
	txq = netdev_get_tx_queue(dev, i);
6639 | |
	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
6643 | rp->tx_errors++; |
6644 | return NETDEV_TX_BUSY; |
6645 | } |
6646 | |
6647 | if (eth_skb_pad(skb)) |
6648 | goto out; |
6649 | |
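	/* Reserve worst-case headroom for the tx_pkt_hdr plus up to 15
	 * pad bytes; the header is laid down at the current 16-byte
	 * alignment offset of skb->data (computed as "align" below).
	 */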
6650 | len = sizeof(struct tx_pkt_hdr) + 15; |
6651 | if (skb_headroom(skb) < len) { |
6652 | struct sk_buff *skb_new; |
6653 | |
		skb_new = skb_realloc_headroom(skb, len);
6655 | if (!skb_new) |
6656 | goto out_drop; |
6657 | kfree_skb(skb); |
6658 | skb = skb_new; |
6659 | } else |
6660 | skb_orphan(skb); |
6661 | |
6662 | align = ((unsigned long) skb->data & (16 - 1)); |
6663 | headroom = align + sizeof(struct tx_pkt_hdr); |
6664 | |
6665 | ehdr = (struct ethhdr *) skb->data; |
	tp = skb_push(skb, headroom);
6667 | |
6668 | len = skb->len - sizeof(struct tx_pkt_hdr); |
6669 | tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); |
6670 | tp->resv = 0; |
6671 | |
6672 | len = skb_headlen(skb); |
6673 | mapping = np->ops->map_single(np->device, skb->data, |
6674 | len, DMA_TO_DEVICE); |
6675 | |
6676 | prod = rp->prod; |
6677 | |
6678 | rp->tx_buffs[prod].skb = skb; |
6679 | rp->tx_buffs[prod].mapping = mapping; |
6680 | |
6681 | mrk = TX_DESC_SOP; |
6682 | if (++rp->mark_counter == rp->mark_freq) { |
6683 | rp->mark_counter = 0; |
6684 | mrk |= TX_DESC_MARK; |
6685 | rp->mark_pending++; |
6686 | } |
6687 | |
6688 | tlen = len; |
6689 | nfg = skb_shinfo(skb)->nr_frags; |
6690 | while (tlen > 0) { |
6691 | tlen -= MAX_TX_DESC_LEN; |
6692 | nfg++; |
6693 | } |
6694 | |
6695 | while (len > 0) { |
6696 | unsigned int this_len = len; |
6697 | |
6698 | if (this_len > MAX_TX_DESC_LEN) |
6699 | this_len = MAX_TX_DESC_LEN; |
6700 | |
		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6702 | mrk = nfg = 0; |
6703 | |
6704 | prod = NEXT_TX(rp, prod); |
6705 | mapping += this_len; |
6706 | len -= this_len; |
6707 | } |
6708 | |
6709 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
6710 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
6711 | |
6712 | len = skb_frag_size(frag); |
6713 | mapping = np->ops->map_page(np->device, skb_frag_page(frag), |
6714 | skb_frag_off(frag), len, |
6715 | DMA_TO_DEVICE); |
6716 | |
6717 | rp->tx_buffs[prod].skb = NULL; |
6718 | rp->tx_buffs[prod].mapping = mapping; |
6719 | |
		niu_set_txd(rp, prod, mapping, len, 0, 0);
6721 | |
6722 | prod = NEXT_TX(rp, prod); |
6723 | } |
6724 | |
6725 | if (prod < rp->prod) |
6726 | rp->wrap_bit ^= TX_RING_KICK_WRAP; |
6727 | rp->prod = prod; |
6728 | |
6729 | nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); |
6730 | |
6731 | if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { |
		netif_tx_stop_queue(txq);
		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
			netif_tx_wake_queue(txq);
6735 | } |
6736 | |
6737 | out: |
6738 | return NETDEV_TX_OK; |
6739 | |
6740 | out_drop: |
6741 | rp->tx_errors++; |
6742 | kfree_skb(skb); |
6743 | goto out; |
6744 | } |
6745 | |
6746 | static int niu_change_mtu(struct net_device *dev, int new_mtu) |
6747 | { |
6748 | struct niu *np = netdev_priv(dev); |
6749 | int err, orig_jumbo, new_jumbo; |
6750 | |
6751 | orig_jumbo = (dev->mtu > ETH_DATA_LEN); |
6752 | new_jumbo = (new_mtu > ETH_DATA_LEN); |
6753 | |
6754 | dev->mtu = new_mtu; |
6755 | |
6756 | if (!netif_running(dev) || |
6757 | (orig_jumbo == new_jumbo)) |
6758 | return 0; |
6759 | |
6760 | niu_full_shutdown(np, dev); |
6761 | |
6762 | niu_free_channels(np); |
6763 | |
6764 | niu_enable_napi(np); |
6765 | |
6766 | err = niu_alloc_channels(np); |
6767 | if (err) |
6768 | return err; |
6769 | |
	spin_lock_irq(&np->lock);
6771 | |
6772 | err = niu_init_hw(np); |
6773 | if (!err) { |
6774 | timer_setup(&np->timer, niu_timer, 0); |
6775 | np->timer.expires = jiffies + HZ; |
6776 | |
		err = niu_enable_interrupts(np, 1);
6778 | if (err) |
6779 | niu_stop_hw(np); |
6780 | } |
6781 | |
	spin_unlock_irq(&np->lock);
6783 | |
6784 | if (!err) { |
6785 | netif_tx_start_all_queues(dev); |
6786 | if (np->link_config.loopback_mode != LOOPBACK_DISABLED) |
6787 | netif_carrier_on(dev); |
6788 | |
		add_timer(&np->timer);
6790 | } |
6791 | |
6792 | return err; |
6793 | } |
6794 | |
6795 | static void niu_get_drvinfo(struct net_device *dev, |
6796 | struct ethtool_drvinfo *info) |
6797 | { |
6798 | struct niu *np = netdev_priv(dev); |
6799 | struct niu_vpd *vpd = &np->vpd; |
6800 | |
6801 | strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); |
6802 | strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); |
	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
		 vpd->fcode_major, vpd->fcode_minor);
6805 | if (np->parent->plat_type != PLAT_TYPE_NIU) |
6806 | strscpy(info->bus_info, pci_name(np->pdev), |
6807 | sizeof(info->bus_info)); |
6808 | } |
6809 | |
6810 | static int niu_get_link_ksettings(struct net_device *dev, |
6811 | struct ethtool_link_ksettings *cmd) |
6812 | { |
6813 | struct niu *np = netdev_priv(dev); |
6814 | struct niu_link_config *lp; |
6815 | |
6816 | lp = &np->link_config; |
6817 | |
6818 | memset(cmd, 0, sizeof(*cmd)); |
6819 | cmd->base.phy_address = np->phy_addr; |
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						lp->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						lp->active_advertising);
6824 | cmd->base.autoneg = lp->active_autoneg; |
6825 | cmd->base.speed = lp->active_speed; |
6826 | cmd->base.duplex = lp->active_duplex; |
6827 | cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; |
6828 | |
6829 | return 0; |
6830 | } |
6831 | |
6832 | static int niu_set_link_ksettings(struct net_device *dev, |
6833 | const struct ethtool_link_ksettings *cmd) |
6834 | { |
6835 | struct niu *np = netdev_priv(dev); |
6836 | struct niu_link_config *lp = &np->link_config; |
6837 | |
	ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
						cmd->link_modes.advertising);
6840 | lp->speed = cmd->base.speed; |
6841 | lp->duplex = cmd->base.duplex; |
6842 | lp->autoneg = cmd->base.autoneg; |
6843 | return niu_init_link(np); |
6844 | } |
6845 | |
6846 | static u32 niu_get_msglevel(struct net_device *dev) |
6847 | { |
6848 | struct niu *np = netdev_priv(dev); |
6849 | return np->msg_enable; |
6850 | } |
6851 | |
6852 | static void niu_set_msglevel(struct net_device *dev, u32 value) |
6853 | { |
6854 | struct niu *np = netdev_priv(dev); |
6855 | np->msg_enable = value; |
6856 | } |
6857 | |
6858 | static int niu_nway_reset(struct net_device *dev) |
6859 | { |
6860 | struct niu *np = netdev_priv(dev); |
6861 | |
6862 | if (np->link_config.autoneg) |
6863 | return niu_init_link(np); |
6864 | |
6865 | return 0; |
6866 | } |
6867 | |
6868 | static int niu_get_eeprom_len(struct net_device *dev) |
6869 | { |
6870 | struct niu *np = netdev_priv(dev); |
6871 | |
6872 | return np->eeprom_len; |
6873 | } |
6874 | |
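/* The EEPROM contents are exposed through the 32-bit ESPC_NCR
 * registers, one word per register, so an arbitrary byte range is
 * handled in three steps: a leading partial word, a run of whole
 * words, and a trailing partial word.
 */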
6875 | static int niu_get_eeprom(struct net_device *dev, |
6876 | struct ethtool_eeprom *eeprom, u8 *data) |
6877 | { |
6878 | struct niu *np = netdev_priv(dev); |
6879 | u32 offset, len, val; |
6880 | |
6881 | offset = eeprom->offset; |
6882 | len = eeprom->len; |
6883 | |
6884 | if (offset + len < offset) |
6885 | return -EINVAL; |
6886 | if (offset >= np->eeprom_len) |
6887 | return -EINVAL; |
6888 | if (offset + len > np->eeprom_len) |
6889 | len = eeprom->len = np->eeprom_len - offset; |
6890 | |
6891 | if (offset & 3) { |
6892 | u32 b_offset, b_count; |
6893 | |
6894 | b_offset = offset & 3; |
6895 | b_count = 4 - b_offset; |
6896 | if (b_count > len) |
6897 | b_count = len; |
6898 | |
6899 | val = nr64(ESPC_NCR((offset - b_offset) / 4)); |
6900 | memcpy(data, ((char *)&val) + b_offset, b_count); |
6901 | data += b_count; |
6902 | len -= b_count; |
6903 | offset += b_count; |
6904 | } |
6905 | while (len >= 4) { |
6906 | val = nr64(ESPC_NCR(offset / 4)); |
6907 | memcpy(data, &val, 4); |
6908 | data += 4; |
6909 | len -= 4; |
6910 | offset += 4; |
6911 | } |
6912 | if (len) { |
6913 | val = nr64(ESPC_NCR(offset / 4)); |
6914 | memcpy(data, &val, len); |
6915 | } |
6916 | return 0; |
6917 | } |
6918 | |
6919 | static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) |
6920 | { |
6921 | switch (flow_type) { |
6922 | case TCP_V4_FLOW: |
6923 | case TCP_V6_FLOW: |
6924 | *pid = IPPROTO_TCP; |
6925 | break; |
6926 | case UDP_V4_FLOW: |
6927 | case UDP_V6_FLOW: |
6928 | *pid = IPPROTO_UDP; |
6929 | break; |
6930 | case SCTP_V4_FLOW: |
6931 | case SCTP_V6_FLOW: |
6932 | *pid = IPPROTO_SCTP; |
6933 | break; |
6934 | case AH_V4_FLOW: |
6935 | case AH_V6_FLOW: |
6936 | *pid = IPPROTO_AH; |
6937 | break; |
6938 | case ESP_V4_FLOW: |
6939 | case ESP_V6_FLOW: |
6940 | *pid = IPPROTO_ESP; |
6941 | break; |
6942 | default: |
6943 | *pid = 0; |
6944 | break; |
6945 | } |
6946 | } |
6947 | |
6948 | static int niu_class_to_ethflow(u64 class, int *flow_type) |
6949 | { |
6950 | switch (class) { |
6951 | case CLASS_CODE_TCP_IPV4: |
6952 | *flow_type = TCP_V4_FLOW; |
6953 | break; |
6954 | case CLASS_CODE_UDP_IPV4: |
6955 | *flow_type = UDP_V4_FLOW; |
6956 | break; |
6957 | case CLASS_CODE_AH_ESP_IPV4: |
6958 | *flow_type = AH_V4_FLOW; |
6959 | break; |
6960 | case CLASS_CODE_SCTP_IPV4: |
6961 | *flow_type = SCTP_V4_FLOW; |
6962 | break; |
6963 | case CLASS_CODE_TCP_IPV6: |
6964 | *flow_type = TCP_V6_FLOW; |
6965 | break; |
6966 | case CLASS_CODE_UDP_IPV6: |
6967 | *flow_type = UDP_V6_FLOW; |
6968 | break; |
6969 | case CLASS_CODE_AH_ESP_IPV6: |
6970 | *flow_type = AH_V6_FLOW; |
6971 | break; |
6972 | case CLASS_CODE_SCTP_IPV6: |
6973 | *flow_type = SCTP_V6_FLOW; |
6974 | break; |
6975 | case CLASS_CODE_USER_PROG1: |
6976 | case CLASS_CODE_USER_PROG2: |
6977 | case CLASS_CODE_USER_PROG3: |
6978 | case CLASS_CODE_USER_PROG4: |
6979 | *flow_type = IP_USER_FLOW; |
6980 | break; |
6981 | default: |
6982 | return -EINVAL; |
6983 | } |
6984 | |
6985 | return 0; |
6986 | } |
6987 | |
6988 | static int niu_ethflow_to_class(int flow_type, u64 *class) |
6989 | { |
6990 | switch (flow_type) { |
6991 | case TCP_V4_FLOW: |
6992 | *class = CLASS_CODE_TCP_IPV4; |
6993 | break; |
6994 | case UDP_V4_FLOW: |
6995 | *class = CLASS_CODE_UDP_IPV4; |
6996 | break; |
6997 | case AH_ESP_V4_FLOW: |
6998 | case AH_V4_FLOW: |
6999 | case ESP_V4_FLOW: |
7000 | *class = CLASS_CODE_AH_ESP_IPV4; |
7001 | break; |
7002 | case SCTP_V4_FLOW: |
7003 | *class = CLASS_CODE_SCTP_IPV4; |
7004 | break; |
7005 | case TCP_V6_FLOW: |
7006 | *class = CLASS_CODE_TCP_IPV6; |
7007 | break; |
7008 | case UDP_V6_FLOW: |
7009 | *class = CLASS_CODE_UDP_IPV6; |
7010 | break; |
7011 | case AH_ESP_V6_FLOW: |
7012 | case AH_V6_FLOW: |
7013 | case ESP_V6_FLOW: |
7014 | *class = CLASS_CODE_AH_ESP_IPV6; |
7015 | break; |
7016 | case SCTP_V6_FLOW: |
7017 | *class = CLASS_CODE_SCTP_IPV6; |
7018 | break; |
7019 | default: |
7020 | return 0; |
7021 | } |
7022 | |
7023 | return 1; |
7024 | } |
7025 | |
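/* Translate between the hardware FLOW_KEY_* bits and the RXH_* bits of
 * the ethtool RX hash interface.  RXH_L4_B_0_1 and RXH_L4_B_2_3 (the
 * source and destination ports for TCP/UDP) map onto the hardware's two
 * L4 byte-pair selectors via the FLOW_KEY_L4_0/1 shifts.
 */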
7026 | static u64 niu_flowkey_to_ethflow(u64 flow_key) |
7027 | { |
7028 | u64 ethflow = 0; |
7029 | |
7030 | if (flow_key & FLOW_KEY_L2DA) |
7031 | ethflow |= RXH_L2DA; |
7032 | if (flow_key & FLOW_KEY_VLAN) |
7033 | ethflow |= RXH_VLAN; |
7034 | if (flow_key & FLOW_KEY_IPSA) |
7035 | ethflow |= RXH_IP_SRC; |
7036 | if (flow_key & FLOW_KEY_IPDA) |
7037 | ethflow |= RXH_IP_DST; |
7038 | if (flow_key & FLOW_KEY_PROTO) |
7039 | ethflow |= RXH_L3_PROTO; |
7040 | if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) |
7041 | ethflow |= RXH_L4_B_0_1; |
7042 | if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) |
7043 | ethflow |= RXH_L4_B_2_3; |
7044 | |
	return ethflow;
}
7048 | |
7049 | static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) |
7050 | { |
7051 | u64 key = 0; |
7052 | |
7053 | if (ethflow & RXH_L2DA) |
7054 | key |= FLOW_KEY_L2DA; |
7055 | if (ethflow & RXH_VLAN) |
7056 | key |= FLOW_KEY_VLAN; |
7057 | if (ethflow & RXH_IP_SRC) |
7058 | key |= FLOW_KEY_IPSA; |
7059 | if (ethflow & RXH_IP_DST) |
7060 | key |= FLOW_KEY_IPDA; |
7061 | if (ethflow & RXH_L3_PROTO) |
7062 | key |= FLOW_KEY_PROTO; |
7063 | if (ethflow & RXH_L4_B_0_1) |
7064 | key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); |
7065 | if (ethflow & RXH_L4_B_2_3) |
7066 | key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); |
7067 | |
7068 | *flow_key = key; |
7069 | |
	return 1;
}
7073 | |
7074 | static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) |
7075 | { |
7076 | u64 class; |
7077 | |
7078 | nfc->data = 0; |
7079 | |
	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7081 | return -EINVAL; |
7082 | |
7083 | if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & |
7084 | TCAM_KEY_DISC) |
7085 | nfc->data = RXH_DISCARD; |
7086 | else |
		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
						   CLASS_CODE_USER_PROG1]);
7089 | return 0; |
7090 | } |
7091 | |
7092 | static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, |
7093 | struct ethtool_rx_flow_spec *fsp) |
7094 | { |
7095 | u32 tmp; |
7096 | u16 prt; |
7097 | |
7098 | tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; |
7099 | fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); |
7100 | |
7101 | tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; |
7102 | fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); |
7103 | |
7104 | tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; |
7105 | fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); |
7106 | |
7107 | tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; |
7108 | fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); |
7109 | |
7110 | fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> |
7111 | TCAM_V4KEY2_TOS_SHIFT; |
7112 | fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> |
7113 | TCAM_V4KEY2_TOS_SHIFT; |
7114 | |
7115 | switch (fsp->flow_type) { |
7116 | case TCP_V4_FLOW: |
7117 | case UDP_V4_FLOW: |
7118 | case SCTP_V4_FLOW: |
7119 | prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> |
7120 | TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; |
7121 | fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); |
7122 | |
7123 | prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> |
7124 | TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; |
7125 | fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); |
7126 | |
7127 | prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> |
7128 | TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; |
7129 | fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); |
7130 | |
7131 | prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> |
7132 | TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; |
7133 | fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); |
7134 | break; |
7135 | case AH_V4_FLOW: |
7136 | case ESP_V4_FLOW: |
7137 | tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> |
7138 | TCAM_V4KEY2_PORT_SPI_SHIFT; |
7139 | fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); |
7140 | |
7141 | tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> |
7142 | TCAM_V4KEY2_PORT_SPI_SHIFT; |
7143 | fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); |
7144 | break; |
7145 | case IP_USER_FLOW: |
7146 | tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> |
7147 | TCAM_V4KEY2_PORT_SPI_SHIFT; |
7148 | fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); |
7149 | |
7150 | tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> |
7151 | TCAM_V4KEY2_PORT_SPI_SHIFT; |
7152 | fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); |
7153 | |
7154 | fsp->h_u.usr_ip4_spec.proto = |
7155 | (tp->key[2] & TCAM_V4KEY2_PROTO) >> |
7156 | TCAM_V4KEY2_PROTO_SHIFT; |
7157 | fsp->m_u.usr_ip4_spec.proto = |
7158 | (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> |
7159 | TCAM_V4KEY2_PROTO_SHIFT; |
7160 | |
7161 | fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; |
7162 | break; |
7163 | default: |
7164 | break; |
7165 | } |
7166 | } |
7167 | |
7168 | static int niu_get_ethtool_tcam_entry(struct niu *np, |
7169 | struct ethtool_rxnfc *nfc) |
7170 | { |
7171 | struct niu_parent *parent = np->parent; |
7172 | struct niu_tcam_entry *tp; |
7173 | struct ethtool_rx_flow_spec *fsp = &nfc->fs; |
7174 | u16 idx; |
7175 | u64 class; |
7176 | int ret = 0; |
7177 | |
	idx = tcam_get_index(np, (u16)nfc->fs.location);
7179 | |
7180 | tp = &parent->tcam[idx]; |
7181 | if (!tp->valid) { |
		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
			    parent->index, (u16)nfc->fs.location, idx);
7184 | return -EINVAL; |
7185 | } |
7186 | |
7187 | /* fill the flow spec entry */ |
7188 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> |
7189 | TCAM_V4KEY0_CLASS_CODE_SHIFT; |
	ret = niu_class_to_ethflow(class, &fsp->flow_type);
	if (ret < 0) {
		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
			    parent->index);
7194 | goto out; |
7195 | } |
7196 | |
7197 | if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { |
7198 | u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> |
7199 | TCAM_V4KEY2_PROTO_SHIFT; |
7200 | if (proto == IPPROTO_ESP) { |
7201 | if (fsp->flow_type == AH_V4_FLOW) |
7202 | fsp->flow_type = ESP_V4_FLOW; |
7203 | else |
7204 | fsp->flow_type = ESP_V6_FLOW; |
7205 | } |
7206 | } |
7207 | |
7208 | switch (fsp->flow_type) { |
7209 | case TCP_V4_FLOW: |
7210 | case UDP_V4_FLOW: |
7211 | case SCTP_V4_FLOW: |
7212 | case AH_V4_FLOW: |
7213 | case ESP_V4_FLOW: |
7214 | niu_get_ip4fs_from_tcam_key(tp, fsp); |
7215 | break; |
7216 | case TCP_V6_FLOW: |
7217 | case UDP_V6_FLOW: |
7218 | case SCTP_V6_FLOW: |
7219 | case AH_V6_FLOW: |
7220 | case ESP_V6_FLOW: |
7221 | /* Not yet implemented */ |
7222 | ret = -EINVAL; |
7223 | break; |
7224 | case IP_USER_FLOW: |
7225 | niu_get_ip4fs_from_tcam_key(tp, fsp); |
7226 | break; |
7227 | default: |
7228 | ret = -EINVAL; |
7229 | break; |
7230 | } |
7231 | |
7232 | if (ret < 0) |
7233 | goto out; |
7234 | |
7235 | if (tp->assoc_data & TCAM_ASSOCDATA_DISC) |
7236 | fsp->ring_cookie = RX_CLS_FLOW_DISC; |
7237 | else |
7238 | fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> |
7239 | TCAM_ASSOCDATA_OFFSET_SHIFT; |
7240 | |
7241 | /* put the tcam size here */ |
7242 | nfc->data = tcam_get_size(np); |
7243 | out: |
7244 | return ret; |
7245 | } |
7246 | |
7247 | static int niu_get_ethtool_tcam_all(struct niu *np, |
7248 | struct ethtool_rxnfc *nfc, |
7249 | u32 *rule_locs) |
7250 | { |
7251 | struct niu_parent *parent = np->parent; |
7252 | struct niu_tcam_entry *tp; |
7253 | int i, idx, cnt; |
7254 | unsigned long flags; |
7255 | int ret = 0; |
7256 | |
7257 | /* put the tcam size here */ |
7258 | nfc->data = tcam_get_size(np); |
7259 | |
7260 | niu_lock_parent(np, flags); |
7261 | for (cnt = 0, i = 0; i < nfc->data; i++) { |
		idx = tcam_get_index(np, i);
7263 | tp = &parent->tcam[idx]; |
7264 | if (!tp->valid) |
7265 | continue; |
7266 | if (cnt == nfc->rule_cnt) { |
7267 | ret = -EMSGSIZE; |
7268 | break; |
7269 | } |
7270 | rule_locs[cnt] = i; |
7271 | cnt++; |
7272 | } |
7273 | niu_unlock_parent(np, flags); |
7274 | |
7275 | nfc->rule_cnt = cnt; |
7276 | |
7277 | return ret; |
7278 | } |
7279 | |
7280 | static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
7281 | u32 *rule_locs) |
7282 | { |
7283 | struct niu *np = netdev_priv(dev); |
7284 | int ret = 0; |
7285 | |
7286 | switch (cmd->cmd) { |
7287 | case ETHTOOL_GRXFH: |
		ret = niu_get_hash_opts(np, cmd);
7289 | break; |
7290 | case ETHTOOL_GRXRINGS: |
7291 | cmd->data = np->num_rx_rings; |
7292 | break; |
7293 | case ETHTOOL_GRXCLSRLCNT: |
7294 | cmd->rule_cnt = tcam_get_valid_entry_cnt(np); |
7295 | break; |
7296 | case ETHTOOL_GRXCLSRULE: |
		ret = niu_get_ethtool_tcam_entry(np, cmd);
7298 | break; |
7299 | case ETHTOOL_GRXCLSRLALL: |
		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
7301 | break; |
7302 | default: |
7303 | ret = -EINVAL; |
7304 | break; |
7305 | } |
7306 | |
7307 | return ret; |
7308 | } |
7309 | |
7310 | static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) |
7311 | { |
7312 | u64 class; |
7313 | u64 flow_key = 0; |
7314 | unsigned long flags; |
7315 | |
	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7317 | return -EINVAL; |
7318 | |
7319 | if (class < CLASS_CODE_USER_PROG1 || |
7320 | class > CLASS_CODE_SCTP_IPV6) |
7321 | return -EINVAL; |
7322 | |
7323 | if (nfc->data & RXH_DISCARD) { |
7324 | niu_lock_parent(np, flags); |
7325 | flow_key = np->parent->tcam_key[class - |
7326 | CLASS_CODE_USER_PROG1]; |
7327 | flow_key |= TCAM_KEY_DISC; |
7328 | nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); |
7329 | np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; |
7330 | niu_unlock_parent(np, flags); |
7331 | return 0; |
7332 | } else { |
7333 | /* Discard was set before, but is not set now */ |
7334 | if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & |
7335 | TCAM_KEY_DISC) { |
7336 | niu_lock_parent(np, flags); |
7337 | flow_key = np->parent->tcam_key[class - |
7338 | CLASS_CODE_USER_PROG1]; |
7339 | flow_key &= ~TCAM_KEY_DISC; |
7340 | nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), |
7341 | flow_key); |
7342 | np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = |
7343 | flow_key; |
7344 | niu_unlock_parent(np, flags); |
7345 | } |
7346 | } |
7347 | |
	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
7349 | return -EINVAL; |
7350 | |
7351 | niu_lock_parent(np, flags); |
7352 | nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); |
7353 | np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; |
7354 | niu_unlock_parent(np, flags); |
7355 | |
7356 | return 0; |
7357 | } |
7358 | |
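/* Build a TCAM key/mask pair from an ethtool IPv4 flow spec.  Per the
 * TCAM_V4KEY* definitions, word 0 holds the class code, word 1 the L2
 * RDC table number, word 2 the TOS, protocol and L4 ports (or SPI), and
 * word 3 the source and destination addresses.
 */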
7359 | static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, |
7360 | struct niu_tcam_entry *tp, |
7361 | int l2_rdc_tab, u64 class) |
7362 | { |
7363 | u8 pid = 0; |
7364 | u32 sip, dip, sipm, dipm, spi, spim; |
7365 | u16 sport, dport, spm, dpm; |
7366 | |
7367 | sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); |
7368 | sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); |
7369 | dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); |
7370 | dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); |
7371 | |
7372 | tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; |
7373 | tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; |
7374 | tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; |
7375 | tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; |
7376 | |
7377 | tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; |
7378 | tp->key[3] |= dip; |
7379 | |
7380 | tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; |
7381 | tp->key_mask[3] |= dipm; |
7382 | |
7383 | tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << |
7384 | TCAM_V4KEY2_TOS_SHIFT); |
7385 | tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << |
7386 | TCAM_V4KEY2_TOS_SHIFT); |
7387 | switch (fsp->flow_type) { |
7388 | case TCP_V4_FLOW: |
7389 | case UDP_V4_FLOW: |
7390 | case SCTP_V4_FLOW: |
7391 | sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); |
7392 | spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); |
7393 | dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); |
7394 | dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); |
7395 | |
7396 | tp->key[2] |= (((u64)sport << 16) | dport); |
7397 | tp->key_mask[2] |= (((u64)spm << 16) | dpm); |
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7399 | break; |
7400 | case AH_V4_FLOW: |
7401 | case ESP_V4_FLOW: |
7402 | spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); |
7403 | spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); |
7404 | |
7405 | tp->key[2] |= spi; |
7406 | tp->key_mask[2] |= spim; |
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7408 | break; |
7409 | case IP_USER_FLOW: |
7410 | spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); |
7411 | spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); |
7412 | |
7413 | tp->key[2] |= spi; |
7414 | tp->key_mask[2] |= spim; |
7415 | pid = fsp->h_u.usr_ip4_spec.proto; |
7416 | break; |
7417 | default: |
7418 | break; |
7419 | } |
7420 | |
7421 | tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); |
	if (pid)
		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
7425 | } |
7426 | |
7427 | static int niu_add_ethtool_tcam_entry(struct niu *np, |
7428 | struct ethtool_rxnfc *nfc) |
7429 | { |
7430 | struct niu_parent *parent = np->parent; |
7431 | struct niu_tcam_entry *tp; |
7432 | struct ethtool_rx_flow_spec *fsp = &nfc->fs; |
7433 | struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; |
7434 | int l2_rdc_table = rdc_table->first_table_num; |
7435 | u16 idx; |
7436 | u64 class; |
7437 | unsigned long flags; |
7438 | int err, ret; |
7439 | |
7440 | ret = 0; |
7441 | |
7442 | idx = nfc->fs.location; |
7443 | if (idx >= tcam_get_size(np)) |
7444 | return -EINVAL; |
7445 | |
7446 | if (fsp->flow_type == IP_USER_FLOW) { |
7447 | int i; |
7448 | int add_usr_cls = 0; |
7449 | struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; |
7450 | struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; |
7451 | |
7452 | if (uspec->ip_ver != ETH_RX_NFC_IP4) |
7453 | return -EINVAL; |
7454 | |
7455 | niu_lock_parent(np, flags); |
7456 | |
7457 | for (i = 0; i < NIU_L3_PROG_CLS; i++) { |
7458 | if (parent->l3_cls[i]) { |
7459 | if (uspec->proto == parent->l3_cls_pid[i]) { |
7460 | class = parent->l3_cls[i]; |
7461 | parent->l3_cls_refcnt[i]++; |
7462 | add_usr_cls = 1; |
7463 | break; |
7464 | } |
7465 | } else { |
7466 | /* Program new user IP class */ |
7467 | switch (i) { |
7468 | case 0: |
7469 | class = CLASS_CODE_USER_PROG1; |
7470 | break; |
7471 | case 1: |
7472 | class = CLASS_CODE_USER_PROG2; |
7473 | break; |
7474 | case 2: |
7475 | class = CLASS_CODE_USER_PROG3; |
7476 | break; |
7477 | case 3: |
7478 | class = CLASS_CODE_USER_PROG4; |
7479 | break; |
7480 | default: |
7481 | class = CLASS_CODE_UNRECOG; |
7482 | break; |
7483 | } |
				ret = tcam_user_ip_class_set(np, class, 0,
							     uspec->proto,
							     uspec->tos,
							     umask->tos);
7488 | if (ret) |
7489 | goto out; |
7490 | |
				ret = tcam_user_ip_class_enable(np, class, 1);
7492 | if (ret) |
7493 | goto out; |
7494 | parent->l3_cls[i] = class; |
7495 | parent->l3_cls_pid[i] = uspec->proto; |
7496 | parent->l3_cls_refcnt[i]++; |
7497 | add_usr_cls = 1; |
7498 | break; |
7499 | } |
7500 | } |
7501 | if (!add_usr_cls) { |
			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
				    parent->index, __func__, uspec->proto);
7504 | ret = -EINVAL; |
7505 | goto out; |
7506 | } |
7507 | niu_unlock_parent(np, flags); |
7508 | } else { |
		if (!niu_ethflow_to_class(fsp->flow_type, &class))
			return -EINVAL;
7512 | } |
7513 | |
7514 | niu_lock_parent(np, flags); |
7515 | |
7516 | idx = tcam_get_index(np, idx); |
7517 | tp = &parent->tcam[idx]; |
7518 | |
7519 | memset(tp, 0, sizeof(*tp)); |
7520 | |
7521 | /* fill in the tcam key and mask */ |
7522 | switch (fsp->flow_type) { |
7523 | case TCP_V4_FLOW: |
7524 | case UDP_V4_FLOW: |
7525 | case SCTP_V4_FLOW: |
7526 | case AH_V4_FLOW: |
7527 | case ESP_V4_FLOW: |
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7529 | break; |
7530 | case TCP_V6_FLOW: |
7531 | case UDP_V6_FLOW: |
7532 | case SCTP_V6_FLOW: |
7533 | case AH_V6_FLOW: |
7534 | case ESP_V6_FLOW: |
7535 | /* Not yet implemented */ |
		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7537 | parent->index, __func__, fsp->flow_type); |
7538 | ret = -EINVAL; |
7539 | goto out; |
7540 | case IP_USER_FLOW: |
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7542 | break; |
7543 | default: |
		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
7545 | parent->index, __func__, fsp->flow_type); |
7546 | ret = -EINVAL; |
7547 | goto out; |
7548 | } |
7549 | |
7550 | /* fill in the assoc data */ |
7551 | if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { |
7552 | tp->assoc_data = TCAM_ASSOCDATA_DISC; |
7553 | } else { |
7554 | if (fsp->ring_cookie >= np->num_rx_rings) { |
			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
7556 | parent->index, __func__, |
7557 | (long long)fsp->ring_cookie); |
7558 | ret = -EINVAL; |
7559 | goto out; |
7560 | } |
7561 | tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | |
7562 | (fsp->ring_cookie << |
7563 | TCAM_ASSOCDATA_OFFSET_SHIFT)); |
7564 | } |
7565 | |
	err = tcam_write(np, idx, tp->key, tp->key_mask);
7567 | if (err) { |
7568 | ret = -EINVAL; |
7569 | goto out; |
7570 | } |
	err = tcam_assoc_write(np, idx, tp->assoc_data);
7572 | if (err) { |
7573 | ret = -EINVAL; |
7574 | goto out; |
7575 | } |
7576 | |
7577 | /* validate the entry */ |
7578 | tp->valid = 1; |
7579 | np->clas.tcam_valid_entries++; |
7580 | out: |
7581 | niu_unlock_parent(np, flags); |
7582 | |
7583 | return ret; |
7584 | } |
7585 | |
7586 | static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) |
7587 | { |
7588 | struct niu_parent *parent = np->parent; |
7589 | struct niu_tcam_entry *tp; |
7590 | u16 idx; |
7591 | unsigned long flags; |
7592 | u64 class; |
7593 | int ret = 0; |
7594 | |
7595 | if (loc >= tcam_get_size(np)) |
7596 | return -EINVAL; |
7597 | |
7598 | niu_lock_parent(np, flags); |
7599 | |
	idx = tcam_get_index(np, loc);
7601 | tp = &parent->tcam[idx]; |
7602 | |
	/* If the entry is of a user-defined class, then update it */
7604 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> |
7605 | TCAM_V4KEY0_CLASS_CODE_SHIFT; |
7606 | |
7607 | if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { |
7608 | int i; |
7609 | for (i = 0; i < NIU_L3_PROG_CLS; i++) { |
7610 | if (parent->l3_cls[i] == class) { |
7611 | parent->l3_cls_refcnt[i]--; |
7612 | if (!parent->l3_cls_refcnt[i]) { |
7613 | /* disable class */ |
				ret = tcam_user_ip_class_enable(np,
								class,
								0);
7617 | if (ret) |
7618 | goto out; |
7619 | parent->l3_cls[i] = 0; |
7620 | parent->l3_cls_pid[i] = 0; |
7621 | } |
7622 | break; |
7623 | } |
7624 | } |
7625 | if (i == NIU_L3_PROG_CLS) { |
			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
7627 | parent->index, __func__, |
7628 | (unsigned long long)class); |
7629 | ret = -EINVAL; |
7630 | goto out; |
7631 | } |
7632 | } |
7633 | |
	ret = tcam_flush(np, idx);
7635 | if (ret) |
7636 | goto out; |
7637 | |
7638 | /* invalidate the entry */ |
7639 | tp->valid = 0; |
7640 | np->clas.tcam_valid_entries--; |
7641 | out: |
7642 | niu_unlock_parent(np, flags); |
7643 | |
7644 | return ret; |
7645 | } |
7646 | |
7647 | static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) |
7648 | { |
7649 | struct niu *np = netdev_priv(dev); |
7650 | int ret = 0; |
7651 | |
7652 | switch (cmd->cmd) { |
7653 | case ETHTOOL_SRXFH: |
		ret = niu_set_hash_opts(np, cmd);
7655 | break; |
7656 | case ETHTOOL_SRXCLSRLINS: |
		ret = niu_add_ethtool_tcam_entry(np, cmd);
7658 | break; |
7659 | case ETHTOOL_SRXCLSRLDEL: |
		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
7661 | break; |
7662 | default: |
7663 | ret = -EINVAL; |
7664 | break; |
7665 | } |
7666 | |
7667 | return ret; |
7668 | } |
7669 | |
7670 | static const struct { |
7671 | const char string[ETH_GSTRING_LEN]; |
7672 | } niu_xmac_stat_keys[] = { |
7673 | { "tx_frames" }, |
7674 | { "tx_bytes" }, |
7675 | { "tx_fifo_errors" }, |
7676 | { "tx_overflow_errors" }, |
7677 | { "tx_max_pkt_size_errors" }, |
7678 | { "tx_underflow_errors" }, |
7679 | { "rx_local_faults" }, |
7680 | { "rx_remote_faults" }, |
7681 | { "rx_link_faults" }, |
7682 | { "rx_align_errors" }, |
7683 | { "rx_frags" }, |
7684 | { "rx_mcasts" }, |
7685 | { "rx_bcasts" }, |
7686 | { "rx_hist_cnt1" }, |
7687 | { "rx_hist_cnt2" }, |
7688 | { "rx_hist_cnt3" }, |
7689 | { "rx_hist_cnt4" }, |
7690 | { "rx_hist_cnt5" }, |
7691 | { "rx_hist_cnt6" }, |
7692 | { "rx_hist_cnt7" }, |
7693 | { "rx_octets" }, |
7694 | { "rx_code_violations" }, |
7695 | { "rx_len_errors" }, |
7696 | { "rx_crc_errors" }, |
7697 | { "rx_underflows" }, |
7698 | { "rx_overflows" }, |
7699 | { "pause_off_state" }, |
7700 | { "pause_on_state" }, |
7701 | { "pause_received" }, |
7702 | }; |
7703 | |
7704 | #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) |
7705 | |
7706 | static const struct { |
7707 | const char string[ETH_GSTRING_LEN]; |
7708 | } niu_bmac_stat_keys[] = { |
7709 | { "tx_underflow_errors" }, |
7710 | { "tx_max_pkt_size_errors" }, |
7711 | { "tx_bytes" }, |
7712 | { "tx_frames" }, |
7713 | { "rx_overflows" }, |
7714 | { "rx_frames" }, |
7715 | { "rx_align_errors" }, |
7716 | { "rx_crc_errors" }, |
7717 | { "rx_len_errors" }, |
7718 | { "pause_off_state" }, |
7719 | { "pause_on_state" }, |
7720 | { "pause_received" }, |
7721 | }; |
7722 | |
7723 | #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) |
7724 | |
7725 | static const struct { |
7726 | const char string[ETH_GSTRING_LEN]; |
7727 | } niu_rxchan_stat_keys[] = { |
7728 | { "rx_channel" }, |
7729 | { "rx_packets" }, |
7730 | { "rx_bytes" }, |
7731 | { "rx_dropped" }, |
7732 | { "rx_errors" }, |
7733 | }; |
7734 | |
7735 | #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) |
7736 | |
7737 | static const struct { |
7738 | const char string[ETH_GSTRING_LEN]; |
7739 | } niu_txchan_stat_keys[] = { |
7740 | { "tx_channel" }, |
7741 | { "tx_packets" }, |
7742 | { "tx_bytes" }, |
7743 | { "tx_errors" }, |
7744 | }; |
7745 | |
7746 | #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) |
7747 | |
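/* Note: the string tables above must stay in the same order that
 * niu_get_ethtool_stats() emits values: MAC statistics first (XMAC or
 * BMAC depending on the port), then the per-RX-channel and
 * per-TX-channel counters.
 */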
7748 | static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
7749 | { |
7750 | struct niu *np = netdev_priv(dev); |
7751 | int i; |
7752 | |
7753 | if (stringset != ETH_SS_STATS) |
7754 | return; |
7755 | |
7756 | if (np->flags & NIU_FLAGS_XMAC) { |
7757 | memcpy(data, niu_xmac_stat_keys, |
7758 | sizeof(niu_xmac_stat_keys)); |
7759 | data += sizeof(niu_xmac_stat_keys); |
7760 | } else { |
7761 | memcpy(data, niu_bmac_stat_keys, |
7762 | sizeof(niu_bmac_stat_keys)); |
7763 | data += sizeof(niu_bmac_stat_keys); |
7764 | } |
7765 | for (i = 0; i < np->num_rx_rings; i++) { |
7766 | memcpy(data, niu_rxchan_stat_keys, |
7767 | sizeof(niu_rxchan_stat_keys)); |
7768 | data += sizeof(niu_rxchan_stat_keys); |
7769 | } |
7770 | for (i = 0; i < np->num_tx_rings; i++) { |
7771 | memcpy(data, niu_txchan_stat_keys, |
7772 | sizeof(niu_txchan_stat_keys)); |
7773 | data += sizeof(niu_txchan_stat_keys); |
7774 | } |
7775 | } |
7776 | |
7777 | static int niu_get_sset_count(struct net_device *dev, int stringset) |
7778 | { |
7779 | struct niu *np = netdev_priv(dev); |
7780 | |
7781 | if (stringset != ETH_SS_STATS) |
7782 | return -EINVAL; |
7783 | |
7784 | return (np->flags & NIU_FLAGS_XMAC ? |
7785 | NUM_XMAC_STAT_KEYS : |
7786 | NUM_BMAC_STAT_KEYS) + |
7787 | (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + |
7788 | (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); |
7789 | } |
7790 | |
7791 | static void niu_get_ethtool_stats(struct net_device *dev, |
7792 | struct ethtool_stats *stats, u64 *data) |
7793 | { |
7794 | struct niu *np = netdev_priv(dev); |
7795 | int i; |
7796 | |
7797 | niu_sync_mac_stats(np); |
7798 | if (np->flags & NIU_FLAGS_XMAC) { |
7799 | memcpy(data, &np->mac_stats.xmac, |
7800 | sizeof(struct niu_xmac_stats)); |
7801 | data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); |
7802 | } else { |
7803 | memcpy(data, &np->mac_stats.bmac, |
7804 | sizeof(struct niu_bmac_stats)); |
7805 | data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); |
7806 | } |
7807 | for (i = 0; i < np->num_rx_rings; i++) { |
7808 | struct rx_ring_info *rp = &np->rx_rings[i]; |
7809 | |
		niu_sync_rx_discard_stats(np, rp, 0);
7811 | |
7812 | data[0] = rp->rx_channel; |
7813 | data[1] = rp->rx_packets; |
7814 | data[2] = rp->rx_bytes; |
7815 | data[3] = rp->rx_dropped; |
7816 | data[4] = rp->rx_errors; |
7817 | data += 5; |
7818 | } |
7819 | for (i = 0; i < np->num_tx_rings; i++) { |
7820 | struct tx_ring_info *rp = &np->tx_rings[i]; |
7821 | |
7822 | data[0] = rp->tx_channel; |
7823 | data[1] = rp->tx_packets; |
7824 | data[2] = rp->tx_bytes; |
7825 | data[3] = rp->tx_errors; |
7826 | data += 4; |
7827 | } |
7828 | } |
7829 | |
7830 | static u64 niu_led_state_save(struct niu *np) |
7831 | { |
7832 | if (np->flags & NIU_FLAGS_XMAC) |
7833 | return nr64_mac(XMAC_CONFIG); |
7834 | else |
7835 | return nr64_mac(BMAC_XIF_CONFIG); |
7836 | } |
7837 | |
7838 | static void niu_led_state_restore(struct niu *np, u64 val) |
7839 | { |
7840 | if (np->flags & NIU_FLAGS_XMAC) |
7841 | nw64_mac(XMAC_CONFIG, val); |
7842 | else |
7843 | nw64_mac(BMAC_XIF_CONFIG, val); |
7844 | } |
7845 | |
7846 | static void niu_force_led(struct niu *np, int on) |
7847 | { |
7848 | u64 val, reg, bit; |
7849 | |
7850 | if (np->flags & NIU_FLAGS_XMAC) { |
7851 | reg = XMAC_CONFIG; |
7852 | bit = XMAC_CONFIG_FORCE_LED_ON; |
7853 | } else { |
7854 | reg = BMAC_XIF_CONFIG; |
7855 | bit = BMAC_XIF_CONFIG_LINK_LED; |
7856 | } |
7857 | |
7858 | val = nr64_mac(reg); |
7859 | if (on) |
7860 | val |= bit; |
7861 | else |
7862 | val &= ~bit; |
7863 | nw64_mac(reg, val); |
7864 | } |
7865 | |
7866 | static int niu_set_phys_id(struct net_device *dev, |
			   enum ethtool_phys_id_state state)
{
7870 | struct niu *np = netdev_priv(dev); |
7871 | |
7872 | if (!netif_running(dev)) |
7873 | return -EAGAIN; |
7874 | |
7875 | switch (state) { |
7876 | case ETHTOOL_ID_ACTIVE: |
7877 | np->orig_led_state = niu_led_state_save(np); |
7878 | return 1; /* cycle on/off once per second */ |
7879 | |
7880 | case ETHTOOL_ID_ON: |
		niu_force_led(np, 1);
7882 | break; |
7883 | |
7884 | case ETHTOOL_ID_OFF: |
		niu_force_led(np, 0);
7886 | break; |
7887 | |
7888 | case ETHTOOL_ID_INACTIVE: |
		niu_led_state_restore(np, np->orig_led_state);
7890 | } |
7891 | |
7892 | return 0; |
7893 | } |
7894 | |
7895 | static const struct ethtool_ops niu_ethtool_ops = { |
7896 | .get_drvinfo = niu_get_drvinfo, |
7897 | .get_link = ethtool_op_get_link, |
7898 | .get_msglevel = niu_get_msglevel, |
7899 | .set_msglevel = niu_set_msglevel, |
7900 | .nway_reset = niu_nway_reset, |
7901 | .get_eeprom_len = niu_get_eeprom_len, |
7902 | .get_eeprom = niu_get_eeprom, |
7903 | .get_strings = niu_get_strings, |
7904 | .get_sset_count = niu_get_sset_count, |
7905 | .get_ethtool_stats = niu_get_ethtool_stats, |
7906 | .set_phys_id = niu_set_phys_id, |
7907 | .get_rxnfc = niu_get_nfc, |
7908 | .set_rxnfc = niu_set_nfc, |
7909 | .get_link_ksettings = niu_get_link_ksettings, |
7910 | .set_link_ksettings = niu_set_link_ksettings, |
7911 | }; |
7912 | |
7913 | static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, |
7914 | int ldg, int ldn) |
7915 | { |
7916 | if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) |
7917 | return -EINVAL; |
7918 | if (ldn < 0 || ldn > LDN_MAX) |
7919 | return -EINVAL; |
7920 | |
7921 | parent->ldg_map[ldn] = ldg; |
7922 | |
7923 | if (np->parent->plat_type == PLAT_TYPE_NIU) { |
7924 | /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by |
7925 | * the firmware, and we're not supposed to change them. |
7926 | * Validate the mapping, because if it's wrong we probably |
7927 | * won't get any interrupts and that's painful to debug. |
7928 | */ |
7929 | if (nr64(LDG_NUM(ldn)) != ldg) { |
			dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n",
7931 | np->port, ldn, ldg, |
7932 | (unsigned long long) nr64(LDG_NUM(ldn))); |
7933 | return -EINVAL; |
7934 | } |
7935 | } else |
7936 | nw64(LDG_NUM(ldn), ldg); |
7937 | |
7938 | return 0; |
7939 | } |
7940 | |
7941 | static int niu_set_ldg_timer_res(struct niu *np, int res) |
7942 | { |
	if (res < 0 || res > LDG_TIMER_RES_VAL)
		return -EINVAL;

	nw64(LDG_TIMER_RES, res);
7948 | |
7949 | return 0; |
7950 | } |
7951 | |
7952 | static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) |
7953 | { |
7954 | if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || |
7955 | (func < 0 || func > 3) || |
7956 | (vector < 0 || vector > 0x1f)) |
7957 | return -EINVAL; |
7958 | |
7959 | nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); |
7960 | |
7961 | return 0; |
7962 | } |
7963 | |
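/* Read a single byte from the expansion EEPROM through the ESPC PIO
 * status register.  The read frame is issued twice, polling for
 * ESPC_PIO_STAT_READ_END each time; the double issue appears to be
 * what the ESPC read protocol requires, with the data byte taken from
 * the second completed frame.
 */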
7964 | static int niu_pci_eeprom_read(struct niu *np, u32 addr) |
7965 | { |
7966 | u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | |
7967 | (addr << ESPC_PIO_STAT_ADDR_SHIFT)); |
7968 | int limit; |
7969 | |
7970 | if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) |
7971 | return -EINVAL; |
7972 | |
7973 | frame = frame_base; |
7974 | nw64(ESPC_PIO_STAT, frame); |
7975 | limit = 64; |
7976 | do { |
7977 | udelay(5); |
7978 | frame = nr64(ESPC_PIO_STAT); |
7979 | if (frame & ESPC_PIO_STAT_READ_END) |
7980 | break; |
7981 | } while (limit--); |
7982 | if (!(frame & ESPC_PIO_STAT_READ_END)) { |
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7984 | (unsigned long long) frame); |
7985 | return -ENODEV; |
7986 | } |
7987 | |
7988 | frame = frame_base; |
7989 | nw64(ESPC_PIO_STAT, frame); |
7990 | limit = 64; |
7991 | do { |
7992 | udelay(5); |
7993 | frame = nr64(ESPC_PIO_STAT); |
7994 | if (frame & ESPC_PIO_STAT_READ_END) |
7995 | break; |
7996 | } while (limit--); |
7997 | if (!(frame & ESPC_PIO_STAT_READ_END)) { |
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7999 | (unsigned long long) frame); |
8000 | return -ENODEV; |
8001 | } |
8002 | |
8003 | frame = nr64(ESPC_PIO_STAT); |
8004 | return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; |
8005 | } |
8006 | |
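/* 16-bit EEPROM helpers: niu_pci_eeprom_read16() treats the byte at
 * 'off' as the high byte (big-endian), while the _swp variant treats
 * it as the low byte (little-endian), matching the layout of the PCI
 * expansion ROM structures parsed below.
 */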
8007 | static int niu_pci_eeprom_read16(struct niu *np, u32 off) |
8008 | { |
	int err = niu_pci_eeprom_read(np, off);
8010 | u16 val; |
8011 | |
8012 | if (err < 0) |
8013 | return err; |
8014 | val = (err << 8); |
	err = niu_pci_eeprom_read(np, off + 1);
8016 | if (err < 0) |
8017 | return err; |
8018 | val |= (err & 0xff); |
8019 | |
8020 | return val; |
8021 | } |
8022 | |
8023 | static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off) |
8024 | { |
	int err = niu_pci_eeprom_read(np, off);
8026 | u16 val; |
8027 | |
8028 | if (err < 0) |
8029 | return err; |
8030 | |
8031 | val = (err & 0xff); |
	err = niu_pci_eeprom_read(np, off + 1);
8033 | if (err < 0) |
8034 | return err; |
8035 | |
8036 | val |= (err & 0xff) << 8; |
8037 | |
8038 | return val; |
8039 | } |
8040 | |
8041 | static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf, |
8042 | int namebuf_len) |
8043 | { |
8044 | int i; |
8045 | |
8046 | for (i = 0; i < namebuf_len; i++) { |
		int err = niu_pci_eeprom_read(np, off + i);
8048 | if (err < 0) |
8049 | return err; |
8050 | *namebuf++ = err; |
8051 | if (!err) |
8052 | break; |
8053 | } |
8054 | if (i >= namebuf_len) |
8055 | return -EINVAL; |
8056 | |
8057 | return i + 1; |
8058 | } |
8059 | |
8060 | static void niu_vpd_parse_version(struct niu *np) |
8061 | { |
8062 | struct niu_vpd *vpd = &np->vpd; |
8063 | int len = strlen(vpd->version) + 1; |
8064 | const char *s = vpd->version; |
8065 | int i; |
8066 | |
8067 | for (i = 0; i < len - 5; i++) { |
		if (!strncmp(s + i, "FCode ", 6))
8069 | break; |
8070 | } |
8071 | if (i >= len - 5) |
8072 | return; |
8073 | |
8074 | s += i + 5; |
	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
8076 | |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
		     vpd->fcode_major, vpd->fcode_minor);
8080 | if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || |
8081 | (vpd->fcode_major == NIU_VPD_MIN_MAJOR && |
8082 | vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) |
8083 | np->flags |= NIU_FLAGS_VPD_VALID; |
8084 | } |
8085 | |
8086 | /* ESPC_PIO_EN_ENABLE must be set */ |
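/* Walk the VPD keyword area.  As decoded below, each record stores its
 * overall length at offset +2, the property value length at +4, and a
 * NUL-terminated property name from +5 onward, followed immediately by
 * the value bytes.  Returns 1 once every property of interest has been
 * captured, 0 when the area is exhausted, or a negative errno.
 */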
8087 | static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) |
8088 | { |
8089 | unsigned int found_mask = 0; |
8090 | #define FOUND_MASK_MODEL 0x00000001 |
8091 | #define FOUND_MASK_BMODEL 0x00000002 |
8092 | #define FOUND_MASK_VERS 0x00000004 |
8093 | #define FOUND_MASK_MAC 0x00000008 |
8094 | #define FOUND_MASK_NMAC 0x00000010 |
8095 | #define FOUND_MASK_PHY 0x00000020 |
8096 | #define FOUND_MASK_ALL 0x0000003f |
8097 | |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
8100 | while (start < end) { |
8101 | int len, err, prop_len; |
8102 | char namebuf[64]; |
8103 | u8 *prop_buf; |
8104 | int max_len; |
8105 | |
8106 | if (found_mask == FOUND_MASK_ALL) { |
8107 | niu_vpd_parse_version(np); |
8108 | return 1; |
8109 | } |
8110 | |
		err = niu_pci_eeprom_read(np, start + 2);
8112 | if (err < 0) |
8113 | return err; |
8114 | len = err; |
8115 | start += 3; |
8116 | |
		prop_len = niu_pci_eeprom_read(np, start + 4);
8118 | if (prop_len < 0) |
8119 | return prop_len; |
		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8121 | if (err < 0) |
8122 | return err; |
8123 | |
8124 | prop_buf = NULL; |
8125 | max_len = 0; |
		if (!strcmp(namebuf, "model")) {
8127 | prop_buf = np->vpd.model; |
8128 | max_len = NIU_VPD_MODEL_MAX; |
8129 | found_mask |= FOUND_MASK_MODEL; |
		} else if (!strcmp(namebuf, "board-model")) {
8131 | prop_buf = np->vpd.board_model; |
8132 | max_len = NIU_VPD_BD_MODEL_MAX; |
8133 | found_mask |= FOUND_MASK_BMODEL; |
		} else if (!strcmp(namebuf, "version")) {
8135 | prop_buf = np->vpd.version; |
8136 | max_len = NIU_VPD_VERSION_MAX; |
8137 | found_mask |= FOUND_MASK_VERS; |
		} else if (!strcmp(namebuf, "local-mac-address")) {
8139 | prop_buf = np->vpd.local_mac; |
8140 | max_len = ETH_ALEN; |
8141 | found_mask |= FOUND_MASK_MAC; |
		} else if (!strcmp(namebuf, "num-mac-addresses")) {
8143 | prop_buf = &np->vpd.mac_num; |
8144 | max_len = 1; |
8145 | found_mask |= FOUND_MASK_NMAC; |
		} else if (!strcmp(namebuf, "phy-type")) {
8147 | prop_buf = np->vpd.phy_type; |
8148 | max_len = NIU_VPD_PHY_TYPE_MAX; |
8149 | found_mask |= FOUND_MASK_PHY; |
8150 | } |
8151 | |
8152 | if (max_len && prop_len > max_len) { |
			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
8154 | return -EINVAL; |
8155 | } |
8156 | |
8157 | if (prop_buf) { |
8158 | u32 off = start + 5 + err; |
8159 | int i; |
8160 | |
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
				     namebuf, prop_len);
8164 | for (i = 0; i < prop_len; i++) { |
				err = niu_pci_eeprom_read(np, off + i);
8166 | if (err < 0) |
8167 | return err; |
8168 | *prop_buf++ = err; |
8169 | } |
8170 | } |
8171 | |
8172 | start += len; |
8173 | } |
8174 | |
8175 | return 0; |
8176 | } |
8177 | |
8178 | /* ESPC_PIO_EN_ENABLE must be set */ |
8179 | static int niu_pci_vpd_fetch(struct niu *np, u32 start) |
8180 | { |
8181 | u32 offset; |
8182 | int err; |
8183 | |
	err = niu_pci_eeprom_read16_swp(np, start + 1);
8185 | if (err < 0) |
8186 | return err; |
8187 | |
8188 | offset = err + 3; |
8189 | |
8190 | while (start + offset < ESPC_EEPROM_SIZE) { |
8191 | u32 here = start + offset; |
8192 | u32 end; |
8193 | |
		err = niu_pci_eeprom_read(np, here);
8195 | if (err < 0) |
8196 | return err; |
8197 | if (err != 0x90) |
8198 | return -EINVAL; |
8199 | |
		err = niu_pci_eeprom_read16_swp(np, here + 1);
8201 | if (err < 0) |
8202 | return err; |
8203 | |
8204 | here = start + offset + 3; |
8205 | end = start + offset + err; |
8206 | |
8207 | offset += err; |
8208 | |
		err = niu_pci_vpd_scan_props(np, here, end);
8210 | if (err < 0) |
8211 | return err; |
8212 | /* ret == 1 is not an error */ |
8213 | if (err == 1) |
8214 | return 0; |
8215 | } |
8216 | return 0; |
8217 | } |
8218 | |
8219 | /* ESPC_PIO_EN_ENABLE must be set */ |
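/* Find the VPD area by walking the PCI expansion ROM image chain: each
 * image begins with the 0x55aa ROM signature and points to a "PCIR"
 * data structure, and images whose code type is not 0x01 (Open
 * Firmware) are skipped using their length in 512-byte units.  Returns
 * the offset of the VPD start (a 0x82 identifier-string tag), or 0 if
 * nothing suitable is found.
 */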
8220 | static u32 niu_pci_vpd_offset(struct niu *np) |
8221 | { |
8222 | u32 start = 0, end = ESPC_EEPROM_SIZE, ret; |
8223 | int err; |
8224 | |
8225 | while (start < end) { |
8226 | ret = start; |
8227 | |
8228 | /* ROM header signature? */ |
		err = niu_pci_eeprom_read16(np, start + 0);
8230 | if (err != 0x55aa) |
8231 | return 0; |
8232 | |
8233 | /* Apply offset to PCI data structure. */ |
		err = niu_pci_eeprom_read16(np, start + 23);
8235 | if (err < 0) |
8236 | return 0; |
8237 | start += err; |
8238 | |
8239 | /* Check for "PCIR" signature. */ |
		err = niu_pci_eeprom_read16(np, start + 0);
8241 | if (err != 0x5043) |
8242 | return 0; |
		err = niu_pci_eeprom_read16(np, start + 2);
8244 | if (err != 0x4952) |
8245 | return 0; |
8246 | |
8247 | /* Check for OBP image type. */ |
		err = niu_pci_eeprom_read(np, start + 20);
8249 | if (err < 0) |
8250 | return 0; |
8251 | if (err != 0x01) { |
			err = niu_pci_eeprom_read(np, ret + 2);
8253 | if (err < 0) |
8254 | return 0; |
8255 | |
8256 | start = ret + (err * 512); |
8257 | continue; |
8258 | } |
8259 | |
		err = niu_pci_eeprom_read16_swp(np, start + 8);
8261 | if (err < 0) |
8262 | return err; |
8263 | ret += err; |
8264 | |
		err = niu_pci_eeprom_read(np, ret + 0);
8266 | if (err != 0x82) |
8267 | return 0; |
8268 | |
8269 | return ret; |
8270 | } |
8271 | |
8272 | return 0; |
8273 | } |
8274 | |
8275 | static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop) |
8276 | { |
	if (!strcmp(phy_prop, "mif")) {
8278 | /* 1G copper, MII */ |
8279 | np->flags &= ~(NIU_FLAGS_FIBER | |
8280 | NIU_FLAGS_10G); |
8281 | np->mac_xcvr = MAC_XCVR_MII; |
	} else if (!strcmp(phy_prop, "xgf")) {
8283 | /* 10G fiber, XPCS */ |
8284 | np->flags |= (NIU_FLAGS_10G | |
8285 | NIU_FLAGS_FIBER); |
8286 | np->mac_xcvr = MAC_XCVR_XPCS; |
	} else if (!strcmp(phy_prop, "pcs")) {
8288 | /* 1G fiber, PCS */ |
8289 | np->flags &= ~NIU_FLAGS_10G; |
8290 | np->flags |= NIU_FLAGS_FIBER; |
8291 | np->mac_xcvr = MAC_XCVR_PCS; |
	} else if (!strcmp(phy_prop, "xgc")) {
8293 | /* 10G copper, XPCS */ |
8294 | np->flags |= NIU_FLAGS_10G; |
8295 | np->flags &= ~NIU_FLAGS_FIBER; |
8296 | np->mac_xcvr = MAC_XCVR_XPCS; |
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
8298 | /* 10G Serdes or 1G Serdes, default to 10G */ |
8299 | np->flags |= NIU_FLAGS_10G; |
8300 | np->flags &= ~NIU_FLAGS_FIBER; |
8301 | np->flags |= NIU_FLAGS_XCVR_SERDES; |
8302 | np->mac_xcvr = MAC_XCVR_XPCS; |
8303 | } else { |
8304 | return -EINVAL; |
8305 | } |
8306 | return 0; |
8307 | } |
8308 | |
8309 | static int niu_pci_vpd_get_nports(struct niu *np) |
8310 | { |
8311 | int ports = 0; |
8312 | |
8313 | if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || |
8314 | (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || |
8315 | (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || |
8316 | (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || |
8317 | (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { |
8318 | ports = 4; |
8319 | } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || |
8320 | (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || |
8321 | (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || |
8322 | (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { |
8323 | ports = 2; |
8324 | } |
8325 | |
8326 | return ports; |
8327 | } |
8328 | |
8329 | static void niu_pci_vpd_validate(struct niu *np) |
8330 | { |
8331 | struct net_device *dev = np->dev; |
8332 | struct niu_vpd *vpd = &np->vpd; |
8333 | u8 addr[ETH_ALEN]; |
8334 | u8 val8; |
8335 | |
	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
8338 | |
8339 | np->flags &= ~NIU_FLAGS_VPD_VALID; |
8340 | return; |
8341 | } |
8342 | |
8343 | if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || |
8344 | !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { |
8345 | np->flags |= NIU_FLAGS_10G; |
8346 | np->flags &= ~NIU_FLAGS_FIBER; |
8347 | np->flags |= NIU_FLAGS_XCVR_SERDES; |
8348 | np->mac_xcvr = MAC_XCVR_PCS; |
8349 | if (np->port > 1) { |
8350 | np->flags |= NIU_FLAGS_FIBER; |
8351 | np->flags &= ~NIU_FLAGS_10G; |
8352 | } |
8353 | if (np->flags & NIU_FLAGS_10G) |
8354 | np->mac_xcvr = MAC_XCVR_XPCS; |
8355 | } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { |
8356 | np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | |
8357 | NIU_FLAGS_HOTPLUG_PHY); |
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
8362 | np->flags &= ~NIU_FLAGS_VPD_VALID; |
8363 | return; |
8364 | } |
8365 | |
	ether_addr_copy(addr, vpd->local_mac);
8367 | |
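	/* Derive this port's MAC address from the single VPD base MAC:
	 * add the port number to the low byte, carrying into byte 4 on
	 * overflow.
	 */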
8368 | val8 = addr[5]; |
8369 | addr[5] += np->port; |
8370 | if (addr[5] < val8) |
8371 | addr[4]++; |
8372 | |
8373 | eth_hw_addr_set(dev, addr); |
8374 | } |
8375 | |
8376 | static int niu_pci_probe_sprom(struct niu *np) |
8377 | { |
8378 | struct net_device *dev = np->dev; |
8379 | u8 addr[ETH_ALEN]; |
8380 | int len, i; |
8381 | u64 val, sum; |
8382 | u8 val8; |
8383 | |
8384 | val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); |
8385 | val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; |
8386 | len = val / 4; |
8387 | |
8388 | np->eeprom_len = len; |
8389 | |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Image size %llu\n", (unsigned long long)val);
8392 | |
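	/* The SPROM checksum is the byte-wise sum over the whole image
	 * and must come out to 0xab, which is verified below.
	 */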
8393 | sum = 0; |
8394 | for (i = 0; i < len; i++) { |
8395 | val = nr64(ESPC_NCR(i)); |
8396 | sum += (val >> 0) & 0xff; |
8397 | sum += (val >> 8) & 0xff; |
8398 | sum += (val >> 16) & 0xff; |
8399 | sum += (val >> 24) & 0xff; |
8400 | } |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
8405 | return -EINVAL; |
8406 | } |
8407 | |
8408 | val = nr64(ESPC_PHY_TYPE); |
8409 | switch (np->port) { |
8410 | case 0: |
8411 | val8 = (val & ESPC_PHY_TYPE_PORT0) >> |
8412 | ESPC_PHY_TYPE_PORT0_SHIFT; |
8413 | break; |
8414 | case 1: |
8415 | val8 = (val & ESPC_PHY_TYPE_PORT1) >> |
8416 | ESPC_PHY_TYPE_PORT1_SHIFT; |
8417 | break; |
8418 | case 2: |
8419 | val8 = (val & ESPC_PHY_TYPE_PORT2) >> |
8420 | ESPC_PHY_TYPE_PORT2_SHIFT; |
8421 | break; |
8422 | case 3: |
8423 | val8 = (val & ESPC_PHY_TYPE_PORT3) >> |
8424 | ESPC_PHY_TYPE_PORT3_SHIFT; |
8425 | break; |
8426 | default: |
		dev_err(np->device, "Bogus port number %u\n",
8428 | np->port); |
8429 | return -EINVAL; |
8430 | } |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: PHY type %x\n", val8);
8433 | |
8434 | switch (val8) { |
8435 | case ESPC_PHY_TYPE_1G_COPPER: |
8436 | /* 1G copper, MII */ |
8437 | np->flags &= ~(NIU_FLAGS_FIBER | |
8438 | NIU_FLAGS_10G); |
8439 | np->mac_xcvr = MAC_XCVR_MII; |
8440 | break; |
8441 | |
8442 | case ESPC_PHY_TYPE_1G_FIBER: |
8443 | /* 1G fiber, PCS */ |
8444 | np->flags &= ~NIU_FLAGS_10G; |
8445 | np->flags |= NIU_FLAGS_FIBER; |
8446 | np->mac_xcvr = MAC_XCVR_PCS; |
8447 | break; |
8448 | |
8449 | case ESPC_PHY_TYPE_10G_COPPER: |
8450 | /* 10G copper, XPCS */ |
8451 | np->flags |= NIU_FLAGS_10G; |
8452 | np->flags &= ~NIU_FLAGS_FIBER; |
8453 | np->mac_xcvr = MAC_XCVR_XPCS; |
8454 | break; |
8455 | |
8456 | case ESPC_PHY_TYPE_10G_FIBER: |
8457 | /* 10G fiber, XPCS */ |
8458 | np->flags |= (NIU_FLAGS_10G | |
8459 | NIU_FLAGS_FIBER); |
8460 | np->mac_xcvr = MAC_XCVR_XPCS; |
8461 | break; |
8462 | |
8463 | default: |
		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
8465 | return -EINVAL; |
8466 | } |
8467 | |
8468 | val = nr64(ESPC_MAC_ADDR0); |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
8471 | addr[0] = (val >> 0) & 0xff; |
8472 | addr[1] = (val >> 8) & 0xff; |
8473 | addr[2] = (val >> 16) & 0xff; |
8474 | addr[3] = (val >> 24) & 0xff; |
8475 | |
8476 | val = nr64(ESPC_MAC_ADDR1); |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
8479 | addr[4] = (val >> 0) & 0xff; |
8480 | addr[5] = (val >> 8) & 0xff; |
8481 | |
8482 | if (!is_valid_ether_addr(addr)) { |
		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
8484 | addr); |
8485 | return -EINVAL; |
8486 | } |
8487 | |
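	/* As with the VPD path, each port's MAC is the base address
	 * plus the port number, with carry into byte 4.
	 */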
8488 | val8 = addr[5]; |
8489 | addr[5] += np->port; |
8490 | if (addr[5] < val8) |
8491 | addr[4]++; |
8492 | |
8493 | eth_hw_addr_set(dev, addr); |
8494 | |
8495 | val = nr64(ESPC_MOD_STR_LEN); |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8498 | if (val >= 8 * 4) |
8499 | return -EINVAL; |
8500 | |
8501 | for (i = 0; i < val; i += 4) { |
8502 | u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); |
8503 | |
8504 | np->vpd.model[i + 3] = (tmp >> 0) & 0xff; |
8505 | np->vpd.model[i + 2] = (tmp >> 8) & 0xff; |
8506 | np->vpd.model[i + 1] = (tmp >> 16) & 0xff; |
8507 | np->vpd.model[i + 0] = (tmp >> 24) & 0xff; |
8508 | } |
8509 | np->vpd.model[val] = '\0'; |
8510 | |
8511 | val = nr64(ESPC_BD_MOD_STR_LEN); |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8514 | if (val >= 4 * 4) |
8515 | return -EINVAL; |
8516 | |
8517 | for (i = 0; i < val; i += 4) { |
8518 | u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); |
8519 | |
8520 | np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; |
8521 | np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; |
8522 | np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; |
8523 | np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; |
8524 | } |
8525 | np->vpd.board_model[val] = '\0'; |
8526 | |
8527 | np->vpd.mac_num = |
8528 | nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; |
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
8531 | |
8532 | return 0; |
8533 | } |
8534 | |
8535 | static int niu_get_and_validate_port(struct niu *np) |
8536 | { |
8537 | struct niu_parent *parent = np->parent; |
8538 | |
8539 | if (np->port <= 1) |
8540 | np->flags |= NIU_FLAGS_XMAC; |
8541 | |
8542 | if (!parent->num_ports) { |
8543 | if (parent->plat_type == PLAT_TYPE_NIU) { |
8544 | parent->num_ports = 2; |
8545 | } else { |
8546 | parent->num_ports = niu_pci_vpd_get_nports(np); |
8547 | if (!parent->num_ports) { |
8548 | /* Fall back to SPROM as last resort. |
8549 | * This will fail on most cards. |
8550 | */ |
8551 | parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & |
8552 | ESPC_NUM_PORTS_MACS_VAL; |
8553 | |
8554 | /* All of the current probing methods fail on |
8555 | * Maramba on-board parts. |
8556 | */ |
8557 | if (!parent->num_ports) |
8558 | parent->num_ports = 4; |
8559 | } |
8560 | } |
8561 | } |
8562 | |
8563 | if (np->port >= parent->num_ports) |
8564 | return -ENODEV; |
8565 | |
8566 | return 0; |
8567 | } |
8568 | |
8569 | static int phy_record(struct niu_parent *parent, struct phy_probe_info *p, |
8570 | int dev_id_1, int dev_id_2, u8 phy_port, int type) |
8571 | { |
8572 | u32 id = (dev_id_1 << 16) | dev_id_2; |
8573 | u8 idx; |
8574 | |
8575 | if (dev_id_1 < 0 || dev_id_2 < 0) |
8576 | return 0; |
8577 | if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { |
8578 | /* Because of the NIU_PHY_ID_MASK being applied, the 8704 |
8579 | * test covers the 8706 as well. |
8580 | */ |
8581 | if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && |
8582 | ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) |
8583 | return 0; |
8584 | } else { |
8585 | if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) |
8586 | return 0; |
8587 | } |
8588 | |
	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
		type == PHY_TYPE_PCS ? "PCS" : "MII",
8593 | phy_port); |
8594 | |
8595 | if (p->cur[type] >= NIU_MAX_PORTS) { |
		pr_err("Too many PHY ports\n");
8597 | return -EINVAL; |
8598 | } |
8599 | idx = p->cur[type]; |
8600 | p->phy_id[type][idx] = id; |
8601 | p->phy_port[type][idx] = phy_port; |
8602 | p->cur[type] = idx + 1; |
8603 | return 0; |
8604 | } |
8605 | |
8606 | static int port_has_10g(struct phy_probe_info *p, int port) |
8607 | { |
8608 | int i; |
8609 | |
8610 | for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { |
8611 | if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) |
8612 | return 1; |
8613 | } |
8614 | for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { |
8615 | if (p->phy_port[PHY_TYPE_PCS][i] == port) |
8616 | return 1; |
8617 | } |
8618 | |
8619 | return 0; |
8620 | } |
8621 | |
8622 | static int count_10g_ports(struct phy_probe_info *p, int *lowest) |
8623 | { |
8624 | int port, cnt; |
8625 | |
8626 | cnt = 0; |
8627 | *lowest = 32; |
8628 | for (port = 8; port < 32; port++) { |
8629 | if (port_has_10g(p, port)) { |
8630 | if (!cnt) |
8631 | *lowest = port; |
8632 | cnt++; |
8633 | } |
8634 | } |
8635 | |
8636 | return cnt; |
8637 | } |
8638 | |
8639 | static int count_1g_ports(struct phy_probe_info *p, int *lowest) |
8640 | { |
8641 | *lowest = 32; |
8642 | if (p->cur[PHY_TYPE_MII]) |
8643 | *lowest = p->phy_port[PHY_TYPE_MII][0]; |
8644 | |
8645 | return p->cur[PHY_TYPE_MII]; |
8646 | } |
8647 | |
8648 | static void niu_n2_divide_channels(struct niu_parent *parent) |
8649 | { |
8650 | int num_ports = parent->num_ports; |
8651 | int i; |
8652 | |
8653 | for (i = 0; i < num_ports; i++) { |
8654 | parent->rxchan_per_port[i] = (16 / num_ports); |
8655 | parent->txchan_per_port[i] = (16 / num_ports); |
8656 | |
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8658 | parent->index, i, |
8659 | parent->rxchan_per_port[i], |
8660 | parent->txchan_per_port[i]); |
8661 | } |
8662 | } |
8663 | |
8664 | static void niu_divide_channels(struct niu_parent *parent, |
8665 | int num_10g, int num_1g) |
8666 | { |
8667 | int num_ports = parent->num_ports; |
8668 | int rx_chans_per_10g, rx_chans_per_1g; |
8669 | int tx_chans_per_10g, tx_chans_per_1g; |
8670 | int i, tot_rx, tot_tx; |
8671 | |
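	/* Channel budget: with a homogeneous port mix, the RX and TX
	 * channels are split evenly across the ports.  With mixed
	 * 10G/1G ports, each 1G port gets a fixed small share
	 * (NIU_NUM_RXCHAN/8 RX, NIU_NUM_TXCHAN/6 TX) and the remainder
	 * is divided among the 10G ports.
	 */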
8672 | if (!num_10g || !num_1g) { |
8673 | rx_chans_per_10g = rx_chans_per_1g = |
8674 | (NIU_NUM_RXCHAN / num_ports); |
8675 | tx_chans_per_10g = tx_chans_per_1g = |
8676 | (NIU_NUM_TXCHAN / num_ports); |
8677 | } else { |
8678 | rx_chans_per_1g = NIU_NUM_RXCHAN / 8; |
8679 | rx_chans_per_10g = (NIU_NUM_RXCHAN - |
8680 | (rx_chans_per_1g * num_1g)) / |
8681 | num_10g; |
8682 | |
8683 | tx_chans_per_1g = NIU_NUM_TXCHAN / 6; |
8684 | tx_chans_per_10g = (NIU_NUM_TXCHAN - |
8685 | (tx_chans_per_1g * num_1g)) / |
8686 | num_10g; |
8687 | } |
8688 | |
8689 | tot_rx = tot_tx = 0; |
8690 | for (i = 0; i < num_ports; i++) { |
		int type = phy_decode(parent->port_phy, i);
8692 | |
8693 | if (type == PORT_TYPE_10G) { |
8694 | parent->rxchan_per_port[i] = rx_chans_per_10g; |
8695 | parent->txchan_per_port[i] = tx_chans_per_10g; |
8696 | } else { |
8697 | parent->rxchan_per_port[i] = rx_chans_per_1g; |
8698 | parent->txchan_per_port[i] = tx_chans_per_1g; |
8699 | } |
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8701 | parent->index, i, |
8702 | parent->rxchan_per_port[i], |
8703 | parent->txchan_per_port[i]); |
8704 | tot_rx += parent->rxchan_per_port[i]; |
8705 | tot_tx += parent->txchan_per_port[i]; |
8706 | } |
8707 | |
8708 | if (tot_rx > NIU_NUM_RXCHAN) { |
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
8710 | parent->index, tot_rx); |
8711 | for (i = 0; i < num_ports; i++) |
8712 | parent->rxchan_per_port[i] = 1; |
8713 | } |
8714 | if (tot_tx > NIU_NUM_TXCHAN) { |
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
8716 | parent->index, tot_tx); |
8717 | for (i = 0; i < num_ports; i++) |
8718 | parent->txchan_per_port[i] = 1; |
8719 | } |
8720 | if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { |
		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
8722 | parent->index, tot_rx, tot_tx); |
8723 | } |
8724 | } |
8725 | |
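/* Partition the RDC tables evenly across ports, filling each table's
 * slots by cycling round-robin over that port's RX channels so that
 * hash-based RDC lookups spread traffic over every channel the port
 * owns.
 */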
8726 | static void niu_divide_rdc_groups(struct niu_parent *parent, |
8727 | int num_10g, int num_1g) |
8728 | { |
8729 | int i, num_ports = parent->num_ports; |
8730 | int rdc_group, rdc_groups_per_port; |
8731 | int rdc_channel_base; |
8732 | |
8733 | rdc_group = 0; |
8734 | rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; |
8735 | |
8736 | rdc_channel_base = 0; |
8737 | |
8738 | for (i = 0; i < num_ports; i++) { |
8739 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; |
8740 | int grp, num_channels = parent->rxchan_per_port[i]; |
8741 | int this_channel_offset; |
8742 | |
8743 | tp->first_table_num = rdc_group; |
8744 | tp->num_tables = rdc_groups_per_port; |
8745 | this_channel_offset = 0; |
8746 | for (grp = 0; grp < tp->num_tables; grp++) { |
8747 | struct rdc_table *rt = &tp->tables[grp]; |
8748 | int slot; |
8749 | |
			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
8751 | parent->index, i, tp->first_table_num + grp); |
8752 | for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { |
8753 | rt->rxdma_channel[slot] = |
8754 | rdc_channel_base + this_channel_offset; |
8755 | |
				pr_cont("%d ", rt->rxdma_channel[slot]);
8757 | |
8758 | if (++this_channel_offset == num_channels) |
8759 | this_channel_offset = 0; |
8760 | } |
			pr_cont("]\n");
8762 | } |
8763 | |
8764 | parent->rdc_default[i] = rdc_channel_base; |
8765 | |
8766 | rdc_channel_base += num_channels; |
8767 | rdc_group += rdc_groups_per_port; |
8768 | } |
8769 | } |
8770 | |
8771 | static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent, |
8772 | struct phy_probe_info *info) |
8773 | { |
8774 | unsigned long flags; |
8775 | int port, err; |
8776 | |
8777 | memset(info, 0, sizeof(*info)); |
8778 | |
8779 | /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */ |
8780 | niu_lock_parent(np, flags); |
8781 | err = 0; |
8782 | for (port = 8; port < 32; port++) { |
8783 | int dev_id_1, dev_id_2; |
8784 | |
8785 | dev_id_1 = mdio_read(np, port, |
8786 | NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); |
8787 | dev_id_2 = mdio_read(np, port, |
8788 | NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); |
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PMA_PMD);
8791 | if (err) |
8792 | break; |
8793 | dev_id_1 = mdio_read(np, port, |
8794 | NIU_PCS_DEV_ADDR, MII_PHYSID1); |
8795 | dev_id_2 = mdio_read(np, port, |
8796 | NIU_PCS_DEV_ADDR, MII_PHYSID2); |
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PCS);
8799 | if (err) |
8800 | break; |
8801 | dev_id_1 = mii_read(np, port, MII_PHYSID1); |
8802 | dev_id_2 = mii_read(np, port, MII_PHYSID2); |
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_MII);
8805 | if (err) |
8806 | break; |
8807 | } |
8808 | niu_unlock_parent(np, flags); |
8809 | |
8810 | return err; |
8811 | } |
8812 | |
8813 | static int walk_phys(struct niu *np, struct niu_parent *parent) |
8814 | { |
8815 | struct phy_probe_info *info = &parent->phy_probe_info; |
8816 | int lowest_10g, lowest_1g; |
8817 | int num_10g, num_1g; |
8818 | u32 val; |
8819 | int err; |
8820 | |
8821 | num_10g = num_1g = 0; |
8822 | |
8823 | if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || |
8824 | !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { |
8825 | num_10g = 0; |
8826 | num_1g = 2; |
8827 | parent->plat_type = PLAT_TYPE_ATCA_CP3220; |
8828 | parent->num_ports = 4; |
		val = (phy_encode(PORT_TYPE_1G, 0) |
		       phy_encode(PORT_TYPE_1G, 1) |
		       phy_encode(PORT_TYPE_1G, 2) |
		       phy_encode(PORT_TYPE_1G, 3));
8833 | } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { |
8834 | num_10g = 2; |
8835 | num_1g = 0; |
8836 | parent->num_ports = 2; |
		val = (phy_encode(PORT_TYPE_10G, 0) |
		       phy_encode(PORT_TYPE_10G, 1));
8839 | } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && |
8840 | (parent->plat_type == PLAT_TYPE_NIU)) { |
8841 | /* this is the Monza case */ |
8842 | if (np->flags & NIU_FLAGS_10G) { |
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
		} else {
			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1));
8848 | } |
8849 | } else { |
8850 | err = fill_phy_probe_info(np, parent, info); |
8851 | if (err) |
8852 | return err; |
8853 | |
8854 | num_10g = count_10g_ports(p: info, lowest: &lowest_10g); |
8855 | num_1g = count_1g_ports(p: info, lowest: &lowest_1g); |
8856 | |
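		/* The switch key packs the probed port counts as
		 * (num_10g << 4) | num_1g; 0x24, for example, means two
		 * 10G ports plus four 1G ports.
		 */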
8857 | switch ((num_10g << 4) | num_1g) { |
8858 | case 0x24: |
8859 | if (lowest_1g == 10) |
8860 | parent->plat_type = PLAT_TYPE_VF_P0; |
8861 | else if (lowest_1g == 26) |
8862 | parent->plat_type = PLAT_TYPE_VF_P1; |
8863 | else |
8864 | goto unknown_vg_1g_port; |
8865 | |
8866 | fallthrough; |
8867 | case 0x22: |
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
8872 | break; |
8873 | |
8874 | case 0x20: |
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
8877 | break; |
8878 | |
8879 | case 0x10: |
			val = phy_encode(PORT_TYPE_10G, np->port);
8881 | break; |
8882 | |
8883 | case 0x14: |
8884 | if (lowest_1g == 10) |
8885 | parent->plat_type = PLAT_TYPE_VF_P0; |
8886 | else if (lowest_1g == 26) |
8887 | parent->plat_type = PLAT_TYPE_VF_P1; |
8888 | else |
8889 | goto unknown_vg_1g_port; |
8890 | |
8891 | fallthrough; |
8892 | case 0x13: |
8893 | if ((lowest_10g & 0x7) == 0) |
				val = (phy_encode(PORT_TYPE_10G, 0) |
				       phy_encode(PORT_TYPE_1G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			else
				val = (phy_encode(PORT_TYPE_1G, 0) |
				       phy_encode(PORT_TYPE_10G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
8903 | break; |
8904 | |
8905 | case 0x04: |
8906 | if (lowest_1g == 10) |
8907 | parent->plat_type = PLAT_TYPE_VF_P0; |
8908 | else if (lowest_1g == 26) |
8909 | parent->plat_type = PLAT_TYPE_VF_P1; |
8910 | else |
8911 | goto unknown_vg_1g_port; |
8912 | |
			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
8917 | break; |
8918 | |
8919 | default: |
			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
8921 | num_10g, num_1g); |
8922 | return -EINVAL; |
8923 | } |
8924 | } |
8925 | |
8926 | parent->port_phy = val; |
8927 | |
8928 | if (parent->plat_type == PLAT_TYPE_NIU) |
8929 | niu_n2_divide_channels(parent); |
8930 | else |
8931 | niu_divide_channels(parent, num_10g, num_1g); |
8932 | |
8933 | niu_divide_rdc_groups(parent, num_10g, num_1g); |
8934 | |
8935 | return 0; |
8936 | |
8937 | unknown_vg_1g_port: |
	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
8939 | return -EINVAL; |
8940 | } |
8941 | |
8942 | static int niu_probe_ports(struct niu *np) |
8943 | { |
8944 | struct niu_parent *parent = np->parent; |
8945 | int err, i; |
8946 | |
8947 | if (parent->port_phy == PORT_PHY_UNKNOWN) { |
8948 | err = walk_phys(np, parent); |
8949 | if (err) |
8950 | return err; |
8951 | |
		niu_set_ldg_timer_res(np, 2);
		for (i = 0; i <= LDN_MAX; i++)
			niu_ldn_irq_enable(np, i, 0);
8955 | } |
8956 | |
8957 | if (parent->port_phy == PORT_PHY_INVALID) |
8958 | return -EINVAL; |
8959 | |
8960 | return 0; |
8961 | } |
8962 | |
8963 | static int niu_classifier_swstate_init(struct niu *np) |
8964 | { |
8965 | struct niu_classifier *cp = &np->clas; |
8966 | |
8967 | cp->tcam_top = (u16) np->port; |
8968 | cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; |
8969 | cp->h1_init = 0xffffffff; |
8970 | cp->h2_init = 0xffff; |
8971 | |
8972 | return fflp_early_init(np); |
8973 | } |
8974 | |
8975 | static void niu_link_config_init(struct niu *np) |
8976 | { |
8977 | struct niu_link_config *lp = &np->link_config; |
8978 | |
8979 | lp->advertising = (ADVERTISED_10baseT_Half | |
8980 | ADVERTISED_10baseT_Full | |
8981 | ADVERTISED_100baseT_Half | |
8982 | ADVERTISED_100baseT_Full | |
8983 | ADVERTISED_1000baseT_Half | |
8984 | ADVERTISED_1000baseT_Full | |
8985 | ADVERTISED_10000baseT_Full | |
8986 | ADVERTISED_Autoneg); |
8987 | lp->speed = lp->active_speed = SPEED_INVALID; |
8988 | lp->duplex = DUPLEX_FULL; |
8989 | lp->active_duplex = DUPLEX_INVALID; |
8990 | lp->autoneg = 1; |
8991 | #if 0 |
8992 | lp->loopback_mode = LOOPBACK_MAC; |
8993 | lp->active_speed = SPEED_10000; |
8994 | lp->active_duplex = DUPLEX_FULL; |
8995 | #else |
8996 | lp->loopback_mode = LOOPBACK_DISABLED; |
8997 | #endif |
8998 | } |
8999 | |
9000 | static int niu_init_mac_ipp_pcs_base(struct niu *np) |
9001 | { |
9002 | switch (np->port) { |
9003 | case 0: |
9004 | np->mac_regs = np->regs + XMAC_PORT0_OFF; |
9005 | np->ipp_off = 0x00000; |
9006 | np->pcs_off = 0x04000; |
9007 | np->xpcs_off = 0x02000; |
9008 | break; |
9009 | |
9010 | case 1: |
9011 | np->mac_regs = np->regs + XMAC_PORT1_OFF; |
9012 | np->ipp_off = 0x08000; |
9013 | np->pcs_off = 0x0a000; |
9014 | np->xpcs_off = 0x08000; |
9015 | break; |
9016 | |
9017 | case 2: |
9018 | np->mac_regs = np->regs + BMAC_PORT2_OFF; |
9019 | np->ipp_off = 0x04000; |
9020 | np->pcs_off = 0x0e000; |
9021 | np->xpcs_off = ~0UL; |
9022 | break; |
9023 | |
9024 | case 3: |
9025 | np->mac_regs = np->regs + BMAC_PORT3_OFF; |
9026 | np->ipp_off = 0x0c000; |
9027 | np->pcs_off = 0x12000; |
9028 | np->xpcs_off = ~0UL; |
9029 | break; |
9030 | |
9031 | default: |
		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
9033 | return -EINVAL; |
9034 | } |
9035 | |
9036 | return 0; |
9037 | } |
9038 | |
9039 | static void niu_try_msix(struct niu *np, u8 *ldg_num_map) |
9040 | { |
9041 | struct msix_entry msi_vec[NIU_NUM_LDG]; |
9042 | struct niu_parent *parent = np->parent; |
9043 | struct pci_dev *pdev = np->pdev; |
9044 | int i, num_irqs; |
9045 | u8 first_ldg; |
9046 | |
9047 | first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; |
9048 | for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) |
9049 | ldg_num_map[i] = first_ldg + i; |
9050 | |
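	/* One vector per RX and TX channel plus one for the MAC; port 0
	 * also owns the MIF and SYSERR interrupts, hence three extras
	 * instead of one.  This mirrors the LDG assignment order used
	 * in niu_ldg_init().
	 */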
9051 | num_irqs = (parent->rxchan_per_port[np->port] + |
9052 | parent->txchan_per_port[np->port] + |
9053 | (np->port == 0 ? 3 : 1)); |
9054 | BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); |
9055 | |
9056 | for (i = 0; i < num_irqs; i++) { |
9057 | msi_vec[i].vector = 0; |
9058 | msi_vec[i].entry = i; |
9059 | } |
9060 | |
	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
9062 | if (num_irqs < 0) { |
9063 | np->flags &= ~NIU_FLAGS_MSIX; |
9064 | return; |
9065 | } |
9066 | |
9067 | np->flags |= NIU_FLAGS_MSIX; |
9068 | for (i = 0; i < num_irqs; i++) |
9069 | np->ldg[i].irq = msi_vec[i].vector; |
9070 | np->num_ldg = num_irqs; |
9071 | } |
9072 | |
9073 | static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) |
9074 | { |
9075 | #ifdef CONFIG_SPARC64 |
9076 | struct platform_device *op = np->op; |
9077 | const u32 *int_prop; |
9078 | int i; |
9079 | |
	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
9081 | if (!int_prop) |
9082 | return -ENODEV; |
9083 | |
9084 | for (i = 0; i < op->archdata.num_irqs; i++) { |
9085 | ldg_num_map[i] = int_prop[i]; |
9086 | np->ldg[i].irq = op->archdata.irqs[i]; |
9087 | } |
9088 | |
9089 | np->num_ldg = op->archdata.num_irqs; |
9090 | |
9091 | return 0; |
9092 | #else |
9093 | return -EINVAL; |
9094 | #endif |
9095 | } |
9096 | |
9097 | static int niu_ldg_init(struct niu *np) |
9098 | { |
9099 | struct niu_parent *parent = np->parent; |
9100 | u8 ldg_num_map[NIU_NUM_LDG]; |
9101 | int first_chan, num_chan; |
9102 | int i, err, ldg_rotor; |
9103 | u8 port; |
9104 | |
9105 | np->num_ldg = 1; |
9106 | np->ldg[0].irq = np->dev->irq; |
9107 | if (parent->plat_type == PLAT_TYPE_NIU) { |
9108 | err = niu_n2_irq_init(np, ldg_num_map); |
9109 | if (err) |
9110 | return err; |
9111 | } else |
9112 | niu_try_msix(np, ldg_num_map); |
9113 | |
9114 | port = np->port; |
9115 | for (i = 0; i < np->num_ldg; i++) { |
9116 | struct niu_ldg *lp = &np->ldg[i]; |
9117 | |
		netif_napi_add(np->dev, &lp->napi, niu_poll);
9119 | |
9120 | lp->np = np; |
9121 | lp->ldg_num = ldg_num_map[i]; |
9122 | lp->timer = 2; /* XXX */ |
9123 | |
9124 | /* On N2 NIU the firmware has setup the SID mappings so they go |
9125 | * to the correct values that will route the LDG to the proper |
9126 | * interrupt in the NCU interrupt table. |
9127 | */ |
9128 | if (np->parent->plat_type != PLAT_TYPE_NIU) { |
			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
9130 | if (err) |
9131 | return err; |
9132 | } |
9133 | } |
9134 | |
9135 | /* We adopt the LDG assignment ordering used by the N2 NIU |
9136 | * 'interrupt' properties because that simplifies a lot of |
9137 | * things. This ordering is: |
9138 | * |
9139 | * MAC |
9140 | * MIF (if port zero) |
9141 | * SYSERR (if port zero) |
9142 | * RX channels |
9143 | * TX channels |
9144 | */ |
9145 | |
9146 | ldg_rotor = 0; |
9147 | |
	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
				 LDN_MAC(port));
9150 | if (err) |
9151 | return err; |
9152 | |
9153 | ldg_rotor++; |
9154 | if (ldg_rotor == np->num_ldg) |
9155 | ldg_rotor = 0; |
9156 | |
9157 | if (port == 0) { |
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_MIF);
9161 | if (err) |
9162 | return err; |
9163 | |
9164 | ldg_rotor++; |
9165 | if (ldg_rotor == np->num_ldg) |
9166 | ldg_rotor = 0; |
9167 | |
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_DEVICE_ERROR);
9171 | if (err) |
9172 | return err; |
9173 | |
9174 | ldg_rotor++; |
9175 | if (ldg_rotor == np->num_ldg) |
9176 | ldg_rotor = 0; |
9177 | |
9178 | } |
9179 | |
9180 | first_chan = 0; |
9181 | for (i = 0; i < port; i++) |
9182 | first_chan += parent->rxchan_per_port[i]; |
9183 | num_chan = parent->rxchan_per_port[port]; |
9184 | |
9185 | for (i = first_chan; i < (first_chan + num_chan); i++) { |
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_RXDMA(i));
9189 | if (err) |
9190 | return err; |
9191 | ldg_rotor++; |
9192 | if (ldg_rotor == np->num_ldg) |
9193 | ldg_rotor = 0; |
9194 | } |
9195 | |
9196 | first_chan = 0; |
9197 | for (i = 0; i < port; i++) |
9198 | first_chan += parent->txchan_per_port[i]; |
9199 | num_chan = parent->txchan_per_port[port]; |
9200 | for (i = first_chan; i < (first_chan + num_chan); i++) { |
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_TXDMA(i));
9204 | if (err) |
9205 | return err; |
9206 | ldg_rotor++; |
9207 | if (ldg_rotor == np->num_ldg) |
9208 | ldg_rotor = 0; |
9209 | } |
9210 | |
9211 | return 0; |
9212 | } |
9213 | |
9214 | static void niu_ldg_free(struct niu *np) |
9215 | { |
9216 | if (np->flags & NIU_FLAGS_MSIX) |
		pci_disable_msix(np->pdev);
9218 | } |
9219 | |
9220 | static int niu_get_of_props(struct niu *np) |
9221 | { |
9222 | #ifdef CONFIG_SPARC64 |
9223 | struct net_device *dev = np->dev; |
9224 | struct device_node *dp; |
9225 | const char *phy_type; |
9226 | const u8 *mac_addr; |
9227 | const char *model; |
9228 | int prop_len; |
9229 | |
9230 | if (np->parent->plat_type == PLAT_TYPE_NIU) |
9231 | dp = np->op->dev.of_node; |
9232 | else |
9233 | dp = pci_device_to_OF_node(np->pdev); |
9234 | |
	phy_type = of_get_property(dp, "phy-type", NULL);
	if (!phy_type) {
		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
9238 | return -EINVAL; |
9239 | } |
9240 | |
	if (!strcmp(phy_type, "none"))
9242 | return -ENODEV; |
9243 | |
9244 | strcpy(np->vpd.phy_type, phy_type); |
9245 | |
9246 | if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { |
		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
9248 | dp, np->vpd.phy_type); |
9249 | return -EINVAL; |
9250 | } |
9251 | |
	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
9255 | dp); |
9256 | return -EINVAL; |
9257 | } |
9258 | if (prop_len != dev->addr_len) { |
		netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
9260 | dp, prop_len); |
9261 | } |
9262 | eth_hw_addr_set(dev, mac_addr); |
9263 | if (!is_valid_ether_addr(&dev->dev_addr[0])) { |
		netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
9266 | return -EINVAL; |
9267 | } |
9268 | |
9269 | model = of_get_property(dp, "model" , NULL); |
9270 | |
9271 | if (model) |
9272 | strcpy(np->vpd.model, model); |
9273 | |
9274 | if (of_property_read_bool(dp, "hot-swappable-phy" )) { |
9275 | np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | |
9276 | NIU_FLAGS_HOTPLUG_PHY); |
9277 | } |
9278 | |
9279 | return 0; |
9280 | #else |
9281 | return -EINVAL; |
9282 | #endif |
9283 | } |
9284 | |
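/* Discover the fixed characteristics of this device: port assignment,
 * PHY wiring and MAC address.  Prefer firmware (OF) properties; when
 * they are absent, fall back to reading the VPD from the on-board
 * EEPROM, and as a last resort the SPROM.
 */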
static int niu_get_invariants(struct niu *np)
{
	int err, have_props;
	u32 offset;

	err = niu_get_of_props(np);
	if (err == -ENODEV)
		return err;

	have_props = !err;

	err = niu_init_mac_ipp_pcs_base(np);
	if (err)
		return err;

	if (have_props) {
		err = niu_get_and_validate_port(np);
		if (err)
			return err;

	} else {
		if (np->parent->plat_type == PLAT_TYPE_NIU)
			return -EINVAL;

		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
		offset = niu_pci_vpd_offset(np);
		netif_printk(np, probe, KERN_DEBUG, np->dev,
			     "%s() VPD offset [%08x]\n", __func__, offset);
		if (offset) {
			err = niu_pci_vpd_fetch(np, offset);
			if (err < 0)
				return err;
		}
		nw64(ESPC_PIO_EN, 0);

		if (np->flags & NIU_FLAGS_VPD_VALID) {
			niu_pci_vpd_validate(np);
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
		}

		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
			err = niu_pci_probe_sprom(np);
			if (err)
				return err;
		}
	}

	err = niu_probe_ports(np);
	if (err)
		return err;

	niu_ldg_init(np);

	niu_classifier_swstate_init(np);
	niu_link_config_init(np);

	err = niu_determine_phy_disposition(np);
	if (!err)
		err = niu_init_link(np);

	return err;
}

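/* All ports (PCI functions) of one Neptune chip share a single
 * "parent" object, which owns the chip-wide state such as the TCAM
 * and the channel-to-port assignments.  Parents live on a global
 * list, keyed by bus identity, and are reference counted by ports.
 */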
static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;

static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	u32 port_phy = p->port_phy;
	char *orig_buf = buf;
	int i;

	if (port_phy == PORT_PHY_UNKNOWN ||
	    port_phy == PORT_PHY_INVALID)
		return 0;

	for (i = 0; i < p->num_ports; i++) {
		const char *type_str;
		int type;

		type = phy_decode(port_phy, i);
		if (type == PORT_TYPE_10G)
			type_str = "10G";
		else
			type_str = "1G";
		buf += sprintf(buf,
			       (i == 0) ? "%s" : " %s",
			       type_str);
	}
	buf += sprintf(buf, "\n");
	return buf - orig_buf;
}

static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}

static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}

static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	/* Pass rx == 0 so the TX channel counts are reported. */
	return __show_chan_per_port(dev, attr, buf, 0);
}

static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);

	return sprintf(buf, "%d\n", p->num_ports);
}

static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, 0444, show_port_phy, NULL),
	__ATTR(plat_type, 0444, show_plat_type, NULL),
	__ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL),
	__ATTR(num_ports, 0444, show_num_ports, NULL),
	{}
};

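/* Create the shared parent: register a "niu-board" platform device to
 * anchor the sysfs attributes above, then allocate and initialize the
 * chip-wide software state (TCAM sizing, default flow keys, LDG map).
 */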
static struct niu_parent *niu_new_parent(struct niu *np,
					 union niu_parent_id *id, u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}

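/* Look up the parent for this chip by bus identity, creating it on
 * first use.  On success a "portN" sysfs link is added under the
 * parent's platform device and the parent's refcount is bumped.
 */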
static struct niu_parent *niu_get_parent(struct niu *np,
					 union niu_parent_id *id, u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[8];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}

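/* Drop this port's reference on its parent, removing the sysfs link;
 * the last port to go tears the parent down entirely.
 */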
static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[8];

	BUG_ON(!p || p->ports[port] != np);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "%s() port[%u]\n", __func__, port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}

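/* DMA operations are abstracted behind struct niu_ops so the same
 * datapath code can run over the generic DMA API (the PCI variants
 * below) or over bare physical addresses on the SPARC64 on-chip NIU
 * (see niu_phys_ops further down).
 */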
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};

static void niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}

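/* Allocate the multiqueue net_device and wire up the state common to
 * both the PCI and the OF probe paths.
 */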
static struct net_device *niu_alloc_and_init(struct device *gen_dev,
					     struct pci_dev *pdev,
					     struct platform_device *op,
					     const struct niu_ops *ops, u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev)
		return NULL;

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}

static const struct net_device_ops niu_netdev_ops = {
	.ndo_open		= niu_open,
	.ndo_stop		= niu_close,
	.ndo_start_xmit		= niu_start_xmit,
	.ndo_get_stats64	= niu_get_stats,
	.ndo_set_rx_mode	= niu_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= niu_set_mac_addr,
	.ndo_eth_ioctl		= niu_ioctl,
	.ndo_tx_timeout		= niu_tx_timeout,
	.ndo_change_mtu		= niu_change_mtu,
};

static void niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}

static void niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;

	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
			 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
			  "COPPER")),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}

static void niu_set_basic_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
}

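/* PCI probe: enable the device, validate its BARs and PCIe capability,
 * attach it to a parent, configure DMA masks (44-bit preferred, 32-bit
 * fallback), map the registers and register the net device.
 */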
static int niu_pci_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if (!pci_is_pcie(pdev)) {
		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
		err = -ENODEV;
		goto err_out_free_res;
	}

	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_NOSNOOP_EN,
		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
		PCI_EXP_DEVCTL_RELAX_EN);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (!err)
		dev->features |= NETIF_F_HIGHDMA;
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_parent;
		}
	}

	niu_set_basic_features(dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	np->regs = pci_ioremap_bar(pdev, 0);
	if (!np->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	dev->irq = pdev->irq;

	/* MTU range: 68 - 9216 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = NIU_MAX_MTU;

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void niu_pci_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);
		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

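/* Power management: on suspend, quiesce the interface (flush the reset
 * worker, stop NAPI and the timer, mask interrupts, halt the hardware);
 * on resume, reinitialize the hardware and restart everything.
 */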
static int __maybe_unused niu_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_work(&np->reset_task);
	niu_netif_stop(np);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_interrupts(np, 0);
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int __maybe_unused niu_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume);

static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= niu_pci_remove_one,
	.driver.pm	= &niu_pm_ops,
};

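/* On the SPARC64 on-chip NIU the device addresses memory with physical
 * addresses directly, so these niu_ops bypass the DMA API entirely:
 * coherent buffers come straight from the page allocator and map/unmap
 * are simple virt/page-to-phys translations with no bookkeeping.
 */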
#ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do. */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do. */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};

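/* OF probe: the low bit of the first "reg" cell selects the port.
 * Resources 1-3 map the main register block and the two windows of
 * virtualization registers.
 */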
static int niu_of_probe(struct platform_device *op)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->dev.of_node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
			op->dev.of_node);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->dev.of_node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	niu_set_basic_features(dev);

	np->regs = of_ioremap(&op->resource[1], 0,
			      resource_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    resource_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    resource_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	platform_set_drvdata(op, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   resource_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   resource_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   resource_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static void niu_of_remove(struct platform_device *op)
{
	struct net_device *dev = platform_get_drvdata(op);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->vir_regs_1) {
			of_iounmap(&op->resource[2], np->vir_regs_1,
				   resource_size(&op->resource[2]));
			np->vir_regs_1 = NULL;
		}

		if (np->vir_regs_2) {
			of_iounmap(&op->resource[3], np->vir_regs_2,
				   resource_size(&op->resource[3]));
			np->vir_regs_2 = NULL;
		}

		if (np->regs) {
			of_iounmap(&op->resource[1], np->regs,
				   resource_size(&op->resource[1]));
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
	}
}

static const struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct platform_driver niu_of_driver = {
	.driver = {
		.name = "niu",
		.of_match_table = niu_match,
	},
	.probe		= niu_of_probe,
	.remove_new	= niu_of_remove,
};

#endif /* CONFIG_SPARC64 */

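/* Module init: sanity-check the niu_page aliasing assumption made at
 * the top of this file, then register the OF driver (SPARC64 only)
 * followed by the PCI driver, unwinding the former if the latter fails.
 */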
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	BUILD_BUG_ON(offsetof(struct page, mapping) !=
		     offsetof(union niu_page, next));

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = platform_driver_register(&niu_of_driver);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			platform_driver_unregister(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	platform_driver_unregister(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);