// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
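/* rx packets shorter than copybreak bytes are copied into a fresh skb
 * instead of handing the dma buffer to the network stack (see
 * bcm_enet_receive_queue) */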

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];
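/* as used by the accessors below: [0] global dma configuration,
 * [1] per-channel dma configuration, [2] per-channel dma state ram */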

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[1] +
			 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		   bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
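/* returns 0 on success, non-zero if the transfer timed out */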
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_buf[desc_idx]) {
			void *buf;

			if (likely(napi_mode))
				buf = napi_alloc_frag(priv->rx_frag_size);
			else
				buf = netdev_alloc_frag(priv->rx_frag_size);
			if (unlikely(!buf))
				break;
			priv->rx_buf[desc_idx] = buf;
			desc->address = dma_map_single(&priv->pdev->dev,
						       buf + priv->rx_buf_offset,
						       priv->rx_buf_size,
						       DMA_FROM_DEVICE);
		}

		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
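		/* order buffer setup before handing ownership to hardware */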
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct list_head rx_list;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	INIT_LIST_HEAD(&rx_list);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;
		void *buf;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
		    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

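		/* small packet: copy into a fresh skb and keep the dma
		 * buffer mapped for reuse; otherwise hand the buffer to
		 * the stack and refill the ring later */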
		if (len < copybreak) {
			skb = napi_alloc_skb(&priv->napi, len);
			if (unlikely(!skb)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(skb->data, buf + priv->rx_buf_offset, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
		} else {
			dma_unmap_single(kdev, desc->address,
					 priv->rx_buf_size, DMA_FROM_DEVICE);
			priv->rx_buf[desc_idx] = NULL;

			skb = napi_build_skb(buf, priv->rx_frag_size);
			if (unlikely(!skb)) {
				skb_free_frag(buf);
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, priv->rx_buf_offset);
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		list_add_tail(&skb->list, &rx_list);

	} while (processed < budget);

	netif_receive_skb_list(&rx_list);
	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev, true);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->rx_chan);
	}

	return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
	struct bcm_enet_priv *priv;
	unsigned int bytes;
	int released;

	priv = netdev_priv(dev);
	bytes = 0;
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		bytes += skb->len;
		napi_consume_skb(skb, budget);
		released++;
	}

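	/* report completed packets/bytes to BQL, paired with
	 * netdev_sent_queue() in start_xmit */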
	netdev_completed_queue(dev, released, bytes);

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skb */
	bcm_enet_tx_reclaim(dev, 0, budget);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
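/* both channel interrupts are masked here and re-enabled from napi
 * poll once the rx/tx queues have been serviced */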
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should never happen
	 * since we stop the queue before it fills up */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		data = skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	netdev_sent_queue(dev, skb->len);

	/* kick tx dma */
	if (!netdev_xmit_more() || !priv->tx_desc_count)
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	eth_hw_addr_set(dev, addr->sa_data);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = dev->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_buf[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
				 DMA_FROM_DEVICE);
		skb_free_frag(priv->rx_buf[i]);
	}
	kfree(priv->rx_buf);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mii_bus->id, priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phy_support_sym_pause(phydev);
		phy_set_max_speed(phydev, SPEED_100);
		phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
				  priv->pause_auto);

		phy_attached_info(phydev);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
	} else {
		phydev = NULL;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMA_BUFALLOC_REG(priv->rx_chan));
	else
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				 ENETDMAC_BUFALLOC, priv->rx_chan);

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, priv->rx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->rx_chan);
		enet_dmas_writel(priv, priv->tx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, priv->rx_desc_dma,
				 ENETDMAC_RSTART, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_desc_dma,
				 ENETDMAC_RSTART, priv->tx_chan);
	}

	/* clear remaining state ram for rx & tx channel */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
	}

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
	}

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	if (phydev)
		phy_start(phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	if (phydev)
		phy_disconnect(phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

	limit = 1000;
	do {
		u32 val;

		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(dev->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1, 0);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy)
		phy_disconnect(dev->phydev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

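/* helpers expanding to the sizeof_stat and stat_offset initializers of
 * a bcm_enet_stats entry */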
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
		    offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
		    offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

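/* mib counters not reported through ethtool, still read so that the
 * clear-on-read hardware counters cannot overflow */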
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy)
		return phy_ethtool_nway_reset(dev);

	return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;
	u32 supported, advertising;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;

		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	} else {
		cmd->base.autoneg = 0;
		cmd->base.speed = (priv->force_speed_100) ?
			SPEED_100 : SPEED_10;
		cmd->base.duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		advertising = 0;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		cmd->base.port = PORT_MII;
	}
	return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_ethtool_ksettings_set(dev->phydev, cmd);
	} else {

		if (cmd->base.autoneg ||
		    (cmd->base.speed != SPEED_100 &&
		     cmd->base.speed != SPEED_10) ||
		    cmd->base.port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 =
			(cmd->base.speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full =
			(cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void
bcm_enet_get_ringparam(struct net_device *dev,
		       struct ethtool_ringparam *ering,
		       struct kernel_ethtool_ringparam *kernel_ering,
		       struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering,
				  struct kernel_ethtool_ringparam *kernel_ering,
				  struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings = bcm_enet_get_strings,
	.get_sset_count = bcm_enet_get_sset_count,
	.get_ethtool_stats = bcm_enet_get_ethtool_stats,
	.nway_reset = bcm_enet_nway_reset,
	.get_drvinfo = bcm_enet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = bcm_enet_get_ringparam,
	.set_ringparam = bcm_enet_set_ringparam,
	.get_pauseparam = bcm_enet_get_pauseparam,
	.set_pauseparam = bcm_enet_set_pauseparam,
	.get_link_ksettings = bcm_enet_get_link_ksettings,
	.set_link_ksettings = bcm_enet_set_link_ksettings,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

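	/* rx buffers are turned into skbs via napi_build_skb(), so the
	 * fragment must leave room for struct skb_shared_info */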
	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open = bcm_enet_open,
	.ndo_stop = bcm_enet_stop,
	.ndo_start_xmit = bcm_enet_start_xmit,
	.ndo_set_mac_address = bcm_enet_set_mac_address,
	.ndo_set_rx_mode = bcm_enet_set_multicast_list,
	.ndo_eth_ioctl = bcm_enet_ioctl,
	.ndo_change_mtu = bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	int irq, irq_rx, irq_tx;
	struct mii_bus *bus;
	int i, ret;

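	/* bcm_enet_shared_base is filled in elsewhere (when the shared
	 * device probes); defer until it is available */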
1723 | if (!bcm_enet_shared_base[0]) |
1724 | return -EPROBE_DEFER; |
1725 | |
1726 | irq = platform_get_irq(pdev, 0); |
1727 | irq_rx = platform_get_irq(pdev, 1); |
1728 | irq_tx = platform_get_irq(pdev, 2); |
1729 | if (irq < 0 || irq_rx < 0 || irq_tx < 0) |
1730 | return -ENODEV; |
1731 | |
1732 | dev = alloc_etherdev(sizeof(*priv)); |
1733 | if (!dev) |
1734 | return -ENOMEM; |
1735 | priv = netdev_priv(dev); |
1736 | |
1737 | priv->enet_is_sw = false; |
1738 | priv->dma_maxburst = BCMENET_DMA_MAXBURST; |
1739 | priv->rx_buf_offset = NET_SKB_PAD; |
1740 | |
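	/* seed hw_mtu and the rx buffer sizing from the default dev->mtu */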
	ret = bcm_enet_change_mtu(dev, dev->mtu);
1742 | if (ret) |
1743 | goto out; |
1744 | |
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
1748 | goto out; |
1749 | } |
1750 | |
1751 | dev->irq = priv->irq = irq; |
1752 | priv->irq_rx = irq_rx; |
1753 | priv->irq_tx = irq_tx; |
1754 | |
	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
1761 | if (ret) |
1762 | goto out; |
1763 | |
1764 | /* initialize default and fetch platform data */ |
1765 | priv->rx_ring_size = BCMENET_DEF_RX_DESC; |
1766 | priv->tx_ring_size = BCMENET_DEF_TX_DESC; |
1767 | |
	pd = dev_get_platdata(&pdev->dev);
1769 | if (pd) { |
		eth_hw_addr_set(dev, pd->mac_addr);
1771 | priv->has_phy = pd->has_phy; |
1772 | priv->phy_id = pd->phy_id; |
1773 | priv->has_phy_interrupt = pd->has_phy_interrupt; |
1774 | priv->phy_interrupt = pd->phy_interrupt; |
1775 | priv->use_external_mii = !pd->use_internal_phy; |
1776 | priv->pause_auto = pd->pause_auto; |
1777 | priv->pause_rx = pd->pause_rx; |
1778 | priv->pause_tx = pd->pause_tx; |
1779 | priv->force_duplex_full = pd->force_duplex_full; |
1780 | priv->force_speed_100 = pd->force_speed_100; |
1781 | priv->dma_chan_en_mask = pd->dma_chan_en_mask; |
1782 | priv->dma_chan_int_mask = pd->dma_chan_int_mask; |
1783 | priv->dma_chan_width = pd->dma_chan_width; |
1784 | priv->dma_has_sram = pd->dma_has_sram; |
1785 | priv->dma_desc_shift = pd->dma_desc_shift; |
1786 | priv->rx_chan = pd->rx_chan; |
1787 | priv->tx_chan = pd->tx_chan; |
1788 | } |
1789 | |
1790 | if (priv->has_phy && !priv->use_external_mii) { |
1791 | /* using internal PHY, enable clock */ |
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
1799 | if (ret) |
1800 | goto out_disable_clk_mac; |
1801 | } |
1802 | |
1803 | /* do minimal hardware init to be able to probe mii bus */ |
1804 | bcm_enet_hw_preinit(priv); |
1805 | |
1806 | /* MII bus registration */ |
1807 | if (priv->has_phy) { |
1808 | |
1809 | priv->mii_bus = mdiobus_alloc(); |
1810 | if (!priv->mii_bus) { |
1811 | ret = -ENOMEM; |
1812 | goto out_uninit_hw; |
1813 | } |
1814 | |
1815 | bus = priv->mii_bus; |
		bus->name = "bcm63xx_enet MII bus";
1817 | bus->parent = &pdev->dev; |
1818 | bus->priv = priv; |
1819 | bus->read = bcm_enet_mdio_read_phylib; |
1820 | bus->write = bcm_enet_mdio_write_phylib; |
		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
1822 | |
		/* only probe the bus address where we expect the PHY,
		 * because the mdio read operation returns 0 instead of
		 * 0xffff if a slave is not present on the hw */
1826 | bus->phy_mask = ~(1 << priv->phy_id); |
1827 | |
1828 | if (priv->has_phy_interrupt) |
1829 | bus->irq[priv->phy_id] = priv->phy_interrupt; |
1830 | |
1831 | ret = mdiobus_register(bus); |
1832 | if (ret) { |
			dev_err(&pdev->dev, "unable to register mdio bus\n");
1834 | goto out_free_mdio; |
1835 | } |
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
1845 | } |
1846 | |
1847 | spin_lock_init(&priv->rx_lock); |
1848 | |
1849 | /* init rx timeout (used for oom) */ |
1850 | timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); |
1851 | |
1852 | /* init the mib update lock&work */ |
1853 | mutex_init(&priv->mib_update_lock); |
1854 | INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); |
1855 | |
1856 | /* zero mib counters */ |
1857 | for (i = 0; i < ENET_MIB_REG_COUNT; i++) |
1858 | enet_writel(priv, 0, ENET_MIB_REG(i)); |
1859 | |
1860 | /* register netdevice */ |
1861 | dev->netdev_ops = &bcm_enet_ops; |
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
1863 | |
1864 | dev->ethtool_ops = &bcm_enet_ethtool_ops; |
1865 | /* MTU range: 46 - 2028 */ |
1866 | dev->min_mtu = ETH_ZLEN - ETH_HLEN; |
1867 | dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN; |
1868 | SET_NETDEV_DEV(dev, &pdev->dev); |
1869 | |
1870 | ret = register_netdev(dev); |
1871 | if (ret) |
1872 | goto out_unregister_mdio; |
1873 | |
1874 | netif_carrier_off(dev); |
	platform_set_drvdata(pdev, dev);
1876 | priv->pdev = pdev; |
1877 | priv->net_dev = dev; |
1878 | |
1879 | return 0; |
1880 | |
out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
1896 | out: |
1897 | free_netdev(dev); |
1898 | return ret; |
1899 | } |
1900 | |
1901 | |
1902 | /* |
1903 | * exit func, stops hardware and unregisters netdevice |
1904 | */ |
1905 | static void bcm_enet_remove(struct platform_device *pdev) |
1906 | { |
1907 | struct bcm_enet_priv *priv; |
1908 | struct net_device *dev; |
1909 | |
1910 | /* stop netdevice */ |
1911 | dev = platform_get_drvdata(pdev); |
1912 | priv = netdev_priv(dev); |
1913 | unregister_netdev(dev); |
1914 | |
1915 | /* turn off mdc clock */ |
1916 | enet_writel(priv, 0, ENET_MIISC_REG); |
1917 | |
1918 | if (priv->has_phy) { |
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
1921 | } else { |
1922 | struct bcm63xx_enet_platform_data *pd; |
1923 | |
		pd = dev_get_platdata(&pdev->dev);
1925 | if (pd && pd->mii_config) |
1926 | pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, |
1927 | bcm_enet_mdio_write_mii); |
1928 | } |
1929 | |
1930 | /* disable hw block clocks */ |
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);
1933 | |
1934 | free_netdev(dev); |
1935 | } |
1936 | |
1937 | static struct platform_driver bcm63xx_enet_driver = { |
1938 | .probe = bcm_enet_probe, |
1939 | .remove_new = bcm_enet_remove, |
1940 | .driver = { |
		.name = "bcm63xx_enet",
1942 | }, |
1943 | }; |
1944 | |
1945 | /* |
1946 | * switch mii access callbacks |
1947 | */ |
1948 | static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, |
1949 | int ext, int phy_id, int location) |
1950 | { |
1951 | u32 reg; |
1952 | int ret; |
1953 | |
	spin_lock_bh(&priv->enetsw_mdio_lock);
1955 | enetsw_writel(priv, 0, ENETSW_MDIOC_REG); |
1956 | |
1957 | reg = ENETSW_MDIOC_RD_MASK | |
1958 | (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | |
1959 | (location << ENETSW_MDIOC_REG_SHIFT); |
1960 | |
1961 | if (ext) |
1962 | reg |= ENETSW_MDIOC_EXT_MASK; |
1963 | |
1964 | enetsw_writel(priv, reg, ENETSW_MDIOC_REG); |
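	/* there is no completion interrupt on this path: a fixed delay
	 * gives the MDIO transaction time to finish before the result
	 * is read back
	 */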
1965 | udelay(50); |
1966 | ret = enetsw_readw(priv, ENETSW_MDIOD_REG); |
	spin_unlock_bh(&priv->enetsw_mdio_lock);
1968 | return ret; |
1969 | } |
1970 | |
1971 | static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv, |
1972 | int ext, int phy_id, int location, |
1973 | uint16_t data) |
1974 | { |
1975 | u32 reg; |
1976 | |
	spin_lock_bh(&priv->enetsw_mdio_lock);
1978 | enetsw_writel(priv, 0, ENETSW_MDIOC_REG); |
1979 | |
1980 | reg = ENETSW_MDIOC_WR_MASK | |
1981 | (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | |
1982 | (location << ENETSW_MDIOC_REG_SHIFT); |
1983 | |
1984 | if (ext) |
1985 | reg |= ENETSW_MDIOC_EXT_MASK; |
1986 | |
1987 | reg |= data; |
1988 | |
1989 | enetsw_writel(priv, reg, ENETSW_MDIOC_REG); |
1990 | udelay(50); |
	spin_unlock_bh(&priv->enetsw_mdio_lock);
1992 | } |
1993 | |
1994 | static inline int bcm_enet_port_is_rgmii(int portid) |
1995 | { |
1996 | return portid >= ENETSW_RGMII_PORT0; |
1997 | } |
1998 | |
1999 | /* |
2000 | * enet sw PHY polling |
2001 | */ |
2002 | static void swphy_poll_timer(struct timer_list *t) |
2003 | { |
2004 | struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll); |
2005 | unsigned int i; |
2006 | |
2007 | for (i = 0; i < priv->num_ports; i++) { |
2008 | struct bcm63xx_enetsw_port *port; |
2009 | int val, j, up, advertise, lpa, speed, duplex, media; |
		int external_phy = bcm_enet_port_is_rgmii(i);
2011 | u8 override; |
2012 | |
2013 | port = &priv->used_ports[i]; |
2014 | if (!port->used) |
2015 | continue; |
2016 | |
2017 | if (port->bypass_link) |
2018 | continue; |
2019 | |
		/* read BMSR twice: link status is latched, so the first
		 * read may report a stale link state */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);
2024 | |
2025 | if (val == 0xffff) |
2026 | continue; |
2027 | |
2028 | up = (val & BMSR_LSTATUS) ? 1 : 0; |
2029 | if (!(up ^ priv->sw_port_link[i])) |
2030 | continue; |
2031 | |
2032 | priv->sw_port_link[i] = up; |
2033 | |
2034 | /* link changed */ |
2035 | if (!up) { |
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
2038 | enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, |
2039 | ENETSW_PORTOV_REG(i)); |
2040 | enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | |
2041 | ENETSW_PTCTRL_TXDIS_MASK, |
2042 | ENETSW_PTCTRL_REG(i)); |
2043 | continue; |
2044 | } |
2045 | |
		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);
2051 | |
2052 | /* figure out media and duplex from advertise and LPA values */ |
		media = mii_nway_result(lpa & advertise);
2054 | duplex = (media & ADVERTISE_FULL) ? 1 : 0; |
2055 | |
2056 | if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) |
2057 | speed = 100; |
2058 | else |
2059 | speed = 10; |
2060 | |
2061 | if (val & BMSR_ESTATEN) { |
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
							 port->phy_id,
							 MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF) &&
			    lpa & (LPA_1000FULL | LPA_1000HALF)) {
2070 | speed = 1000; |
2071 | duplex = (lpa & LPA_1000FULL); |
2072 | } |
2073 | } |
2074 | |
		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");
2078 | |
2079 | override = ENETSW_PORTOV_ENABLE_MASK | |
2080 | ENETSW_PORTOV_LINKUP_MASK; |
2081 | |
2082 | if (speed == 1000) |
2083 | override |= ENETSW_IMPOV_1000_MASK; |
2084 | else if (speed == 100) |
2085 | override |= ENETSW_IMPOV_100_MASK; |
2086 | if (duplex) |
2087 | override |= ENETSW_IMPOV_FDX_MASK; |
2088 | |
		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2091 | } |
2092 | |
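	/* re-arm ourselves: ports are polled once per second */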
2093 | priv->swphy_poll.expires = jiffies + HZ; |
	add_timer(&priv->swphy_poll);
2095 | } |
2096 | |
2097 | /* |
2098 | * open callback, allocate dma rings & buffers and start rx operation |
2099 | */ |
2100 | static int bcm_enetsw_open(struct net_device *dev) |
2101 | { |
2102 | struct bcm_enet_priv *priv; |
2103 | struct device *kdev; |
2104 | int i, ret; |
2105 | unsigned int size; |
2106 | void *p; |
2107 | u32 val; |
2108 | |
2109 | priv = netdev_priv(dev); |
2110 | kdev = &priv->pdev->dev; |
2111 | |
	/* mask all interrupts, then request the irq lines */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
2118 | if (ret) |
2119 | goto out_freeirq; |
2120 | |
2121 | if (priv->irq_tx != -1) { |
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
2124 | if (ret) |
2125 | goto out_freeirq_rx; |
2126 | } |
2127 | |
2128 | /* allocate rx dma ring */ |
2129 | size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); |
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
2133 | ret = -ENOMEM; |
2134 | goto out_freeirq_tx; |
2135 | } |
2136 | |
2137 | priv->rx_desc_alloc_size = size; |
2138 | priv->rx_desc_cpu = p; |
2139 | |
2140 | /* allocate tx dma ring */ |
2141 | size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); |
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
2145 | ret = -ENOMEM; |
2146 | goto out_free_rx_ring; |
2147 | } |
2148 | |
2149 | priv->tx_desc_alloc_size = size; |
2150 | priv->tx_desc_cpu = p; |
2151 | |
	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
2156 | ret = -ENOMEM; |
2157 | goto out_free_tx_ring; |
2158 | } |
2159 | |
2160 | priv->tx_desc_count = priv->tx_ring_size; |
2161 | priv->tx_dirty_desc = 0; |
2162 | priv->tx_curr_desc = 0; |
2163 | spin_lock_init(&priv->tx_lock); |
2164 | |
2165 | /* init & fill rx ring with buffers */ |
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
2170 | ret = -ENOMEM; |
2171 | goto out_free_tx_skb; |
2172 | } |
2173 | |
2174 | priv->rx_desc_count = 0; |
2175 | priv->rx_dirty_desc = 0; |
2176 | priv->rx_curr_desc = 0; |
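	/* the ring starts empty; bcm_enet_refill_rx() below populates it */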
2177 | |
2178 | /* disable all ports */ |
2179 | for (i = 0; i < priv->num_ports; i++) { |
2180 | enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, |
2181 | ENETSW_PORTOV_REG(i)); |
2182 | enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | |
2183 | ENETSW_PTCTRL_TXDIS_MASK, |
2184 | ENETSW_PTCTRL_REG(i)); |
2185 | |
2186 | priv->sw_port_link[i] = 0; |
2187 | } |
2188 | |
2189 | /* reset mib */ |
2190 | val = enetsw_readb(priv, ENETSW_GMCR_REG); |
2191 | val |= ENETSW_GMCR_RST_MIB_MASK; |
2192 | enetsw_writeb(priv, val, ENETSW_GMCR_REG); |
2193 | mdelay(1); |
2194 | val &= ~ENETSW_GMCR_RST_MIB_MASK; |
2195 | enetsw_writeb(priv, val, ENETSW_GMCR_REG); |
2196 | mdelay(1); |
2197 | |
2198 | /* force CPU port state */ |
2199 | val = enetsw_readb(priv, ENETSW_IMPOV_REG); |
2200 | val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; |
2201 | enetsw_writeb(priv, val, ENETSW_IMPOV_REG); |
2202 | |
2203 | /* enable switch forward engine */ |
2204 | val = enetsw_readb(priv, ENETSW_SWMODE_REG); |
2205 | val |= ENETSW_SWMODE_FWD_EN_MASK; |
2206 | enetsw_writeb(priv, val, ENETSW_SWMODE_REG); |
2207 | |
2208 | /* enable jumbo on all ports */ |
2209 | enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG); |
2210 | enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG); |
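	/* the 0x1ff written above presumably covers 8 external ports plus
	 * the internal/IMP port; 9728 matches the top of the largest MIB
	 * frame-size bucket (8192-9728)
	 */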
2211 | |
2212 | /* initialize flow control buffer allocation */ |
2213 | enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, |
2214 | ENETDMA_BUFALLOC_REG(priv->rx_chan)); |
2215 | |
	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
2218 | ret = -ENOMEM; |
2219 | goto out; |
2220 | } |
2221 | |
2222 | /* write rx & tx ring addresses */ |
2223 | enet_dmas_writel(priv, priv->rx_desc_dma, |
2224 | ENETDMAS_RSTART_REG, priv->rx_chan); |
2225 | enet_dmas_writel(priv, priv->tx_desc_dma, |
2226 | ENETDMAS_RSTART_REG, priv->tx_chan); |
2227 | |
2228 | /* clear remaining state ram for rx & tx channel */ |
2229 | enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); |
2230 | enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); |
2231 | enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); |
2232 | enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); |
2233 | enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); |
2234 | enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); |
2235 | |
2236 | /* set dma maximum burst len */ |
2237 | enet_dmac_writel(priv, priv->dma_maxburst, |
2238 | ENETDMAC_MAXBURST, priv->rx_chan); |
2239 | enet_dmac_writel(priv, priv->dma_maxburst, |
2240 | ENETDMAC_MAXBURST, priv->tx_chan); |
2241 | |
2242 | /* set flow control low/high threshold to 1/3 / 2/3 */ |
2243 | val = priv->rx_ring_size / 3; |
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
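	/* the watermarks are recomputed from the current ring size on
	 * every open, so they track ethtool ring resizing
	 */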
2247 | |
2248 | /* all set, enable mac and interrupts, start dma engine and |
2249 | * kick rx dma channel |
2250 | */ |
2251 | wmb(); |
2252 | enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); |
2253 | enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, |
2254 | ENETDMAC_CHANCFG, priv->rx_chan); |
2255 | |
2256 | /* watch "packet transferred" interrupt in rx and tx */ |
2257 | enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, |
2258 | ENETDMAC_IR, priv->rx_chan); |
2259 | enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, |
2260 | ENETDMAC_IR, priv->tx_chan); |
2261 | |
2262 | /* make sure we enable napi before rx interrupt */ |
	napi_enable(&priv->napi);
2264 | |
2265 | enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, |
2266 | ENETDMAC_IRMASK, priv->rx_chan); |
2267 | enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, |
2268 | ENETDMAC_IRMASK, priv->tx_chan); |
2269 | |
2270 | netif_carrier_on(dev); |
2271 | netif_start_queue(dev); |
2272 | |
2273 | /* apply override config for bypass_link ports here. */ |
2274 | for (i = 0; i < priv->num_ports; i++) { |
2275 | struct bcm63xx_enetsw_port *port; |
2276 | u8 override; |
2277 | port = &priv->used_ports[i]; |
2278 | if (!port->used) |
2279 | continue; |
2280 | |
2281 | if (!port->bypass_link) |
2282 | continue; |
2283 | |
2284 | override = ENETSW_PORTOV_ENABLE_MASK | |
2285 | ENETSW_PORTOV_LINKUP_MASK; |
2286 | |
2287 | switch (port->force_speed) { |
2288 | case 1000: |
2289 | override |= ENETSW_IMPOV_1000_MASK; |
2290 | break; |
2291 | case 100: |
2292 | override |= ENETSW_IMPOV_100_MASK; |
2293 | break; |
2294 | case 10: |
2295 | break; |
2296 | default: |
			pr_warn("invalid forced speed on port %s: assume 10\n",
2298 | port->name); |
2299 | break; |
2300 | } |
2301 | |
		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2308 | } |
2309 | |
2310 | /* start phy polling timer */ |
2311 | timer_setup(&priv->swphy_poll, swphy_poll_timer, 0); |
	mod_timer(&priv->swphy_poll, jiffies);
2313 | return 0; |
2314 | |
2315 | out: |
2316 | bcm_enet_free_rx_buf_ring(kdev, priv); |
2317 | |
2318 | out_free_tx_skb: |
	kfree(priv->tx_skb);
2320 | |
2321 | out_free_tx_ring: |
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
2324 | |
2325 | out_free_rx_ring: |
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
2328 | |
2329 | out_freeirq_tx: |
2330 | if (priv->irq_tx != -1) |
2331 | free_irq(priv->irq_tx, dev); |
2332 | |
2333 | out_freeirq_rx: |
2334 | free_irq(priv->irq_rx, dev); |
2335 | |
2336 | out_freeirq: |
2337 | return ret; |
2338 | } |
2339 | |
2340 | /* stop callback */ |
2341 | static int bcm_enetsw_stop(struct net_device *dev) |
2342 | { |
2343 | struct bcm_enet_priv *priv; |
2344 | struct device *kdev; |
2345 | |
2346 | priv = netdev_priv(dev); |
2347 | kdev = &priv->pdev->dev; |
2348 | |
	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);
2353 | |
2354 | /* mask all interrupts */ |
2355 | enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); |
2356 | enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); |
2357 | |
2358 | /* disable dma & mac */ |
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
2361 | |
2362 | /* force reclaim of all tx buffers */ |
	bcm_enet_tx_reclaim(dev, 1, 0);
2364 | |
2365 | /* free the rx buffer ring */ |
2366 | bcm_enet_free_rx_buf_ring(kdev, priv); |
2367 | |
2368 | /* free remaining allocated memory */ |
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
2374 | if (priv->irq_tx != -1) |
2375 | free_irq(priv->irq_tx, dev); |
2376 | free_irq(priv->irq_rx, dev); |
2377 | |
2378 | /* reset BQL after forced tx reclaim to prevent kernel panic */ |
	netdev_reset_queue(dev);
2380 | |
2381 | return 0; |
2382 | } |
2383 | |
2384 | /* try to sort out phy external status by walking the used_port field |
2385 | * in the bcm_enet_priv structure. in case the phy address is not |
2386 | * assigned to any physical port on the switch, assume it is external |
2387 | * (and yell at the user). |
2388 | */ |
2389 | static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id) |
2390 | { |
2391 | int i; |
2392 | |
2393 | for (i = 0; i < priv->num_ports; ++i) { |
2394 | if (!priv->used_ports[i].used) |
2395 | continue; |
2396 | if (priv->used_ports[i].phy_id == phy_id) |
			return bcm_enet_port_is_rgmii(i);
2398 | } |
2399 | |
	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
2402 | return 1; |
2403 | } |
2404 | |
2405 | /* can't use bcmenet_sw_mdio_read directly as we need to sort out |
2406 | * external/internal status of the given phy_id first. |
2407 | */ |
2408 | static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, |
2409 | int location) |
2410 | { |
2411 | struct bcm_enet_priv *priv; |
2412 | |
2413 | priv = netdev_priv(dev); |
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
2417 | } |
2418 | |
2419 | /* can't use bcmenet_sw_mdio_write directly as we need to sort out |
2420 | * external/internal status of the given phy_id first. |
2421 | */ |
2422 | static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, |
2423 | int location, |
2424 | int val) |
2425 | { |
2426 | struct bcm_enet_priv *priv; |
2427 | |
2428 | priv = netdev_priv(dev); |
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
2431 | } |
2432 | |
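/* the switch ports have no phylib phydev attached (they are polled by
 * hand above), so mii ioctls go through a throwaway mii_if_info
 */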
2433 | static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2434 | { |
2435 | struct mii_if_info mii; |
2436 | |
2437 | mii.dev = dev; |
2438 | mii.mdio_read = bcm_enetsw_mii_mdio_read; |
2439 | mii.mdio_write = bcm_enetsw_mii_mdio_write; |
2440 | mii.phy_id = 0; |
2441 | mii.phy_id_mask = 0x3f; |
2442 | mii.reg_num_mask = 0x1f; |
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}
2446 | |
2447 | static const struct net_device_ops bcm_enetsw_ops = { |
2448 | .ndo_open = bcm_enetsw_open, |
2449 | .ndo_stop = bcm_enetsw_stop, |
2450 | .ndo_start_xmit = bcm_enet_start_xmit, |
2451 | .ndo_change_mtu = bcm_enet_change_mtu, |
2452 | .ndo_eth_ioctl = bcm_enetsw_ioctl, |
2453 | }; |
2454 | |
2455 | |
2456 | static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { |
2457 | { "rx_packets" , DEV_STAT(rx_packets), -1 }, |
2458 | { "tx_packets" , DEV_STAT(tx_packets), -1 }, |
2459 | { "rx_bytes" , DEV_STAT(rx_bytes), -1 }, |
2460 | { "tx_bytes" , DEV_STAT(tx_bytes), -1 }, |
2461 | { "rx_errors" , DEV_STAT(rx_errors), -1 }, |
2462 | { "tx_errors" , DEV_STAT(tx_errors), -1 }, |
2463 | { "rx_dropped" , DEV_STAT(rx_dropped), -1 }, |
2464 | { "tx_dropped" , DEV_STAT(tx_dropped), -1 }, |
2465 | |
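	/* the switch MIB registers are named from the switch port's point
	 * of view, so the host's tx statistics map onto ETHSW_MIB_RX_*
	 * registers and vice versa
	 */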
2466 | { "tx_good_octets" , GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT }, |
2467 | { "tx_unicast" , GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST }, |
2468 | { "tx_broadcast" , GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST }, |
2469 | { "tx_multicast" , GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT }, |
2470 | { "tx_64_octets" , GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 }, |
2471 | { "tx_65_127_oct" , GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 }, |
2472 | { "tx_128_255_oct" , GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 }, |
2473 | { "tx_256_511_oct" , GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 }, |
2474 | { "tx_512_1023_oct" , GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023}, |
2475 | { "tx_1024_1522_oct" , GEN_STAT(mib.tx_1024_max), |
2476 | ETHSW_MIB_RX_1024_1522 }, |
2477 | { "tx_1523_2047_oct" , GEN_STAT(mib.tx_1523_2047), |
2478 | ETHSW_MIB_RX_1523_2047 }, |
2479 | { "tx_2048_4095_oct" , GEN_STAT(mib.tx_2048_4095), |
2480 | ETHSW_MIB_RX_2048_4095 }, |
2481 | { "tx_4096_8191_oct" , GEN_STAT(mib.tx_4096_8191), |
2482 | ETHSW_MIB_RX_4096_8191 }, |
2483 | { "tx_8192_9728_oct" , GEN_STAT(mib.tx_8192_9728), |
2484 | ETHSW_MIB_RX_8192_9728 }, |
2485 | { "tx_oversize" , GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, |
2486 | { "tx_oversize_drop" , GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, |
2487 | { "tx_dropped" , GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, |
2488 | { "tx_undersize" , GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, |
2489 | { "tx_pause" , GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE }, |
2490 | |
2491 | { "rx_good_octets" , GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, |
2492 | { "rx_broadcast" , GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, |
2493 | { "rx_multicast" , GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, |
2494 | { "rx_unicast" , GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, |
2495 | { "rx_pause" , GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, |
2496 | { "rx_dropped" , GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, |
2497 | |
2498 | }; |
2499 | |
#define BCM_ENETSW_STATS_LEN	ARRAY_SIZE(bcm_enetsw_gstrings_stats)
2502 | |
2503 | static void bcm_enetsw_get_strings(struct net_device *netdev, |
2504 | u32 stringset, u8 *data) |
2505 | { |
2506 | int i; |
2507 | |
2508 | switch (stringset) { |
2509 | case ETH_SS_STATS: |
2510 | for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { |
2511 | memcpy(data + i * ETH_GSTRING_LEN, |
2512 | bcm_enetsw_gstrings_stats[i].stat_string, |
2513 | ETH_GSTRING_LEN); |
2514 | } |
2515 | break; |
2516 | } |
2517 | } |
2518 | |
2519 | static int bcm_enetsw_get_sset_count(struct net_device *netdev, |
2520 | int string_set) |
2521 | { |
2522 | switch (string_set) { |
2523 | case ETH_SS_STATS: |
2524 | return BCM_ENETSW_STATS_LEN; |
2525 | default: |
2526 | return -EINVAL; |
2527 | } |
2528 | } |
2529 | |
2530 | static void bcm_enetsw_get_drvinfo(struct net_device *netdev, |
2531 | struct ethtool_drvinfo *drvinfo) |
2532 | { |
	strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2535 | } |
2536 | |
2537 | static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, |
2538 | struct ethtool_stats *stats, |
2539 | u64 *data) |
2540 | { |
2541 | struct bcm_enet_priv *priv; |
2542 | int i; |
2543 | |
	priv = netdev_priv(netdev);
2545 | |
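	/* first pass: snapshot the hardware MIB counters into the private
	 * mib copy
	 */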
2546 | for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { |
2547 | const struct bcm_enet_stats *s; |
2548 | u32 lo, hi; |
2549 | char *p; |
2550 | int reg; |
2551 | |
2552 | s = &bcm_enetsw_gstrings_stats[i]; |
2553 | |
2554 | reg = s->mib_reg; |
2555 | if (reg == -1) |
2556 | continue; |
2557 | |
		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2563 | *(u64 *)p = ((u64)hi << 32 | lo); |
2564 | } else { |
2565 | *(u32 *)p = lo; |
2566 | } |
2567 | } |
2568 | |
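	/* second pass: copy device stats and mib values into the ethtool
	 * buffer
	 */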
2569 | for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { |
2570 | const struct bcm_enet_stats *s; |
2571 | char *p; |
2572 | |
2573 | s = &bcm_enetsw_gstrings_stats[i]; |
2574 | |
2575 | if (s->mib_reg == -1) |
2576 | p = (char *)&netdev->stats + s->stat_offset; |
2577 | else |
2578 | p = (char *)priv + s->stat_offset; |
2579 | |
2580 | data[i] = (s->sizeof_stat == sizeof(u64)) ? |
2581 | *(u64 *)p : *(u32 *)p; |
2582 | } |
2583 | } |
2584 | |
2585 | static void |
2586 | bcm_enetsw_get_ringparam(struct net_device *dev, |
2587 | struct ethtool_ringparam *ering, |
2588 | struct kernel_ethtool_ringparam *kernel_ering, |
2589 | struct netlink_ext_ack *extack) |
2590 | { |
2591 | struct bcm_enet_priv *priv; |
2592 | |
2593 | priv = netdev_priv(dev); |
2594 | |
2595 | /* rx/tx ring is actually only limited by memory */ |
2596 | ering->rx_max_pending = 8192; |
2597 | ering->tx_max_pending = 8192; |
2598 | ering->rx_mini_max_pending = 0; |
2599 | ering->rx_jumbo_max_pending = 0; |
2600 | ering->rx_pending = priv->rx_ring_size; |
2601 | ering->tx_pending = priv->tx_ring_size; |
2602 | } |
2603 | |
2604 | static int |
2605 | bcm_enetsw_set_ringparam(struct net_device *dev, |
2606 | struct ethtool_ringparam *ering, |
2607 | struct kernel_ethtool_ringparam *kernel_ering, |
2608 | struct netlink_ext_ack *extack) |
2609 | { |
2610 | struct bcm_enet_priv *priv; |
2611 | int was_running; |
2612 | |
2613 | priv = netdev_priv(dev); |
2614 | |
2615 | was_running = 0; |
2616 | if (netif_running(dev)) { |
2617 | bcm_enetsw_stop(dev); |
2618 | was_running = 1; |
2619 | } |
2620 | |
2621 | priv->rx_ring_size = ering->rx_pending; |
2622 | priv->tx_ring_size = ering->tx_pending; |
2623 | |
2624 | if (was_running) { |
2625 | int err; |
2626 | |
2627 | err = bcm_enetsw_open(dev); |
2628 | if (err) |
2629 | dev_close(dev); |
2630 | } |
2631 | return 0; |
2632 | } |
2633 | |
2634 | static const struct ethtool_ops bcm_enetsw_ethtool_ops = { |
2635 | .get_strings = bcm_enetsw_get_strings, |
2636 | .get_sset_count = bcm_enetsw_get_sset_count, |
2637 | .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, |
2638 | .get_drvinfo = bcm_enetsw_get_drvinfo, |
2639 | .get_ringparam = bcm_enetsw_get_ringparam, |
2640 | .set_ringparam = bcm_enetsw_set_ringparam, |
2641 | }; |
2642 | |
2643 | /* allocate netdevice, request register memory and register device. */ |
2644 | static int bcm_enetsw_probe(struct platform_device *pdev) |
2645 | { |
2646 | struct bcm_enet_priv *priv; |
2647 | struct net_device *dev; |
2648 | struct bcm63xx_enetsw_platform_data *pd; |
2649 | struct resource *res_mem; |
2650 | int ret, irq_rx, irq_tx; |
2651 | |
2652 | if (!bcm_enet_shared_base[0]) |
2653 | return -EPROBE_DEFER; |
2654 | |
2655 | res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2656 | irq_rx = platform_get_irq(pdev, 0); |
2657 | irq_tx = platform_get_irq(pdev, 1); |
2658 | if (!res_mem || irq_rx < 0) |
2659 | return -ENODEV; |
2660 | |
2661 | dev = alloc_etherdev(sizeof(*priv)); |
2662 | if (!dev) |
2663 | return -ENOMEM; |
2664 | priv = netdev_priv(dev); |
2665 | |
2666 | /* initialize default and fetch platform data */ |
2667 | priv->enet_is_sw = true; |
2668 | priv->irq_rx = irq_rx; |
2669 | priv->irq_tx = irq_tx; |
2670 | priv->rx_ring_size = BCMENET_DEF_RX_DESC; |
2671 | priv->tx_ring_size = BCMENET_DEF_TX_DESC; |
2672 | priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; |
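	/* NET_IP_ALIGN keeps the IP header word-aligned behind the
	 * 14 byte ethernet header
	 */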
2673 | priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN; |
2674 | |
	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
2678 | memcpy(priv->used_ports, pd->used_ports, |
2679 | sizeof(pd->used_ports)); |
2680 | priv->num_ports = pd->num_ports; |
2681 | priv->dma_has_sram = pd->dma_has_sram; |
2682 | priv->dma_chan_en_mask = pd->dma_chan_en_mask; |
2683 | priv->dma_chan_int_mask = pd->dma_chan_int_mask; |
2684 | priv->dma_chan_width = pd->dma_chan_width; |
2685 | } |
2686 | |
	ret = bcm_enet_change_mtu(dev, dev->mtu);
2688 | if (ret) |
2689 | goto out; |
2690 | |
	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
2694 | goto out; |
2695 | } |
2696 | |
	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
2703 | if (ret) |
2704 | goto out; |
2705 | |
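	/* the switch uses fixed dma channels: rx on 0, tx on 1, with no
	 * platform data override
	 */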
2706 | priv->rx_chan = 0; |
2707 | priv->tx_chan = 1; |
2708 | spin_lock_init(&priv->rx_lock); |
2709 | |
2710 | /* init rx timeout (used for oom) */ |
2711 | timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); |
2712 | |
2713 | /* register netdevice */ |
2714 | dev->netdev_ops = &bcm_enetsw_ops; |
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
2716 | dev->ethtool_ops = &bcm_enetsw_ethtool_ops; |
2717 | SET_NETDEV_DEV(dev, &pdev->dev); |
2718 | |
2719 | spin_lock_init(&priv->enetsw_mdio_lock); |
2720 | |
2721 | ret = register_netdev(dev); |
2722 | if (ret) |
2723 | goto out_disable_clk; |
2724 | |
2725 | netif_carrier_off(dev); |
	platform_set_drvdata(pdev, dev);
2727 | priv->pdev = pdev; |
2728 | priv->net_dev = dev; |
2729 | |
2730 | return 0; |
2731 | |
2732 | out_disable_clk: |
	clk_disable_unprepare(priv->mac_clk);
2734 | out: |
2735 | free_netdev(dev); |
2736 | return ret; |
2737 | } |
2738 | |
2739 | |
2740 | /* exit func, stops hardware and unregisters netdevice */ |
2741 | static void bcm_enetsw_remove(struct platform_device *pdev) |
2742 | { |
2743 | struct bcm_enet_priv *priv; |
2744 | struct net_device *dev; |
2745 | |
2746 | /* stop netdevice */ |
2747 | dev = platform_get_drvdata(pdev); |
2748 | priv = netdev_priv(dev); |
2749 | unregister_netdev(dev); |
2750 | |
	clk_disable_unprepare(priv->mac_clk);
2752 | |
2753 | free_netdev(dev); |
2754 | } |
2755 | |
2756 | static struct platform_driver bcm63xx_enetsw_driver = { |
2757 | .probe = bcm_enetsw_probe, |
2758 | .remove_new = bcm_enetsw_remove, |
2759 | .driver = { |
		.name = "bcm63xx_enetsw",
2761 | }, |
2762 | }; |
2763 | |
2764 | /* reserve & remap memory space shared between all macs */ |
2765 | static int bcm_enet_shared_probe(struct platform_device *pdev) |
2766 | { |
2767 | void __iomem *p[3]; |
2768 | unsigned int i; |
2769 | |
2770 | memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); |
2771 | |
2772 | for (i = 0; i < 3; i++) { |
		p[i] = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
2776 | } |
2777 | |
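	/* publish the mappings only once all three regions are remapped,
	 * so the mac probes keep deferring after a partial failure
	 */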
2778 | memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); |
2779 | |
2780 | return 0; |
2781 | } |
2782 | |
2783 | /* this "shared" driver is needed because both macs share a single |
2784 | * address space |
2785 | */ |
2786 | struct platform_driver bcm63xx_enet_shared_driver = { |
2787 | .probe = bcm_enet_shared_probe, |
2788 | .driver = { |
		.name = "bcm63xx_enet_shared",
2790 | }, |
2791 | }; |
2792 | |
2793 | static struct platform_driver * const drivers[] = { |
2794 | &bcm63xx_enet_shared_driver, |
2795 | &bcm63xx_enet_driver, |
2796 | &bcm63xx_enetsw_driver, |
2797 | }; |
2798 | |
2799 | /* entry point */ |
2800 | static int __init bcm_enet_init(void) |
2801 | { |
2802 | return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); |
2803 | } |
2804 | |
2805 | static void __exit bcm_enet_exit(void) |
2806 | { |
2807 | platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); |
2808 | } |
2809 | |
2810 | |
2811 | module_init(bcm_enet_init); |
2812 | module_exit(bcm_enet_exit); |
2813 | |
MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");
2817 | |