1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* drivers/net/ethernet/freescale/gianfar.c |
3 | * |
4 | * Gianfar Ethernet Driver |
5 | * This driver is designed for the non-CPM ethernet controllers |
6 | * on the 85xx and 83xx family of integrated processors |
7 | * Based on 8260_io/fcc_enet.c |
8 | * |
9 | * Author: Andy Fleming |
10 | * Maintainer: Kumar Gala |
11 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> |
12 | * |
13 | * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. |
14 | * Copyright 2007 MontaVista Software, Inc. |
15 | * |
16 | * Gianfar: AKA Lambda Draconis, "Dragon" |
17 | * RA 11 31 24.2 |
18 | * Dec +69 19 52 |
19 | * V 3.84 |
20 | * B-V +1.62 |
21 | * |
22 | * Theory of operation |
23 | * |
24 | * The driver is initialized through of_device. Configuration information |
25 | * is therefore conveyed through an OF-style device tree. |
26 | * |
27 | * The Gianfar Ethernet Controller uses a ring of buffer |
28 | * descriptors. The beginning is indicated by a register |
29 | * pointing to the physical address of the start of the ring. |
30 | * The end is determined by a "wrap" bit being set in the |
31 | * last descriptor of the ring. |
32 | * |
33 | * When a packet is received, the RXF bit in the |
34 | * IEVENT register is set, triggering an interrupt when the |
35 | * corresponding bit in the IMASK register is also set (if |
36 | * interrupt coalescing is active, then the interrupt may not |
37 | * happen immediately, but will wait until either a set number |
38 | * of frames or amount of time have passed). In NAPI, the |
39 | * interrupt handler will signal there is work to be done, and |
40 | * exit. This method will start at the last known empty |
41 | * descriptor, and process every subsequent descriptor until there |
42 | * are none left with data (NAPI will stop after a set number of |
43 | * packets to give time to other tasks, but will eventually |
44 | * process all the packets). The data arrives inside a |
45 | * pre-allocated skb, and so after the skb is passed up to the |
46 | * stack, a new skb must be allocated, and the address field in |
47 | * the buffer descriptor must be updated to indicate this new |
48 | * skb. |
49 | * |
50 | * When the kernel requests that a packet be transmitted, the |
51 | * driver starts where it left off last time, and points the |
52 | * descriptor at the buffer which was passed in. The driver |
53 | * then informs the DMA engine that there are packets ready to |
54 | * be transmitted. Once the controller is finished transmitting |
55 | * the packet, an interrupt may be triggered (under the same |
56 | * conditions as for reception, but depending on the TXF bit). |
57 | * The driver then cleans up the buffer. |
58 | */ |
59 | |
60 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
61 | |
62 | #include <linux/kernel.h> |
63 | #include <linux/platform_device.h> |
64 | #include <linux/string.h> |
65 | #include <linux/errno.h> |
66 | #include <linux/unistd.h> |
67 | #include <linux/slab.h> |
68 | #include <linux/interrupt.h> |
69 | #include <linux/delay.h> |
70 | #include <linux/netdevice.h> |
71 | #include <linux/etherdevice.h> |
72 | #include <linux/skbuff.h> |
73 | #include <linux/if_vlan.h> |
74 | #include <linux/spinlock.h> |
75 | #include <linux/mm.h> |
76 | #include <linux/of_address.h> |
77 | #include <linux/of_irq.h> |
78 | #include <linux/of_mdio.h> |
79 | #include <linux/ip.h> |
80 | #include <linux/tcp.h> |
81 | #include <linux/udp.h> |
82 | #include <linux/in.h> |
83 | #include <linux/net_tstamp.h> |
84 | |
85 | #include <asm/io.h> |
86 | #ifdef CONFIG_PPC |
87 | #include <asm/reg.h> |
88 | #include <asm/mpc85xx.h> |
89 | #endif |
90 | #include <asm/irq.h> |
91 | #include <linux/uaccess.h> |
92 | #include <linux/module.h> |
93 | #include <linux/dma-mapping.h> |
94 | #include <linux/crc32.h> |
95 | #include <linux/mii.h> |
96 | #include <linux/phy.h> |
97 | #include <linux/phy_fixed.h> |
98 | #include <linux/of.h> |
99 | #include <linux/of_net.h> |
100 | |
101 | #include "gianfar.h" |
102 | |
103 | #define TX_TIMEOUT (5*HZ) |
104 | |
105 | MODULE_AUTHOR("Freescale Semiconductor, Inc" ); |
106 | MODULE_DESCRIPTION("Gianfar Ethernet Driver" ); |
107 | MODULE_LICENSE("GPL" ); |
108 | |
109 | static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
110 | dma_addr_t buf) |
111 | { |
112 | u32 lstatus; |
113 | |
114 | bdp->bufPtr = cpu_to_be32(buf); |
115 | |
116 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); |
117 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) |
118 | lstatus |= BD_LFLAG(RXBD_WRAP); |
119 | |
120 | gfar_wmb(); |
121 | |
122 | bdp->lstatus = cpu_to_be32(lstatus); |
123 | } |
124 | |
125 | static void gfar_init_tx_rx_base(struct gfar_private *priv) |
126 | { |
127 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
128 | u32 __iomem *baddr; |
129 | int i; |
130 | |
131 | baddr = ®s->tbase0; |
132 | for (i = 0; i < priv->num_tx_queues; i++) { |
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
134 | baddr += 2; |
135 | } |
136 | |
137 | baddr = ®s->rbase0; |
138 | for (i = 0; i < priv->num_rx_queues; i++) { |
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
140 | baddr += 2; |
141 | } |
142 | } |
143 | |
144 | static void gfar_init_rqprm(struct gfar_private *priv) |
145 | { |
146 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
147 | u32 __iomem *baddr; |
148 | int i; |
149 | |
150 | baddr = ®s->rqprm0; |
151 | for (i = 0; i < priv->num_rx_queues; i++) { |
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
154 | baddr++; |
155 | } |
156 | } |
157 | |
158 | static void gfar_rx_offload_en(struct gfar_private *priv) |
159 | { |
160 | /* set this when rx hw offload (TOE) functions are being used */ |
161 | priv->uses_rxfcb = 0; |
162 | |
163 | if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) |
164 | priv->uses_rxfcb = 1; |
165 | |
166 | if (priv->hwts_rx_en || priv->rx_filer_enable) |
167 | priv->uses_rxfcb = 1; |
168 | } |
169 | |
170 | static void gfar_mac_rx_config(struct gfar_private *priv) |
171 | { |
172 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
173 | u32 rctrl = 0; |
174 | |
175 | if (priv->rx_filer_enable) { |
176 | rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; |
177 | /* Program the RIR0 reg with the required distribution */ |
178 | gfar_write(addr: ®s->rir0, DEFAULT_2RXQ_RIR0); |
179 | } |
180 | |
181 | /* Restore PROMISC mode */ |
182 | if (priv->ndev->flags & IFF_PROMISC) |
183 | rctrl |= RCTRL_PROM; |
184 | |
185 | if (priv->ndev->features & NETIF_F_RXCSUM) |
186 | rctrl |= RCTRL_CHECKSUMMING; |
187 | |
188 | if (priv->extended_hash) |
189 | rctrl |= RCTRL_EXTHASH | RCTRL_EMEN; |
190 | |
191 | if (priv->padding) { |
192 | rctrl &= ~RCTRL_PAL_MASK; |
193 | rctrl |= RCTRL_PADDING(priv->padding); |
194 | } |
195 | |
196 | /* Enable HW time stamping if requested from user space */ |
197 | if (priv->hwts_rx_en) |
198 | rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; |
199 | |
200 | if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) |
201 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; |
202 | |
203 | /* Clear the LFC bit */ |
204 | gfar_write(addr: ®s->rctrl, val: rctrl); |
205 | /* Init flow control threshold values */ |
206 | gfar_init_rqprm(priv); |
207 | gfar_write(addr: ®s->ptv, DEFAULT_LFC_PTVVAL); |
208 | rctrl |= RCTRL_LFC; |
209 | |
210 | /* Init rctrl based on our settings */ |
211 | gfar_write(addr: ®s->rctrl, val: rctrl); |
212 | } |
213 | |
214 | static void gfar_mac_tx_config(struct gfar_private *priv) |
215 | { |
216 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
217 | u32 tctrl = 0; |
218 | |
219 | if (priv->ndev->features & NETIF_F_IP_CSUM) |
220 | tctrl |= TCTRL_INIT_CSUM; |
221 | |
222 | if (priv->prio_sched_en) |
223 | tctrl |= TCTRL_TXSCHED_PRIO; |
224 | else { |
225 | tctrl |= TCTRL_TXSCHED_WRRS; |
226 | gfar_write(addr: ®s->tr03wt, DEFAULT_WRRS_WEIGHT); |
227 | gfar_write(addr: ®s->tr47wt, DEFAULT_WRRS_WEIGHT); |
228 | } |
229 | |
230 | if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) |
231 | tctrl |= TCTRL_VLINS; |
232 | |
233 | gfar_write(addr: ®s->tctrl, val: tctrl); |
234 | } |
235 | |
236 | static void gfar_configure_coalescing(struct gfar_private *priv, |
237 | unsigned long tx_mask, unsigned long rx_mask) |
238 | { |
239 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
240 | u32 __iomem *baddr; |
241 | |
242 | if (priv->mode == MQ_MG_MODE) { |
243 | int i = 0; |
244 | |
245 | baddr = ®s->txic0; |
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
270 | } |
271 | |
272 | static void gfar_configure_coalescing_all(struct gfar_private *priv) |
273 | { |
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
275 | } |
276 | |
277 | static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
278 | { |
279 | struct gfar_private *priv = netdev_priv(dev); |
280 | int i; |
281 | |
282 | for (i = 0; i < priv->num_rx_queues; i++) { |
283 | stats->rx_packets += priv->rx_queue[i]->stats.rx_packets; |
284 | stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes; |
285 | stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped; |
286 | } |
287 | |
288 | for (i = 0; i < priv->num_tx_queues; i++) { |
289 | stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes; |
290 | stats->tx_packets += priv->tx_queue[i]->stats.tx_packets; |
291 | } |
292 | |
293 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
294 | struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; |
295 | unsigned long flags; |
296 | u32 rdrp, car, car_before; |
297 | u64 rdrp_offset; |
298 | |
299 | spin_lock_irqsave(&priv->rmon_overflow.lock, flags); |
		car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		do {
			car_before = car;
			rdrp = gfar_read(&rmon->rdrp);
			car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		} while (car != car_before);
		if (car) {
			priv->rmon_overflow.rdrp++;
			gfar_write(&rmon->car1, car);
		}
		rdrp_offset = priv->rmon_overflow.rdrp;
		spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);
312 | |
313 | stats->rx_missed_errors = rdrp + (rdrp_offset << 16); |
314 | } |
315 | } |
316 | |
317 | /* Set the appropriate hash bit for the given addr */ |
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits in the
 *    hash index determine which gaddr register to use, and the 5
 *    other bits indicate which bit (assuming an IBM numbering
 *    scheme, which for PowerPC (tm) is usually the case) in the
 *    register holds the entry.
 */
331 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) |
332 | { |
333 | u32 tempval; |
334 | struct gfar_private *priv = netdev_priv(dev); |
335 | u32 result = ether_crc(ETH_ALEN, addr); |
336 | int width = priv->hash_width; |
337 | u8 whichbit = (result >> (32 - width)) & 0x1f; |
338 | u8 whichreg = result >> (32 - width + 5); |
339 | u32 value = (1 << (31-whichbit)); |
340 | |
	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
344 | } |
345 | |
346 | /* There are multiple MAC Address register pairs on some controllers |
347 | * This function sets the numth pair to a given address |
348 | */ |
349 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
350 | const u8 *addr) |
351 | { |
352 | struct gfar_private *priv = netdev_priv(dev); |
353 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
354 | u32 tempval; |
355 | u32 __iomem *macptr = ®s->macstnaddr1; |
356 | |
357 | macptr += num*2; |
358 | |
359 | /* For a station address of 0x12345678ABCD in transmission |
360 | * order (BE), MACnADDR1 is set to 0xCDAB7856 and |
361 | * MACnADDR2 is set to 0x34120000. |
362 | */ |
363 | tempval = (addr[5] << 24) | (addr[4] << 16) | |
364 | (addr[3] << 8) | addr[2]; |
365 | |
	gfar_write(macptr, tempval);
367 | |
368 | tempval = (addr[1] << 24) | (addr[0] << 16); |
369 | |
	gfar_write(macptr+1, tempval);
371 | } |
372 | |
373 | static int gfar_set_mac_addr(struct net_device *dev, void *p) |
374 | { |
375 | int ret; |
376 | |
377 | ret = eth_mac_addr(dev, p); |
378 | if (ret) |
379 | return ret; |
380 | |
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
382 | |
383 | return 0; |
384 | } |
385 | |
386 | static void gfar_ints_disable(struct gfar_private *priv) |
387 | { |
388 | int i; |
389 | for (i = 0; i < priv->num_grps; i++) { |
390 | struct gfar __iomem *regs = priv->gfargrp[i].regs; |
391 | /* Clear IEVENT */ |
392 | gfar_write(addr: ®s->ievent, IEVENT_INIT_CLEAR); |
393 | |
394 | /* Initialize IMASK */ |
395 | gfar_write(addr: ®s->imask, IMASK_INIT_CLEAR); |
396 | } |
397 | } |
398 | |
399 | static void gfar_ints_enable(struct gfar_private *priv) |
400 | { |
401 | int i; |
402 | for (i = 0; i < priv->num_grps; i++) { |
403 | struct gfar __iomem *regs = priv->gfargrp[i].regs; |
404 | /* Unmask the interrupts we look for */ |
405 | gfar_write(addr: ®s->imask, |
406 | IMASK_DEFAULT | priv->rmon_overflow.imask); |
407 | } |
408 | } |
409 | |
410 | static int gfar_alloc_tx_queues(struct gfar_private *priv) |
411 | { |
412 | int i; |
413 | |
414 | for (i = 0; i < priv->num_tx_queues; i++) { |
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
417 | if (!priv->tx_queue[i]) |
418 | return -ENOMEM; |
419 | |
420 | priv->tx_queue[i]->tx_skbuff = NULL; |
421 | priv->tx_queue[i]->qindex = i; |
422 | priv->tx_queue[i]->dev = priv->ndev; |
423 | spin_lock_init(&(priv->tx_queue[i]->txlock)); |
424 | } |
425 | return 0; |
426 | } |
427 | |
428 | static int gfar_alloc_rx_queues(struct gfar_private *priv) |
429 | { |
430 | int i; |
431 | |
432 | for (i = 0; i < priv->num_rx_queues; i++) { |
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
435 | if (!priv->rx_queue[i]) |
436 | return -ENOMEM; |
437 | |
438 | priv->rx_queue[i]->qindex = i; |
439 | priv->rx_queue[i]->ndev = priv->ndev; |
440 | } |
441 | return 0; |
442 | } |
443 | |
444 | static void gfar_free_tx_queues(struct gfar_private *priv) |
445 | { |
446 | int i; |
447 | |
448 | for (i = 0; i < priv->num_tx_queues; i++) |
		kfree(priv->tx_queue[i]);
450 | } |
451 | |
452 | static void gfar_free_rx_queues(struct gfar_private *priv) |
453 | { |
454 | int i; |
455 | |
456 | for (i = 0; i < priv->num_rx_queues; i++) |
		kfree(priv->rx_queue[i]);
458 | } |
459 | |
460 | static void unmap_group_regs(struct gfar_private *priv) |
461 | { |
462 | int i; |
463 | |
464 | for (i = 0; i < MAXGROUPS; i++) |
465 | if (priv->gfargrp[i].regs) |
			iounmap(priv->gfargrp[i].regs);
467 | } |
468 | |
469 | static void free_gfar_dev(struct gfar_private *priv) |
470 | { |
471 | int i, j; |
472 | |
473 | for (i = 0; i < priv->num_grps; i++) |
474 | for (j = 0; j < GFAR_NUM_IRQS; j++) { |
			kfree(priv->gfargrp[i].irqinfo[j]);
476 | priv->gfargrp[i].irqinfo[j] = NULL; |
477 | } |
478 | |
	free_netdev(priv->ndev);
480 | } |
481 | |
482 | static void disable_napi(struct gfar_private *priv) |
483 | { |
484 | int i; |
485 | |
486 | for (i = 0; i < priv->num_grps; i++) { |
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
489 | } |
490 | } |
491 | |
492 | static void enable_napi(struct gfar_private *priv) |
493 | { |
494 | int i; |
495 | |
496 | for (i = 0; i < priv->num_grps; i++) { |
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
499 | } |
500 | } |
501 | |
502 | static int gfar_parse_group(struct device_node *np, |
503 | struct gfar_private *priv, const char *model) |
504 | { |
505 | struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; |
506 | int i; |
507 | |
508 | for (i = 0; i < GFAR_NUM_IRQS; i++) { |
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
511 | if (!grp->irqinfo[i]) |
512 | return -ENOMEM; |
513 | } |
514 | |
	grp->regs = of_iomap(np, 0);
516 | if (!grp->regs) |
517 | return -ENOMEM; |
518 | |
	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
525 | if (!gfar_irq(grp, TX)->irq || |
526 | !gfar_irq(grp, RX)->irq || |
527 | !gfar_irq(grp, ER)->irq) |
528 | return -EINVAL; |
529 | } |
530 | |
531 | grp->priv = priv; |
532 | spin_lock_init(&grp->grplock); |
533 | if (priv->mode == MQ_MG_MODE) { |
534 | /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ |
535 | grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); |
536 | grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); |
537 | } else { |
538 | grp->rx_bit_map = 0xFF; |
539 | grp->tx_bit_map = 0xFF; |
540 | } |
541 | |
542 | /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses |
543 | * right to left, so we need to revert the 8 bits to get the q index |
544 | */ |
545 | grp->rx_bit_map = bitrev8(grp->rx_bit_map); |
546 | grp->tx_bit_map = bitrev8(grp->tx_bit_map); |
547 | |
548 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, |
549 | * also assign queues to groups |
550 | */ |
551 | for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { |
552 | if (!grp->rx_queue) |
553 | grp->rx_queue = priv->rx_queue[i]; |
554 | grp->num_rx_queues++; |
555 | grp->rstat |= (RSTAT_CLEAR_RHALT >> i); |
556 | priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); |
557 | priv->rx_queue[i]->grp = grp; |
558 | } |
559 | |
560 | for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { |
561 | if (!grp->tx_queue) |
562 | grp->tx_queue = priv->tx_queue[i]; |
563 | grp->num_tx_queues++; |
564 | grp->tstat |= (TSTAT_CLEAR_THALT >> i); |
565 | priv->tqueue |= (TQUEUE_EN0 >> i); |
566 | priv->tx_queue[i]->grp = grp; |
567 | } |
568 | |
569 | priv->num_grps++; |
570 | |
571 | return 0; |
572 | } |
573 | |
574 | static int gfar_of_group_count(struct device_node *np) |
575 | { |
576 | struct device_node *child; |
577 | int num = 0; |
578 | |
579 | for_each_available_child_of_node(np, child) |
		if (of_node_name_eq(child, "queue-group"))
581 | num++; |
582 | |
583 | return num; |
584 | } |
585 | |
586 | /* Reads the controller's registers to determine what interface |
587 | * connects it to the PHY. |
588 | */ |
589 | static phy_interface_t gfar_get_interface(struct net_device *dev) |
590 | { |
591 | struct gfar_private *priv = netdev_priv(dev); |
592 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
593 | u32 ecntrl; |
594 | |
595 | ecntrl = gfar_read(addr: ®s->ecntrl); |
596 | |
597 | if (ecntrl & ECNTRL_SGMII_MODE) |
598 | return PHY_INTERFACE_MODE_SGMII; |
599 | |
600 | if (ecntrl & ECNTRL_TBI_MODE) { |
601 | if (ecntrl & ECNTRL_REDUCED_MODE) |
602 | return PHY_INTERFACE_MODE_RTBI; |
603 | else |
604 | return PHY_INTERFACE_MODE_TBI; |
605 | } |
606 | |
607 | if (ecntrl & ECNTRL_REDUCED_MODE) { |
608 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) { |
609 | return PHY_INTERFACE_MODE_RMII; |
610 | } |
611 | else { |
612 | phy_interface_t interface = priv->interface; |
613 | |
614 | /* This isn't autodetected right now, so it must |
615 | * be set by the device tree or platform code. |
616 | */ |
617 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) |
618 | return PHY_INTERFACE_MODE_RGMII_ID; |
619 | |
620 | return PHY_INTERFACE_MODE_RGMII; |
621 | } |
622 | } |
623 | |
624 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
625 | return PHY_INTERFACE_MODE_GMII; |
626 | |
627 | return PHY_INTERFACE_MODE_MII; |
628 | } |
629 | |
630 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) |
631 | { |
632 | const char *model; |
633 | int err = 0, i; |
634 | phy_interface_t interface; |
635 | struct net_device *dev = NULL; |
636 | struct gfar_private *priv = NULL; |
637 | struct device_node *np = ofdev->dev.of_node; |
638 | struct device_node *child = NULL; |
639 | u32 stash_len = 0; |
640 | u32 stash_idx = 0; |
641 | unsigned int num_tx_qs, num_rx_qs; |
642 | unsigned short mode; |
643 | |
644 | if (!np) |
645 | return -ENODEV; |
646 | |
	if (of_device_is_compatible(np, "fsl,etsec2"))
648 | mode = MQ_MG_MODE; |
649 | else |
650 | mode = SQ_SG_MODE; |
651 | |
652 | if (mode == SQ_SG_MODE) { |
653 | num_tx_qs = 1; |
654 | num_rx_qs = 1; |
655 | } else { /* MQ_MG_MODE */ |
656 | /* get the actual number of supported groups */ |
657 | unsigned int num_grps = gfar_of_group_count(np); |
658 | |
659 | if (num_grps == 0 || num_grps > MAXGROUPS) { |
660 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n" , |
661 | num_grps); |
662 | pr_err("Cannot do alloc_etherdev, aborting\n" ); |
663 | return -EINVAL; |
664 | } |
665 | |
666 | num_tx_qs = num_grps; /* one txq per int group */ |
667 | num_rx_qs = num_grps; /* one rxq per int group */ |
668 | } |
669 | |
670 | if (num_tx_qs > MAX_TX_QS) { |
671 | pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n" , |
672 | num_tx_qs, MAX_TX_QS); |
673 | pr_err("Cannot do alloc_etherdev, aborting\n" ); |
674 | return -EINVAL; |
675 | } |
676 | |
677 | if (num_rx_qs > MAX_RX_QS) { |
678 | pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n" , |
679 | num_rx_qs, MAX_RX_QS); |
680 | pr_err("Cannot do alloc_etherdev, aborting\n" ); |
681 | return -EINVAL; |
682 | } |
683 | |
684 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); |
685 | dev = *pdev; |
	if (!dev)
687 | return -ENOMEM; |
688 | |
689 | priv = netdev_priv(dev); |
690 | priv->ndev = dev; |
691 | |
692 | priv->mode = mode; |
693 | |
694 | priv->num_tx_queues = num_tx_qs; |
	netif_set_real_num_rx_queues(dev, num_rx_qs);
696 | priv->num_rx_queues = num_rx_qs; |
697 | |
698 | err = gfar_alloc_tx_queues(priv); |
699 | if (err) |
700 | goto tx_alloc_failed; |
701 | |
702 | err = gfar_alloc_rx_queues(priv); |
703 | if (err) |
704 | goto rx_alloc_failed; |
705 | |
706 | err = of_property_read_string(np, propname: "model" , out_string: &model); |
707 | if (err) { |
708 | pr_err("Device model property missing, aborting\n" ); |
709 | goto rx_alloc_failed; |
710 | } |
711 | |
712 | /* Init Rx queue filer rule set linked list */ |
	INIT_LIST_HEAD(&priv->rx_list.list);
714 | priv->rx_list.count = 0; |
715 | mutex_init(&priv->rx_queue_access); |
716 | |
717 | for (i = 0; i < MAXGROUPS; i++) |
718 | priv->gfargrp[i].regs = NULL; |
719 | |
720 | /* Parse and initialize group specific information */ |
721 | if (priv->mode == MQ_MG_MODE) { |
722 | for_each_available_child_of_node(np, child) { |
			if (!of_node_name_eq(child, "queue-group"))
724 | continue; |
725 | |
			err = gfar_parse_group(child, priv, model);
			if (err) {
				of_node_put(child);
729 | goto err_grp_init; |
730 | } |
731 | } |
732 | } else { /* SQ_SG_MODE */ |
733 | err = gfar_parse_group(np, priv, model); |
734 | if (err) |
735 | goto err_grp_init; |
736 | } |
737 | |
738 | if (of_property_read_bool(np, propname: "bd-stash" )) { |
739 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; |
740 | priv->bd_stash_en = 1; |
741 | } |
742 | |
743 | err = of_property_read_u32(np, propname: "rx-stash-len" , out_value: &stash_len); |
744 | |
745 | if (err == 0) |
746 | priv->rx_stash_size = stash_len; |
747 | |
748 | err = of_property_read_u32(np, propname: "rx-stash-idx" , out_value: &stash_idx); |
749 | |
750 | if (err == 0) |
751 | priv->rx_stash_index = stash_idx; |
752 | |
753 | if (stash_len || stash_idx) |
754 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; |
755 | |
756 | err = of_get_ethdev_address(np, dev); |
757 | if (err) { |
758 | eth_hw_addr_random(dev); |
759 | dev_info(&ofdev->dev, "Using random MAC address: %pM\n" , dev->dev_addr); |
760 | } |
761 | |
762 | if (model && !strcasecmp(s1: model, s2: "TSEC" )) |
763 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | |
764 | FSL_GIANFAR_DEV_HAS_COALESCE | |
765 | FSL_GIANFAR_DEV_HAS_RMON | |
766 | FSL_GIANFAR_DEV_HAS_MULTI_INTR; |
767 | |
768 | if (model && !strcasecmp(s1: model, s2: "eTSEC" )) |
769 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | |
770 | FSL_GIANFAR_DEV_HAS_COALESCE | |
771 | FSL_GIANFAR_DEV_HAS_RMON | |
772 | FSL_GIANFAR_DEV_HAS_MULTI_INTR | |
773 | FSL_GIANFAR_DEV_HAS_CSUM | |
774 | FSL_GIANFAR_DEV_HAS_VLAN | |
775 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | |
776 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | |
777 | FSL_GIANFAR_DEV_HAS_TIMER | |
778 | FSL_GIANFAR_DEV_HAS_RX_FILER; |
779 | |
780 | /* Use PHY connection type from the DT node if one is specified there. |
781 | * rgmii-id really needs to be specified. Other types can be |
782 | * detected by hardware |
783 | */ |
	err = of_get_phy_mode(np, &interface);
785 | if (!err) |
786 | priv->interface = interface; |
787 | else |
788 | priv->interface = gfar_get_interface(dev); |
789 | |
790 | if (of_property_read_bool(np, propname: "fsl,magic-packet" )) |
791 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; |
792 | |
793 | if (of_property_read_bool(np, propname: "fsl,wake-on-filer" )) |
794 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; |
795 | |
796 | priv->phy_node = of_parse_phandle(np, phandle_name: "phy-handle" , index: 0); |
797 | |
798 | /* In the case of a fixed PHY, the DT node associated |
799 | * to the PHY is the Ethernet MAC DT node. |
800 | */ |
801 | if (!priv->phy_node && of_phy_is_fixed_link(np)) { |
802 | err = of_phy_register_fixed_link(np); |
803 | if (err) |
804 | goto err_grp_init; |
805 | |
		priv->phy_node = of_node_get(np);
807 | } |
808 | |
809 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ |
810 | priv->tbi_node = of_parse_phandle(np, phandle_name: "tbi-handle" , index: 0); |
811 | |
812 | return 0; |
813 | |
814 | err_grp_init: |
815 | unmap_group_regs(priv); |
816 | rx_alloc_failed: |
817 | gfar_free_rx_queues(priv); |
818 | tx_alloc_failed: |
819 | gfar_free_tx_queues(priv); |
820 | free_gfar_dev(priv); |
821 | return err; |
822 | } |
823 | |
824 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, |
825 | u32 class) |
826 | { |
827 | u32 rqfpr = FPR_FILER_MASK; |
828 | u32 rqfcr = 0x0; |
829 | |
830 | rqfar--; |
831 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; |
832 | priv->ftp_rqfpr[rqfar] = rqfpr; |
833 | priv->ftp_rqfcr[rqfar] = rqfcr; |
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
835 | |
836 | rqfar--; |
837 | rqfcr = RQFCR_CMP_NOMATCH; |
838 | priv->ftp_rqfpr[rqfar] = rqfpr; |
839 | priv->ftp_rqfcr[rqfar] = rqfcr; |
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
841 | |
842 | rqfar--; |
843 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; |
844 | rqfpr = class; |
845 | priv->ftp_rqfcr[rqfar] = rqfcr; |
846 | priv->ftp_rqfpr[rqfar] = rqfpr; |
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
848 | |
849 | rqfar--; |
850 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; |
851 | rqfpr = class; |
852 | priv->ftp_rqfcr[rqfar] = rqfcr; |
853 | priv->ftp_rqfpr[rqfar] = rqfpr; |
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
855 | |
856 | return rqfar; |
857 | } |
858 | |
859 | static void gfar_init_filer_table(struct gfar_private *priv) |
860 | { |
861 | int i = 0x0; |
862 | u32 rqfar = MAX_FILER_IDX; |
863 | u32 rqfcr = 0x0; |
864 | u32 rqfpr = FPR_FILER_MASK; |
865 | |
866 | /* Default rule */ |
867 | rqfcr = RQFCR_CMP_MATCH; |
868 | priv->ftp_rqfcr[rqfar] = rqfcr; |
869 | priv->ftp_rqfpr[rqfar] = rqfpr; |
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
871 | |
872 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); |
873 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); |
874 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); |
875 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); |
876 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); |
877 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); |
878 | |
	/* cur_filer_idx indicates the first non-masked rule */
880 | priv->cur_filer_idx = rqfar; |
881 | |
882 | /* Rest are masked rules */ |
883 | rqfcr = RQFCR_CMP_NOMATCH; |
884 | for (i = 0; i < rqfar; i++) { |
885 | priv->ftp_rqfcr[i] = rqfcr; |
886 | priv->ftp_rqfpr[i] = rqfpr; |
		gfar_write_filer(priv, i, rqfcr, rqfpr);
888 | } |
889 | } |
890 | |
891 | #ifdef CONFIG_PPC |
892 | static void __gfar_detect_errata_83xx(struct gfar_private *priv) |
893 | { |
894 | unsigned int pvr = mfspr(SPRN_PVR); |
895 | unsigned int svr = mfspr(SPRN_SVR); |
896 | unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ |
897 | unsigned int rev = svr & 0xffff; |
898 | |
899 | /* MPC8313 Rev 2.0 and higher; All MPC837x */ |
900 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || |
901 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
902 | priv->errata |= GFAR_ERRATA_74; |
903 | |
904 | /* MPC8313 and MPC837x all rev */ |
905 | if ((pvr == 0x80850010 && mod == 0x80b0) || |
906 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
907 | priv->errata |= GFAR_ERRATA_76; |
908 | |
909 | /* MPC8313 Rev < 2.0 */ |
910 | if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) |
911 | priv->errata |= GFAR_ERRATA_12; |
912 | } |
913 | |
914 | static void __gfar_detect_errata_85xx(struct gfar_private *priv) |
915 | { |
916 | unsigned int svr = mfspr(SPRN_SVR); |
917 | |
918 | if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) |
919 | priv->errata |= GFAR_ERRATA_12; |
920 | /* P2020/P1010 Rev 1; MPC8548 Rev 2 */ |
921 | if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || |
922 | ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) || |
923 | ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31))) |
924 | priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ |
925 | } |
926 | #endif |
927 | |
928 | static void gfar_detect_errata(struct gfar_private *priv) |
929 | { |
930 | struct device *dev = &priv->ofdev->dev; |
931 | |
932 | /* no plans to fix */ |
933 | priv->errata |= GFAR_ERRATA_A002; |
934 | |
935 | #ifdef CONFIG_PPC |
936 | if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) |
937 | __gfar_detect_errata_85xx(priv); |
938 | else /* non-mpc85xx parts, i.e. e300 core based */ |
939 | __gfar_detect_errata_83xx(priv); |
940 | #endif |
941 | |
942 | if (priv->errata) |
943 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n" , |
944 | priv->errata); |
945 | } |
946 | |
947 | static void gfar_init_addr_hash_table(struct gfar_private *priv) |
948 | { |
949 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
950 | |
951 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { |
952 | priv->extended_hash = 1; |
953 | priv->hash_width = 9; |
954 | |
955 | priv->hash_regs[0] = ®s->igaddr0; |
956 | priv->hash_regs[1] = ®s->igaddr1; |
957 | priv->hash_regs[2] = ®s->igaddr2; |
958 | priv->hash_regs[3] = ®s->igaddr3; |
959 | priv->hash_regs[4] = ®s->igaddr4; |
960 | priv->hash_regs[5] = ®s->igaddr5; |
961 | priv->hash_regs[6] = ®s->igaddr6; |
962 | priv->hash_regs[7] = ®s->igaddr7; |
963 | priv->hash_regs[8] = ®s->gaddr0; |
964 | priv->hash_regs[9] = ®s->gaddr1; |
965 | priv->hash_regs[10] = ®s->gaddr2; |
966 | priv->hash_regs[11] = ®s->gaddr3; |
967 | priv->hash_regs[12] = ®s->gaddr4; |
968 | priv->hash_regs[13] = ®s->gaddr5; |
969 | priv->hash_regs[14] = ®s->gaddr6; |
970 | priv->hash_regs[15] = ®s->gaddr7; |
971 | |
972 | } else { |
973 | priv->extended_hash = 0; |
974 | priv->hash_width = 8; |
975 | |
976 | priv->hash_regs[0] = ®s->gaddr0; |
977 | priv->hash_regs[1] = ®s->gaddr1; |
978 | priv->hash_regs[2] = ®s->gaddr2; |
979 | priv->hash_regs[3] = ®s->gaddr3; |
980 | priv->hash_regs[4] = ®s->gaddr4; |
981 | priv->hash_regs[5] = ®s->gaddr5; |
982 | priv->hash_regs[6] = ®s->gaddr6; |
983 | priv->hash_regs[7] = ®s->gaddr7; |
984 | } |
985 | } |
986 | |
987 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
988 | { |
989 | u32 res; |
990 | |
	/* Normally TSEC should not hang on GRS commands, so we should
992 | * actually wait for IEVENT_GRSC flag. |
993 | */ |
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
995 | return 0; |
996 | |
997 | /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are |
998 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle |
999 | * and the Rx can be safely reset. |
1000 | */ |
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1002 | res &= 0x7f807f80; |
1003 | if ((res & 0xffff) == (res >> 16)) |
1004 | return 1; |
1005 | |
1006 | return 0; |
1007 | } |
1008 | |
1009 | /* Halt the receive and transmit queues */ |
1010 | static void gfar_halt_nodisable(struct gfar_private *priv) |
1011 | { |
1012 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1013 | u32 tempval; |
1014 | unsigned int timeout; |
1015 | int stopped; |
1016 | |
1017 | gfar_ints_disable(priv); |
1018 | |
1019 | if (gfar_is_dma_stopped(priv)) |
1020 | return; |
1021 | |
1022 | /* Stop the DMA, and wait for it to stop */ |
1023 | tempval = gfar_read(addr: ®s->dmactrl); |
1024 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1025 | gfar_write(addr: ®s->dmactrl, val: tempval); |
1026 | |
1027 | retry: |
1028 | timeout = 1000; |
1029 | while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { |
1030 | cpu_relax(); |
1031 | timeout--; |
1032 | } |
1033 | |
1034 | if (!timeout) |
1035 | stopped = gfar_is_dma_stopped(priv); |
1036 | |
1037 | if (!stopped && !gfar_is_rx_dma_stopped(priv) && |
1038 | !__gfar_is_rx_idle(priv)) |
1039 | goto retry; |
1040 | } |
1041 | |
1042 | /* Halt the receive and transmit queues */ |
1043 | static void gfar_halt(struct gfar_private *priv) |
1044 | { |
1045 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1046 | u32 tempval; |
1047 | |
	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);
1051 | |
1052 | mdelay(10); |
1053 | |
1054 | gfar_halt_nodisable(priv); |
1055 | |
1056 | /* Disable Rx/Tx DMA */ |
1057 | tempval = gfar_read(addr: ®s->maccfg1); |
1058 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); |
1059 | gfar_write(addr: ®s->maccfg1, val: tempval); |
1060 | } |
1061 | |
1062 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1063 | { |
1064 | struct txbd8 *txbdp; |
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1066 | int i, j; |
1067 | |
1068 | txbdp = tx_queue->tx_bd_base; |
1069 | |
1070 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1071 | if (!tx_queue->tx_skbuff[i]) |
1072 | continue; |
1073 | |
1074 | dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), |
1075 | be16_to_cpu(txbdp->length), DMA_TO_DEVICE); |
1076 | txbdp->lstatus = 0; |
1077 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
1078 | j++) { |
1079 | txbdp++; |
1080 | dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), |
1081 | be16_to_cpu(txbdp->length), |
1082 | DMA_TO_DEVICE); |
1083 | } |
1084 | txbdp++; |
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1086 | tx_queue->tx_skbuff[i] = NULL; |
1087 | } |
	kfree(tx_queue->tx_skbuff);
1089 | tx_queue->tx_skbuff = NULL; |
1090 | } |
1091 | |
1092 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1093 | { |
1094 | int i; |
1095 | |
1096 | struct rxbd8 *rxbdp = rx_queue->rx_bd_base; |
1097 | |
1098 | dev_kfree_skb(rx_queue->skb); |
1099 | |
1100 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1101 | struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; |
1102 | |
1103 | rxbdp->lstatus = 0; |
1104 | rxbdp->bufPtr = 0; |
1105 | rxbdp++; |
1106 | |
1107 | if (!rxb->page) |
1108 | continue; |
1109 | |
1110 | dma_unmap_page(rx_queue->dev, rxb->dma, |
1111 | PAGE_SIZE, DMA_FROM_DEVICE); |
1112 | __free_page(rxb->page); |
1113 | |
1114 | rxb->page = NULL; |
1115 | } |
1116 | |
	kfree(rx_queue->rx_buff);
1118 | rx_queue->rx_buff = NULL; |
1119 | } |
1120 | |
1121 | /* If there are any tx skbs or rx skbs still around, free them. |
1122 | * Then free tx_skbuff and rx_skbuff |
1123 | */ |
1124 | static void free_skb_resources(struct gfar_private *priv) |
1125 | { |
1126 | struct gfar_priv_tx_q *tx_queue = NULL; |
1127 | struct gfar_priv_rx_q *rx_queue = NULL; |
1128 | int i; |
1129 | |
1130 | /* Go through all the buffer descriptors and free their data buffers */ |
1131 | for (i = 0; i < priv->num_tx_queues; i++) { |
1132 | struct netdev_queue *txq; |
1133 | |
1134 | tx_queue = priv->tx_queue[i]; |
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1136 | if (tx_queue->tx_skbuff) |
1137 | free_skb_tx_queue(tx_queue); |
		netdev_tx_reset_queue(txq);
1139 | } |
1140 | |
1141 | for (i = 0; i < priv->num_rx_queues; i++) { |
1142 | rx_queue = priv->rx_queue[i]; |
1143 | if (rx_queue->rx_buff) |
1144 | free_skb_rx_queue(rx_queue); |
1145 | } |
1146 | |
	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
1152 | } |
1153 | |
1154 | void stop_gfar(struct net_device *dev) |
1155 | { |
1156 | struct gfar_private *priv = netdev_priv(dev); |
1157 | |
1158 | netif_tx_stop_all_queues(dev); |
1159 | |
1160 | smp_mb__before_atomic(); |
	set_bit(GFAR_DOWN, &priv->state);
1162 | smp_mb__after_atomic(); |
1163 | |
1164 | disable_napi(priv); |
1165 | |
1166 | /* disable ints and gracefully shut down Rx/Tx DMA */ |
1167 | gfar_halt(priv); |
1168 | |
	phy_stop(dev->phydev);
1170 | |
1171 | free_skb_resources(priv); |
1172 | } |
1173 | |
1174 | static void gfar_start(struct gfar_private *priv) |
1175 | { |
1176 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1177 | u32 tempval; |
1178 | int i = 0; |
1179 | |
1180 | /* Enable Rx/Tx hw queues */ |
1181 | gfar_write(addr: ®s->rqueue, val: priv->rqueue); |
1182 | gfar_write(addr: ®s->tqueue, val: priv->tqueue); |
1183 | |
1184 | /* Initialize DMACTRL to have WWR and WOP */ |
1185 | tempval = gfar_read(addr: ®s->dmactrl); |
1186 | tempval |= DMACTRL_INIT_SETTINGS; |
1187 | gfar_write(addr: ®s->dmactrl, val: tempval); |
1188 | |
1189 | /* Make sure we aren't stopped */ |
1190 | tempval = gfar_read(addr: ®s->dmactrl); |
1191 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
1192 | gfar_write(addr: ®s->dmactrl, val: tempval); |
1193 | |
1194 | for (i = 0; i < priv->num_grps; i++) { |
1195 | regs = priv->gfargrp[i].regs; |
1196 | /* Clear THLT/RHLT, so that the DMA starts polling now */ |
1197 | gfar_write(addr: ®s->tstat, val: priv->gfargrp[i].tstat); |
1198 | gfar_write(addr: ®s->rstat, val: priv->gfargrp[i].rstat); |
1199 | } |
1200 | |
1201 | /* Enable Rx/Tx DMA */ |
1202 | tempval = gfar_read(addr: ®s->maccfg1); |
1203 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); |
1204 | gfar_write(addr: ®s->maccfg1, val: tempval); |
1205 | |
1206 | gfar_ints_enable(priv); |
1207 | |
1208 | netif_trans_update(dev: priv->ndev); /* prevent tx timeout */ |
1209 | } |
1210 | |
1211 | static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) |
1212 | { |
1213 | struct page *page; |
1214 | dma_addr_t addr; |
1215 | |
1216 | page = dev_alloc_page(); |
1217 | if (unlikely(!page)) |
1218 | return false; |
1219 | |
1220 | addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); |
1221 | if (unlikely(dma_mapping_error(rxq->dev, addr))) { |
1222 | __free_page(page); |
1223 | |
1224 | return false; |
1225 | } |
1226 | |
1227 | rxb->dma = addr; |
1228 | rxb->page = page; |
1229 | rxb->page_offset = 0; |
1230 | |
1231 | return true; |
1232 | } |
1233 | |
1234 | static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) |
1235 | { |
	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1237 | struct gfar_extra_stats *estats = &priv->extra_stats; |
1238 | |
	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
	atomic64_inc(&estats->rx_alloc_err);
1241 | } |
1242 | |
1243 | static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, |
1244 | int alloc_cnt) |
1245 | { |
1246 | struct rxbd8 *bdp; |
1247 | struct gfar_rx_buff *rxb; |
1248 | int i; |
1249 | |
1250 | i = rx_queue->next_to_use; |
1251 | bdp = &rx_queue->rx_bd_base[i]; |
1252 | rxb = &rx_queue->rx_buff[i]; |
1253 | |
1254 | while (alloc_cnt--) { |
1255 | /* try reuse page */ |
1256 | if (unlikely(!rxb->page)) { |
1257 | if (unlikely(!gfar_new_page(rx_queue, rxb))) { |
1258 | gfar_rx_alloc_err(rx_queue); |
1259 | break; |
1260 | } |
1261 | } |
1262 | |
1263 | /* Setup the new RxBD */ |
		gfar_init_rxbdp(rx_queue, bdp,
				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
1266 | |
1267 | /* Update to the next pointer */ |
1268 | bdp++; |
1269 | rxb++; |
1270 | |
1271 | if (unlikely(++i == rx_queue->rx_ring_size)) { |
1272 | i = 0; |
1273 | bdp = rx_queue->rx_bd_base; |
1274 | rxb = rx_queue->rx_buff; |
1275 | } |
1276 | } |
1277 | |
1278 | rx_queue->next_to_use = i; |
1279 | rx_queue->next_to_alloc = i; |
1280 | } |
1281 | |
1282 | static void gfar_init_bds(struct net_device *ndev) |
1283 | { |
	struct gfar_private *priv = netdev_priv(ndev);
1285 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1286 | struct gfar_priv_tx_q *tx_queue = NULL; |
1287 | struct gfar_priv_rx_q *rx_queue = NULL; |
1288 | struct txbd8 *txbdp; |
1289 | u32 __iomem *rfbptr; |
1290 | int i, j; |
1291 | |
1292 | for (i = 0; i < priv->num_tx_queues; i++) { |
1293 | tx_queue = priv->tx_queue[i]; |
1294 | /* Initialize some variables in our dev structure */ |
1295 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; |
1296 | tx_queue->dirty_tx = tx_queue->tx_bd_base; |
1297 | tx_queue->cur_tx = tx_queue->tx_bd_base; |
1298 | tx_queue->skb_curtx = 0; |
1299 | tx_queue->skb_dirtytx = 0; |
1300 | |
1301 | /* Initialize Transmit Descriptor Ring */ |
1302 | txbdp = tx_queue->tx_bd_base; |
1303 | for (j = 0; j < tx_queue->tx_ring_size; j++) { |
1304 | txbdp->lstatus = 0; |
1305 | txbdp->bufPtr = 0; |
1306 | txbdp++; |
1307 | } |
1308 | |
1309 | /* Set the last descriptor in the ring to indicate wrap */ |
1310 | txbdp--; |
1311 | txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | |
1312 | TXBD_WRAP); |
1313 | } |
1314 | |
1315 | rfbptr = ®s->rfbptr0; |
1316 | for (i = 0; i < priv->num_rx_queues; i++) { |
1317 | rx_queue = priv->rx_queue[i]; |
1318 | |
1319 | rx_queue->next_to_clean = 0; |
1320 | rx_queue->next_to_use = 0; |
1321 | rx_queue->next_to_alloc = 0; |
1322 | |
1323 | /* make sure next_to_clean != next_to_use after this |
1324 | * by leaving at least 1 unused descriptor |
1325 | */ |
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1327 | |
1328 | rx_queue->rfbptr = rfbptr; |
1329 | rfbptr += 2; |
1330 | } |
1331 | } |
1332 | |
1333 | static int gfar_alloc_skb_resources(struct net_device *ndev) |
1334 | { |
1335 | void *vaddr; |
1336 | dma_addr_t addr; |
1337 | int i, j; |
	struct gfar_private *priv = netdev_priv(ndev);
1339 | struct device *dev = priv->dev; |
1340 | struct gfar_priv_tx_q *tx_queue = NULL; |
1341 | struct gfar_priv_rx_q *rx_queue = NULL; |
1342 | |
1343 | priv->total_tx_ring_size = 0; |
1344 | for (i = 0; i < priv->num_tx_queues; i++) |
1345 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; |
1346 | |
1347 | priv->total_rx_ring_size = 0; |
1348 | for (i = 0; i < priv->num_rx_queues; i++) |
1349 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; |
1350 | |
1351 | /* Allocate memory for the buffer descriptors */ |
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
1358 | if (!vaddr) |
1359 | return -ENOMEM; |
1360 | |
1361 | for (i = 0; i < priv->num_tx_queues; i++) { |
1362 | tx_queue = priv->tx_queue[i]; |
1363 | tx_queue->tx_bd_base = vaddr; |
1364 | tx_queue->tx_bd_dma_base = addr; |
1365 | tx_queue->dev = ndev; |
1366 | /* enet DMA only understands physical addresses */ |
1367 | addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; |
1368 | vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; |
1369 | } |
1370 | |
1371 | /* Start the rx descriptor ring where the tx ring leaves off */ |
1372 | for (i = 0; i < priv->num_rx_queues; i++) { |
1373 | rx_queue = priv->rx_queue[i]; |
1374 | rx_queue->rx_bd_base = vaddr; |
1375 | rx_queue->rx_bd_dma_base = addr; |
1376 | rx_queue->ndev = ndev; |
1377 | rx_queue->dev = dev; |
1378 | addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; |
1379 | vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; |
1380 | } |
1381 | |
1382 | /* Setup the skbuff rings */ |
1383 | for (i = 0; i < priv->num_tx_queues; i++) { |
1384 | tx_queue = priv->tx_queue[i]; |
1385 | tx_queue->tx_skbuff = |
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
1389 | if (!tx_queue->tx_skbuff) |
1390 | goto cleanup; |
1391 | |
1392 | for (j = 0; j < tx_queue->tx_ring_size; j++) |
1393 | tx_queue->tx_skbuff[j] = NULL; |
1394 | } |
1395 | |
1396 | for (i = 0; i < priv->num_rx_queues; i++) { |
1397 | rx_queue = priv->rx_queue[i]; |
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
1401 | if (!rx_queue->rx_buff) |
1402 | goto cleanup; |
1403 | } |
1404 | |
1405 | gfar_init_bds(ndev); |
1406 | |
1407 | return 0; |
1408 | |
1409 | cleanup: |
1410 | free_skb_resources(priv); |
1411 | return -ENOMEM; |
1412 | } |
1413 | |
1414 | /* Bring the controller up and running */ |
1415 | int startup_gfar(struct net_device *ndev) |
1416 | { |
	struct gfar_private *priv = netdev_priv(ndev);
1418 | int err; |
1419 | |
1420 | gfar_mac_reset(priv); |
1421 | |
1422 | err = gfar_alloc_skb_resources(ndev); |
1423 | if (err) |
1424 | return err; |
1425 | |
1426 | gfar_init_tx_rx_base(priv); |
1427 | |
1428 | smp_mb__before_atomic(); |
	clear_bit(GFAR_DOWN, &priv->state);
1430 | smp_mb__after_atomic(); |
1431 | |
1432 | /* Start Rx/Tx DMA and enable the interrupts */ |
1433 | gfar_start(priv); |
1434 | |
1435 | /* force link state update after mac reset */ |
1436 | priv->oldlink = 0; |
1437 | priv->oldspeed = 0; |
1438 | priv->oldduplex = -1; |
1439 | |
	phy_start(ndev->phydev);
1441 | |
1442 | enable_napi(priv); |
1443 | |
	netif_tx_wake_all_queues(ndev);
1445 | |
1446 | return 0; |
1447 | } |
1448 | |
1449 | static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) |
1450 | { |
1451 | struct net_device *ndev = priv->ndev; |
1452 | struct phy_device *phydev = ndev->phydev; |
1453 | u32 val = 0; |
1454 | |
1455 | if (!phydev->duplex) |
1456 | return val; |
1457 | |
1458 | if (!priv->pause_aneg_en) { |
1459 | if (priv->tx_pause_en) |
1460 | val |= MACCFG1_TX_FLOW; |
1461 | if (priv->rx_pause_en) |
1462 | val |= MACCFG1_RX_FLOW; |
1463 | } else { |
1464 | u16 lcl_adv, rmt_adv; |
1465 | u8 flowctrl; |
1466 | /* get link partner capabilities */ |
1467 | rmt_adv = 0; |
1468 | if (phydev->pause) |
1469 | rmt_adv = LPA_PAUSE_CAP; |
1470 | if (phydev->asym_pause) |
1471 | rmt_adv |= LPA_PAUSE_ASYM; |
1472 | |
		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1475 | if (flowctrl & FLOW_CTRL_TX) |
1476 | val |= MACCFG1_TX_FLOW; |
1477 | if (flowctrl & FLOW_CTRL_RX) |
1478 | val |= MACCFG1_RX_FLOW; |
1479 | } |
1480 | |
1481 | return val; |
1482 | } |
1483 | |
1484 | static noinline void gfar_update_link_state(struct gfar_private *priv) |
1485 | { |
1486 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1487 | struct net_device *ndev = priv->ndev; |
1488 | struct phy_device *phydev = ndev->phydev; |
1489 | struct gfar_priv_rx_q *rx_queue = NULL; |
1490 | int i; |
1491 | |
1492 | if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) |
1493 | return; |
1494 | |
1495 | if (phydev->link) { |
1496 | u32 tempval1 = gfar_read(addr: ®s->maccfg1); |
1497 | u32 tempval = gfar_read(addr: ®s->maccfg2); |
1498 | u32 ecntrl = gfar_read(addr: ®s->ecntrl); |
1499 | u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); |
1500 | |
1501 | if (phydev->duplex != priv->oldduplex) { |
1502 | if (!(phydev->duplex)) |
1503 | tempval &= ~(MACCFG2_FULL_DUPLEX); |
1504 | else |
1505 | tempval |= MACCFG2_FULL_DUPLEX; |
1506 | |
1507 | priv->oldduplex = phydev->duplex; |
1508 | } |
1509 | |
1510 | if (phydev->speed != priv->oldspeed) { |
1511 | switch (phydev->speed) { |
1512 | case 1000: |
1513 | tempval = |
1514 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); |
1515 | |
1516 | ecntrl &= ~(ECNTRL_R100); |
1517 | break; |
1518 | case 100: |
1519 | case 10: |
1520 | tempval = |
1521 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); |
1522 | |
1523 | /* Reduced mode distinguishes |
1524 | * between 10 and 100 |
1525 | */ |
1526 | if (phydev->speed == SPEED_100) |
1527 | ecntrl |= ECNTRL_R100; |
1528 | else |
1529 | ecntrl &= ~(ECNTRL_R100); |
1530 | break; |
1531 | default: |
1532 | netif_warn(priv, link, priv->ndev, |
1533 | "Ack! Speed (%d) is not 10/100/1000!\n" , |
1534 | phydev->speed); |
1535 | break; |
1536 | } |
1537 | |
1538 | priv->oldspeed = phydev->speed; |
1539 | } |
1540 | |
1541 | tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
1542 | tempval1 |= gfar_get_flowctrl_cfg(priv); |
1543 | |
1544 | /* Turn last free buffer recording on */ |
1545 | if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { |
1546 | for (i = 0; i < priv->num_rx_queues; i++) { |
1547 | u32 bdp_dma; |
1548 | |
1549 | rx_queue = priv->rx_queue[i]; |
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
1552 | } |
1553 | |
1554 | priv->tx_actual_en = 1; |
1555 | } |
1556 | |
1557 | if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) |
1558 | priv->tx_actual_en = 0; |
1559 | |
1560 | gfar_write(addr: ®s->maccfg1, val: tempval1); |
1561 | gfar_write(addr: ®s->maccfg2, val: tempval); |
1562 | gfar_write(addr: ®s->ecntrl, val: ecntrl); |
1563 | |
1564 | if (!priv->oldlink) |
1565 | priv->oldlink = 1; |
1566 | |
1567 | } else if (priv->oldlink) { |
1568 | priv->oldlink = 0; |
1569 | priv->oldspeed = 0; |
1570 | priv->oldduplex = -1; |
1571 | } |
1572 | |
1573 | if (netif_msg_link(priv)) |
1574 | phy_print_status(phydev); |
1575 | } |
1576 | |
1577 | /* Called every time the controller might need to be made |
1578 | * aware of new link state. The PHY code conveys this |
1579 | * information through variables in the phydev structure, and this |
1580 | * function converts those variables into the appropriate |
1581 | * register values, and can bring down the device if needed. |
1582 | */ |
1583 | static void adjust_link(struct net_device *dev) |
1584 | { |
1585 | struct gfar_private *priv = netdev_priv(dev); |
1586 | struct phy_device *phydev = dev->phydev; |
1587 | |
1588 | if (unlikely(phydev->link != priv->oldlink || |
1589 | (phydev->link && (phydev->duplex != priv->oldduplex || |
1590 | phydev->speed != priv->oldspeed)))) |
1591 | gfar_update_link_state(priv); |
1592 | } |
1593 | |
1594 | /* Initialize TBI PHY interface for communicating with the |
1595 | * SERDES lynx PHY on the chip. We communicate with this PHY |
1596 | * through the MDIO bus on each controller, treating it as a |
1597 | * "normal" PHY at the address found in the TBIPA register. We assume |
1598 | * that the TBIPA register is valid. Either the MDIO bus code will set |
1599 | * it to a value that doesn't conflict with other PHYs on the bus, or the |
1600 | * value doesn't matter, as there are no other PHYs on the bus. |
1601 | */ |
1602 | static void gfar_configure_serdes(struct net_device *dev) |
1603 | { |
1604 | struct gfar_private *priv = netdev_priv(dev); |
1605 | struct phy_device *tbiphy; |
1606 | |
1607 | if (!priv->tbi_node) { |
1608 | dev_warn(&dev->dev, "error: SGMII mode requires that the " |
1609 | "device tree specify a tbi-handle\n" ); |
1610 | return; |
1611 | } |
1612 | |
	tbiphy = of_phy_find_device(priv->tbi_node);
1614 | if (!tbiphy) { |
1615 | dev_err(&dev->dev, "error: Could not get TBI device\n" ); |
1616 | return; |
1617 | } |
1618 | |
1619 | /* If the link is already up, we must already be ok, and don't need to |
1620 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1621 | * everything for us? Resetting it takes the link down and requires |
1622 | * several seconds for it to come back. |
1623 | */ |
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
1626 | return; |
1627 | } |
1628 | |
	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);

	put_device(&tbiphy->mdio.dev);
1641 | } |
1642 | |
1643 | /* Initializes driver's PHY state, and attaches to the PHY. |
1644 | * Returns 0 on success. |
1645 | */ |
1646 | static int init_phy(struct net_device *dev) |
1647 | { |
1648 | __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
1649 | struct gfar_private *priv = netdev_priv(dev); |
1650 | phy_interface_t interface = priv->interface; |
1651 | struct phy_device *phydev; |
1652 | struct ethtool_keee edata; |
1653 | |
	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array),
			       mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1661 | |
1662 | priv->oldlink = 0; |
1663 | priv->oldspeed = 0; |
1664 | priv->oldduplex = -1; |
1665 | |
	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				interface);
1668 | if (!phydev) { |
1669 | dev_err(&dev->dev, "could not attach to PHY\n" ); |
1670 | return -ENODEV; |
1671 | } |
1672 | |
1673 | if (interface == PHY_INTERFACE_MODE_SGMII) |
1674 | gfar_configure_serdes(dev); |
1675 | |
1676 | /* Remove any features not supported by the controller */ |
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);
1679 | |
1680 | /* Add support for flow control */ |
1681 | phy_support_asym_pause(phydev); |
1682 | |
1683 | /* disable EEE autoneg, EEE not supported by eTSEC */ |
1684 | memset(&edata, 0, sizeof(struct ethtool_keee)); |
1685 | phy_ethtool_set_eee(phydev, data: &edata); |
1686 | |
1687 | return 0; |
1688 | } |
1689 | |
1690 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
1691 | { |
1692 | struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN); |
1693 | |
1694 | memset(fcb, 0, GMAC_FCB_LEN); |
1695 | |
1696 | return fcb; |
1697 | } |
1698 | |
1699 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, |
1700 | int fcb_length) |
1701 | { |
1702 | /* If we're here, it's a IP packet with a TCP or UDP |
1703 | * payload. We set it to checksum, using a pseudo-header |
1704 | * we provide |
1705 | */ |
1706 | u8 flags = TXFCB_DEFAULT; |
1707 | |
1708 | /* Tell the controller what the protocol is |
1709 | * And provide the already calculated phcs |
1710 | */ |
1711 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
1712 | flags |= TXFCB_UDP; |
1713 | fcb->phcs = (__force __be16)(udp_hdr(skb)->check); |
1714 | } else |
1715 | fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); |
1716 | |
1717 | /* l3os is the distance between the start of the |
1718 | * frame (skb->data) and the start of the IP hdr. |
1719 | * l4os is the distance between the start of the |
1720 | * l3 hdr and the l4 hdr |
1721 | */ |
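	/* For a typical untagged IPv4 frame, for example, l3os comes out to
	 * ETH_HLEN (14) and l4os to the IP header length (20 without
	 * options); both are measured after the FCB has been pushed.
	 */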
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	skb_frag_t *frag;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len) {
		if (unlikely(skb_cow_head(skb, fcb_len))) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

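		/* If either checksum erratum applies, fall back to software
		 * checksumming, and keep a fresh FCB only if VLAN insertion
		 * or timestamping still needs the TOE path.
		 */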
		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (likely(!nr_frags)) {
		if (likely(!do_tstamp))
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		u32 lstatus_start = lstatus;

		/* Place the fragment addresses and lengths into the TxBDs */
		frag = &skb_shinfo(skb)->frags[0];
		for (i = 0; i < nr_frags; i++, frag++) {
			unsigned int size;

			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			size = skb_frag_size(frag);

			lstatus = be32_to_cpu(txbdp->lstatus) | size |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
						   size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = lstatus_start;
	}

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;

		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);
		if (!nr_frags)
			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;

		/* Setup tx hardware time stamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, bytes_sent);

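	/* Make sure all descriptor and buffer writes have completed before
	 * setting READY on the first BD; once it is set, the DMA engine may
	 * fetch the descriptor chain at any time.
	 */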
	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow.
	 */
	spin_lock_bh(&tx_queue->txlock);
	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	spin_unlock_bh(&tx_queue->txlock);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	return NETDEV_TX_OK;

dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}
	gfar_wmb();
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);

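	/* Serialize with other reset paths: busy-wait until we own the
	 * GFAR_RESETTING bit, then restart the controller around the MTU
	 * change if the interface is up.
	 */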
	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

static void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
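
/* Illustrative sketch (not part of the driver): user space drives the two
 * handlers above through the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls,
 * along the lines of:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * ("eth0" and sock_fd are placeholders.) Note that any rx_filter other than
 * HWTSTAMP_FILTER_NONE is upgraded to HWTSTAMP_FILTER_ALL, since the
 * hardware timestamps either all received frames or none.
 */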

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

/* Reclaim completed Tx descriptors (runs from the Tx NAPI poll path) */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		bool do_tstamp;

		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			    priv->hwts_tx_en;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(do_tstamp))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

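		/* The controller clears TXBD_READY once it is done with a
		 * BD, so a last BD that is still READY (with a length still
		 * programmed) belongs to a frame that is still in flight.
		 */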
		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(do_tstamp)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(do_tstamp)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
					  ~0x7UL);
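			/* The controller stores the Tx timestamp in the
			 * TxPAL area ahead of the frame; the pointer math
			 * above picks it up on an 8-byte boundary (offset
			 * assumed from the FCB/TxPAL layout).
			 */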

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

static void count_errors(u32 lstatus, struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (lstatus & BD_LFLAG(RXBD_LARGE))
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_over_errors++;
	}
}

static irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask, ievent;

	ievent = gfar_read(&grp->regs->ievent);

	if (unlikely(ievent & IEVENT_FGPI)) {
		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
		return IRQ_HANDLED;
	}

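	/* Mask further Rx interrupt sources for the duration of the NAPI
	 * poll; IMASK_RX_DISABLED preserves the non-Rx bits (plus the RMON
	 * counter-overflow interrupt, when in use).
	 */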
	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

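/* Rx buffers are half-page (GFAR_RXB_TRUESIZE) slices of a page: attach the
 * current slice to the skb (linear data for the first BD of a frame, a page
 * fragment otherwise), and report whether the other half of the page can be
 * recycled into the ring.
 */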
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
			     struct sk_buff *skb, bool first)
{
	int size = lstatus & BD_LENGTH_MASK;
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		/* the last fragment's length contains the full frame length */
		if (lstatus & BD_LFLAG(RXBD_LAST))
			size -= skb->len;

		WARN(size < 0, "gianfar: rx fragment size underflow");
		if (size < 0)
			return false;

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset + RXBUF_ALIGNMENT,
				size, GFAR_RXB_TRUESIZE);
	}

	/* try reuse page */
	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* change offset to the other half */
	rxb->page_offset ^= GFAR_RXB_TRUESIZE;

	page_ref_inc(page);

	return true;
}

static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
			       struct gfar_rx_buff *old_rxb)
{
	struct gfar_rx_buff *new_rxb;
	u16 nta = rxq->next_to_alloc;

	new_rxb = &rxq->rx_buff[nta];

	/* find next buf that can reuse a page */
	nta++;
	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;

	/* copy page reference */
	*new_rxb = *old_rxb;

	/* sync for use by the device */
	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
					 old_rxb->page_offset,
					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
}

static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
					    u32 lstatus, struct sk_buff *skb)
{
	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
	struct page *page = rxb->page;
	bool first = false;

	if (likely(!skb)) {
		void *buff_addr = page_address(page) + rxb->page_offset;

		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			gfar_rx_alloc_err(rx_queue);
			return NULL;
		}
		skb_reserve(skb, RXBUF_ALIGNMENT);
		first = true;
	}

	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);

	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
		/* reuse the free half of the page */
		gfar_reuse_rx_page(rx_queue, rxb);
	} else {
		/* page cannot be reused, unmap it */
		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear rxb content */
	rxb->page = NULL;

	return skb;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, the stack
	 * must verify the checksum itself.
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}

/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (priv->uses_rxfcb)
		skb_pull(skb, GMAC_FCB_LEN);

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	/* Trim off the FCS */
	pskb_trim(skb, skb->len - ETH_FCS_LEN);

	if (ndev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
			      int rx_work_limit)
{
	struct net_device *ndev = rx_queue->ndev;
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxbd8 *bdp;
	int i, howmany = 0;
	struct sk_buff *skb = rx_queue->skb;
	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
	unsigned int total_bytes = 0, total_pkts = 0;

	/* Get the first full descriptor */
	i = rx_queue->next_to_clean;

	while (rx_work_limit--) {
		u32 lstatus;

		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
			cleaned_cnt = 0;
		}

		bdp = &rx_queue->rx_bd_base[i];
		lstatus = be32_to_cpu(bdp->lstatus);
		if (lstatus & BD_LFLAG(RXBD_EMPTY))
			break;

		/* lost RXBD_LAST descriptor due to overrun */
		if (skb &&
		    (lstatus & BD_LFLAG(RXBD_FIRST))) {
			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;

			/* can continue normally */
		}

		/* order rx buffer descriptor reads */
		rmb();

		/* fetch next to clean buffer from the ring */
		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
		if (unlikely(!skb))
			break;

		cleaned_cnt++;
		howmany++;

		if (unlikely(++i == rx_queue->rx_ring_size))
			i = 0;

		rx_queue->next_to_clean = i;

		/* fetch next buffer if not the last in frame */
		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
			continue;

		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
			count_errors(lstatus, ndev);

			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;
			continue;
		}

		gfar_process_frame(ndev, skb);

		/* Increment the number of packets */
		total_pkts++;
		total_bytes += skb->len;

		skb_record_rx_queue(skb, rx_queue->qindex);

		skb->protocol = eth_type_trans(skb, ndev);

		/* Send the packet up the stack */
		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

		skb = NULL;
	}

	/* Store incomplete frames for completion */
	rx_queue->skb = skb;

	rx_queue->stats.rx_packets += total_pkts;
	rx_queue->stats.rx_bytes += total_bytes;

	if (cleaned_cnt)
		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

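	/* With Tx flow control active, the controller uses the last free
	 * RxBD pointer to decide when to issue pause frames (lossless flow
	 * control), so the pointer must track our ring refills.
	 */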
	/* Update Last Free RxBD pointer for LFC */
	if (unlikely(priv->tx_actual_en)) {
		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);

		gfar_write(rx_queue->rfbptr, bdp_dma);
	}

	return howmany;
}

static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;
		napi_complete_done(napi, work_done);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the event details if error messaging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_MSRO) {
		struct rmon_mib __iomem *rmon = &regs->rmon;
		u32 car;

		spin_lock(&priv->rmon_overflow.lock);
		car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		if (car) {
			priv->rmon_overflow.rdrp++;
			gfar_write(&rmon->car1, car);
		}
		spin_unlock(&priv->rmon_overflow.lock);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_over_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, ER)->irq);

		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, RX)->irq);

	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, TX)->irq);
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;

}

static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}

static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err)
		return err;

	return err;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(dev->phydev);

	gfar_free_irq(priv);

	return 0;
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

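		/* The eight 32-bit GADDR/IGADDR registers below form a
		 * 256-bit hash table: gfar_set_hash_for_addr() reduces each
		 * address to a single bit, and the controller drops frames
		 * whose hash bit is not set.
		 */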
		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
		/* Clear the CAR registers (w1c style) */
		gfar_write(&regs->rmon.car1, 0xffffffff);
		gfar_write(&regs->rmon.car2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

3176 | static const struct net_device_ops gfar_netdev_ops = { |
3177 | .ndo_open = gfar_enet_open, |
3178 | .ndo_start_xmit = gfar_start_xmit, |
3179 | .ndo_stop = gfar_close, |
3180 | .ndo_change_mtu = gfar_change_mtu, |
3181 | .ndo_set_features = gfar_set_features, |
3182 | .ndo_set_rx_mode = gfar_set_multi, |
3183 | .ndo_tx_timeout = gfar_timeout, |
3184 | .ndo_eth_ioctl = gfar_ioctl, |
3185 | .ndo_get_stats64 = gfar_get_stats64, |
3186 | .ndo_change_carrier = fixed_phy_change_carrier, |
3187 | .ndo_set_mac_address = gfar_set_mac_addr, |
3188 | .ndo_validate_addr = eth_validate_addr, |
3189 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3190 | .ndo_poll_controller = gfar_netpoll, |
3191 | #endif |
3192 | }; |
3193 | |
3194 | /* Set up the ethernet device structure, private data, |
3195 | * and anything else we need before we start |
3196 | */ |
3197 | static int gfar_probe(struct platform_device *ofdev) |
3198 | { |
3199 | struct device_node *np = ofdev->dev.of_node; |
3200 | struct net_device *dev = NULL; |
3201 | struct gfar_private *priv = NULL; |
3202 | int err = 0, i; |
3203 | |
3204 | err = gfar_of_init(ofdev, pdev: &dev); |
3205 | |
3206 | if (err) |
3207 | return err; |
3208 | |
3209 | priv = netdev_priv(dev); |
3210 | priv->ndev = dev; |
3211 | priv->ofdev = ofdev; |
3212 | priv->dev = &ofdev->dev; |
3213 | SET_NETDEV_DEV(dev, &ofdev->dev); |
3214 | |
3215 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
3216 | |
3217 | platform_set_drvdata(pdev: ofdev, data: priv); |
3218 | |
3219 | gfar_detect_errata(priv); |
3220 | |
3221 | /* Set the dev->base_addr to the gfar reg region */ |
3222 | dev->base_addr = (unsigned long) priv->gfargrp[0].regs; |
3223 | |
3224 | /* Fill in the dev structure */ |
3225 | dev->watchdog_timeo = TX_TIMEOUT; |
3226 | /* MTU range: 50 - 9586 */ |
3227 | dev->mtu = 1500; |
3228 | dev->min_mtu = 50; |
3229 | dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; |
3230 | dev->netdev_ops = &gfar_netdev_ops; |
3231 | dev->ethtool_ops = &gfar_ethtool_ops; |
3232 | |
3233 | /* Register for napi ...We are registering NAPI for each grp */ |
3234 | for (i = 0; i < priv->num_grps; i++) { |
3235 | netif_napi_add(dev, napi: &priv->gfargrp[i].napi_rx, |
3236 | poll: gfar_poll_rx_sq); |
3237 | netif_napi_add_tx_weight(dev, napi: &priv->gfargrp[i].napi_tx, |
3238 | poll: gfar_poll_tx_sq, weight: 2); |
3239 | } |
3240 | |
3241 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
3242 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
3243 | NETIF_F_RXCSUM; |
3244 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | |
3245 | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; |
3246 | } |
3247 | |
3248 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { |
3249 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | |
3250 | NETIF_F_HW_VLAN_CTAG_RX; |
3251 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
3252 | } |
3253 | |
3254 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
3255 | |
3256 | gfar_init_addr_hash_table(priv); |
3257 | |
3258 | /* Insert receive time stamps into padding alignment bytes, and |
3259 | * plus 2 bytes padding to ensure the cpu alignment. |
3260 | */ |
3261 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
3262 | priv->padding = 8 + DEFAULT_PADDING; |
3263 | |
3264 | if (dev->features & NETIF_F_IP_CSUM || |
3265 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
3266 | dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
3267 | |
3268 | /* Initializing some of the rx/tx queue level parameters */ |
3269 | for (i = 0; i < priv->num_tx_queues; i++) { |
3270 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; |
3271 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; |
3272 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; |
3273 | priv->tx_queue[i]->txic = DEFAULT_TXIC; |
3274 | } |
3275 | |
3276 | for (i = 0; i < priv->num_rx_queues; i++) { |
3277 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; |
3278 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; |
3279 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; |
3280 | } |
3281 | |
3282 | /* Always enable rx filer if available */ |
3283 | priv->rx_filer_enable = |
3284 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; |
3285 | /* Enable most messages by default */ |
3286 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; |
3287 | /* use pritority h/w tx queue scheduling for single queue devices */ |
3288 | if (priv->num_tx_queues == 1) |
3289 | priv->prio_sched_en = 1; |
3290 | |
3291 | set_bit(nr: GFAR_DOWN, addr: &priv->state); |
3292 | |
3293 | gfar_hw_init(priv); |
3294 | |
3295 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
3296 | struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; |
3297 | |
3298 | spin_lock_init(&priv->rmon_overflow.lock); |
3299 | priv->rmon_overflow.imask = IMASK_MSRO; |
3300 | gfar_write(addr: &rmon->cam1, val: gfar_read(addr: &rmon->cam1) & ~CAM1_M1RDR); |
3301 | } |

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
		priv->wol_supported |= GFAR_WOL_MAGIC;

	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
	    priv->rx_filer_enable)
		priv->wol_supported |= GFAR_WOL_FILER_UCAST;

	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}
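	/* With per-group interrupts this yields names like "eth0_g0_tx",
	 * "eth0_g0_rx" and "eth0_g0_er" in /proc/interrupts; single-
	 * interrupt devices simply reuse the netdev name.
	 */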

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static void gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);
	struct device_node *np = ofdev->dev.of_node;

	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
}

#ifdef CONFIG_PM

static void __gfar_filer_disable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
	gfar_write(&regs->rctrl, temp);
}

static void __gfar_filer_enable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, temp);
}

/* Filer rules implementing wol capabilities */
static void gfar_filer_config_wol(struct gfar_private *priv)
{
	unsigned int i;
	u32 rqfcr;

	__gfar_filer_disable(priv);

	/* clear the filer table, reject any packet by default */
	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
	for (i = 0; i <= MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, rqfcr, 0);

	i = 0;
	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
		/* unicast packet, accept it */
		struct net_device *ndev = priv->ndev;
		/* get the default rx queue index */
		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
				    (ndev->dev_addr[1] << 8) |
				    ndev->dev_addr[2];
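		/* The 48-bit station address is matched by two chained
		 * filer rules: the RQFCR_PID_DAH rule compares the upper
		 * three address bytes packed above, and the RQFCR_PID_DAL
		 * rule below compares the lower three.
		 */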

		rqfcr = (qindex << 10) | RQFCR_AND |
			RQFCR_CMP_EXACT | RQFCR_PID_DAH;

		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);

		dest_mac_addr = (ndev->dev_addr[3] << 16) |
				(ndev->dev_addr[4] << 8) |
				ndev->dev_addr[5];
		rqfcr = (qindex << 10) | RQFCR_GPI |
			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
	}

	__gfar_filer_enable(priv);
}

static void gfar_filer_restore_table(struct gfar_private *priv)
{
	u32 rqfcr, rqfpr;
	unsigned int i;

	__gfar_filer_disable(priv);

	for (i = 0; i <= MAX_FILER_IDX; i++) {
		rqfcr = priv->ftp_rqfcr[i];
		rqfpr = priv->ftp_rqfpr[i];
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}

	__gfar_filer_enable(priv);
}

/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
static void gfar_start_wol_filer(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~DMACTRL_GRS;
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear RHLT, so that the DMA starts polling now */
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* enable the Filer General Purpose Interrupt */
		gfar_write(&regs->imask, IMASK_FGPI);
	}
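	/* Writing IMASK with only IMASK_FGPI set masks everything else,
	 * so a filer general-purpose-interrupt match (the RQFCR_GPI flag
	 * in the WoL rule) is the only event left that can wake us.
	 */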

	/* Enable Rx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= MACCFG1_RX_EN;
	gfar_write(&regs->maccfg1, tempval);
}

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	disable_napi(priv);
	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	gfar_halt(priv);

	if (wol & GFAR_WOL_MAGIC) {
		/* Enable interrupt on Magic Packet */
		gfar_write(&regs->imask, IMASK_MAG);

		/* Enable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval |= MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

		/* re-enable the Rx block */
		tempval = gfar_read(&regs->maccfg1);
		tempval |= MACCFG1_RX_EN;
		gfar_write(&regs->maccfg1, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		gfar_filer_config_wol(priv);
		gfar_start_wol_filer(priv);

	} else {
		phy_stop(ndev->phydev);
	}
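	/* In magic-packet mode the Rx block stays enabled so the MAC can
	 * recognize the wake pattern while DMA remains halted; with no
	 * WoL option armed, stopping the PHY saves the most power.
	 */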

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	if (wol & GFAR_WOL_MAGIC) {
		/* Disable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		/* need to stop rx only, tx is already down */
		gfar_halt(priv);
		gfar_filer_restore_table(priv);

	} else {
		phy_start(ndev->phydev);
	}

	gfar_start(priv);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	gfar_init_bds(ndev);

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}
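
/* Unlike resume, restore runs after hibernation, when register state
 * cannot be assumed to have survived: the BD rings and the MAC/DMA base
 * registers are rebuilt from scratch rather than merely unquiesced.
 */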

static const struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
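
/* For illustration only, a minimal device-tree node that the first
 * match entry would bind against could look like this (hypothetical
 * values, not taken from a real board file):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <32 0x8 33 0x8 34 0x8>;
 *		phy-handle = <&phy0>;
 *	};
 */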

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove_new = gfar_remove,
};

module_platform_driver(gfar_driver);