// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
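/* E.g. with the default 512-entry ring, the queue is woken once no more
 * than 384 descriptors are still outstanding.
 */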
67 | |
68 | #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) |
69 | #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ |
70 | | MACB_BIT(ISR_RLE) \ |
71 | | MACB_BIT(TXERR)) |
72 | #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \ |
73 | | MACB_BIT(TXUBR)) |
74 | |
75 | /* Max length of transmit frame must be a multiple of 8 bytes */ |
76 | #define MACB_TX_LEN_ALIGN 8 |
77 | #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) |
78 | /* Limit maximum TX length as per Cadence TSO errata. This is to avoid a |
79 | * false amba_error in TX path from the DMA assuming there is not enough |
80 | * space in the SRAM (16KB) even when there is. |
81 | */ |
82 | #define GEM_MAX_TX_LEN (unsigned int)(0x3FC0) |
83 | |
84 | #define GEM_MTU_MIN_SIZE ETH_MIN_MTU |
85 | #define MACB_NETIF_LSO NETIF_F_TSO |
86 | |
87 | #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) |
88 | #define MACB_WOL_ENABLED (0x1 << 1) |
89 | |
90 | #define HS_SPEED_10000M 4 |
91 | #define MACB_SERDES_RATE_10G 1 |
92 | |
93 | /* Graceful stop timeouts in us. We should allow up to |
94 | * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) |
95 | */ |
96 | #define MACB_HALT_TIMEOUT 14000 |
97 | #define MACB_PM_TIMEOUT 100 /* ms */ |
98 | |
99 | #define MACB_MDIO_TIMEOUT 1000000 /* in usecs */ |
100 | |
101 | /* DMA buffer descriptor might be different size |
102 | * depends on hardware configuration: |
103 | * |
104 | * 1. dma address width 32 bits: |
105 | * word 1: 32 bit address of Data Buffer |
106 | * word 2: control |
107 | * |
108 | * 2. dma address width 64 bits: |
109 | * word 1: 32 bit address of Data Buffer |
110 | * word 2: control |
111 | * word 3: upper 32 bit address of Data Buffer |
112 | * word 4: unused |
113 | * |
114 | * 3. dma address width 32 bits with hardware timestamping: |
115 | * word 1: 32 bit address of Data Buffer |
116 | * word 2: control |
117 | * word 3: timestamp word 1 |
118 | * word 4: timestamp word 2 |
119 | * |
120 | * 4. dma address width 64 bits with hardware timestamping: |
121 | * word 1: 32 bit address of Data Buffer |
122 | * word 2: control |
123 | * word 3: upper 32 bit address of Data Buffer |
124 | * word 4: unused |
125 | * word 5: timestamp word 1 |
126 | * word 6: timestamp word 2 |
127 | */ |
128 | static unsigned int macb_dma_desc_get_size(struct macb *bp) |
129 | { |
130 | #ifdef MACB_EXT_DESC |
131 | unsigned int desc_size; |
132 | |
133 | switch (bp->hw_dma_cap) { |
134 | case HW_DMA_CAP_64B: |
135 | desc_size = sizeof(struct macb_dma_desc) |
136 | + sizeof(struct macb_dma_desc_64); |
137 | break; |
138 | case HW_DMA_CAP_PTP: |
139 | desc_size = sizeof(struct macb_dma_desc) |
140 | + sizeof(struct macb_dma_desc_ptp); |
141 | break; |
142 | case HW_DMA_CAP_64B_PTP: |
143 | desc_size = sizeof(struct macb_dma_desc) |
144 | + sizeof(struct macb_dma_desc_64) |
145 | + sizeof(struct macb_dma_desc_ptp); |
146 | break; |
147 | default: |
148 | desc_size = sizeof(struct macb_dma_desc); |
149 | } |
150 | return desc_size; |
151 | #endif |
152 | return sizeof(struct macb_dma_desc); |
153 | } |
154 | |
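/* Scale a logical descriptor index to its position in the hardware ring:
 * extended descriptors occupy multiple basic slots. For example, with
 * HW_DMA_CAP_64B_PTP each descriptor spans three basic slots, so logical
 * index 5 starts at slot 15.
 */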
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
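/* Ring sizes are powers of two, so "index % size" reduces to the cheaper
 * "index & (size - 1)"; head and tail indices are free-running and only
 * masked on access. E.g. with a 512-entry ring, index 513 wraps to 1.
 */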
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
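/* If the raw (non-swapping) write of MACB_BIT(LLB) reads back unchanged,
 * CPU and device agree on byte order and the native __raw accessors can be
 * used; on a mismatched CPU the read-back comes back byte-swapped and the
 * driver falls back to the swapping readl/writel_relaxed accessors.
 */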
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

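/* The 6-byte MAC address is split across two registers: SA1B holds the
 * first four bytes and the low half of SA1T the remaining two. For
 * 00:11:22:33:44:55, SA1B = 0x33221100 and SA1T = 0x5544.
 */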
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	if (gem_has_ptp(bp)) {
		gem_writel(bp, RXPTPUNI, bottom);
		gem_writel(bp, TXPTPUNI, bottom);
	}

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	/* Check all 4 address register sets for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		if (is_valid_ether_addr(addr)) {
			eth_hw_addr_set(bp->dev, addr);
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

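/* Poll NSR.IDLE until the PHY management logic is free, sampling every
 * 1 us for at most MACB_MDIO_TIMEOUT us; returns 0 on success or
 * -ETIMEDOUT if the shift register never goes idle.
 */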
static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}

static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_resume_and_get(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
			      | MACB_BF(RW, MACB_MAN_C22_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_C22_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

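/* Clause 45 access is a two-step sequence on the MDIO bus: an ADDRESS
 * frame first latches the 16-bit register number into the PHY for the
 * given device (devad), then a READ or WRITE frame to the same devad
 * transfers the data.
 */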
static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
			      int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(DATA, regnum & 0xFFFF)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
			       u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_resume_and_get(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
			      | MACB_BF(RW, MACB_MAN_C22_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_C22_CODE)
			      | MACB_BF(DATA, value)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
			       int devad, int regnum,
			       u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(DATA, regnum & 0xFFFF)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)
			      | MACB_BF(DATA, value)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

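/* Program each queue's RX and TX descriptor ring base addresses. With
 * 64-bit capable DMA the upper 32 bits go into the RBQPH/TBQPH registers;
 * 32-bit configurations only program RBQP/TBQP.
 */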
static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}

/**
 * macb_set_tx_clk() - Set the TX clock to match a new link speed
 * @bp: pointer to struct macb
 * @speed: New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 */
static void macb_set_tx_clk(struct macb *bp, int speed)
{
	long ferr, rate, rate_rounded;

	if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
		return;

	/* In case of MII the PHY is the clock master */
	if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(bp->tx_clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
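	/* ferr is computed in units of 10 ppm: the absolute error is divided
	 * by rate/100000, so a value of 5 corresponds to 50 ppm. E.g. at
	 * 125 MHz, a 7 kHz offset gives ferr = 6 and triggers the warning.
	 */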
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(bp->dev,
			    "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(bp->tx_clk, rate_rounded))
		netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}

static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
				 phy_interface_t interface, int speed,
				 int duplex)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
	u32 config;

	config = gem_readl(bp, USX_CONTROL);
	config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
	config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
	config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
	config |= GEM_BIT(TX_EN);
	gem_writel(bp, USX_CONTROL, config);
}

static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
				   struct phylink_link_state *state)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = gem_readl(bp, USX_STATUS);
	state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
	val = gem_readl(bp, NCFGR);
	if (val & GEM_BIT(PAE))
		state->pause = MLO_PAUSE_RX;
}

static int macb_usx_pcs_config(struct phylink_pcs *pcs,
			       unsigned int neg_mode,
			       phy_interface_t interface,
			       const unsigned long *advertising,
			       bool permit_pause_to_mac)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);

	gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
		   GEM_BIT(SIGNAL_OK));

	return 0;
}

static void macb_pcs_get_state(struct phylink_pcs *pcs,
			       struct phylink_link_state *state)
{
	state->link = 0;
}

static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* Not supported */
}

static int macb_pcs_config(struct phylink_pcs *pcs,
			   unsigned int neg_mode,
			   phy_interface_t interface,
			   const unsigned long *advertising,
			   bool permit_pause_to_mac)
{
	return 0;
}

static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
	.pcs_get_state = macb_usx_pcs_get_state,
	.pcs_config = macb_usx_pcs_config,
	.pcs_link_up = macb_usx_pcs_link_up,
};

static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
	.pcs_get_state = macb_pcs_get_state,
	.pcs_an_restart = macb_pcs_an_restart,
	.pcs_config = macb_pcs_config,
};

static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;
	u32 old_ncr, ncr;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
	old_ncr = ncr = macb_or_gem_readl(bp, NCR);

	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
		if (state->interface == PHY_INTERFACE_MODE_RMII)
			ctrl |= MACB_BIT(RM9200_RMII);
	} else if (macb_is_gem(bp)) {
		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
		ncr &= ~GEM_BIT(ENABLE_HS_MAC);

		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
		} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
			ctrl |= GEM_BIT(PCSSEL);
			ncr |= GEM_BIT(ENABLE_HS_MAC);
		} else if (bp->caps & MACB_CAPS_MIIONRGMII &&
			   bp->phy_interface == PHY_INTERFACE_MODE_MII) {
			ncr |= MACB_BIT(MIIONRGMII);
		}
	}

	/* Apply the new configuration, if any */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	if (old_ncr ^ ncr)
		macb_or_gem_writel(bp, NCR, ncr);

	/* Disable AN for SGMII fixed link configuration, enable otherwise.
	 * Must be written after PCSSEL is set in NCFGR,
	 * otherwise writes will not take effect.
	 */
	if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
		u32 pcsctrl, old_pcsctrl;

		old_pcsctrl = gem_readl(bp, PCSCNTRL);
		if (mode == MLO_AN_FIXED)
			pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG);
		else
			pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG);
		if (old_pcsctrl != pcsctrl)
			gem_writel(bp, PCSCNTRL, pcsctrl);
	}

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IDR,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}

static void macb_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	u32 ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	ctrl = macb_or_gem_readl(bp, NCFGR);

	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

	if (speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (duplex)
		ctrl |= MACB_BIT(FD);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		ctrl &= ~MACB_BIT(PAE);
		if (macb_is_gem(bp)) {
			ctrl &= ~GEM_BIT(GBE);

			if (speed == SPEED_1000)
				ctrl |= GEM_BIT(GBE);
		}

		if (rx_pause)
			ctrl |= MACB_BIT(PAE);

		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
		 * cleared the pipeline and control registers.
		 */
		bp->macbgem_ops.mog_init_rings(bp);
		macb_init_buffers(bp);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IER,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
	}

	macb_or_gem_writel(bp, NCFGR, ctrl);

	if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
		gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
							gem_readl(bp, HS_MAC_CONFIG)));

	spin_unlock_irqrestore(&bp->lock, flags);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		macb_set_tx_clk(bp, speed);

	/* Enable Rx and Tx; Enable PTP unicast */
	ctrl = macb_readl(bp, NCR);
	if (gem_has_ptp(bp))
		ctrl |= MACB_BIT(PTPUNI);

	macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}

static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
					       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_10GBASER)
		return &bp->phylink_usx_pcs;
	else if (interface == PHY_INTERFACE_MODE_SGMII)
		return &bp->phylink_sgmii_pcs;
	else
		return NULL;
}

static const struct phylink_mac_ops macb_phylink_ops = {
	.mac_select_pcs = macb_mac_select_pcs,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
};

static bool macb_phy_handle_exists(struct device_node *dn)
{
	dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(dn);
	return dn != NULL;
}

static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}

static void macb_get_pcs_fixed_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
	bp->phylink_sgmii_pcs.neg_mode = true;
	bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
	bp->phylink_usx_pcs.neg_mode = true;

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;
	bp->phylink_config.mac_managed_pm = true;

	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		bp->phylink_config.poll_fixed_state = true;
		bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
	}

	bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
		MAC_10 | MAC_100;

	__set_bit(PHY_INTERFACE_MODE_MII,
		  bp->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RMII,
		  bp->phylink_config.supported_interfaces);

	/* Determine what modes are supported */
	if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
		bp->phylink_config.mac_capabilities |= MAC_1000FD;
		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			bp->phylink_config.mac_capabilities |= MAC_1000HD;

		__set_bit(PHY_INTERFACE_MODE_GMII,
			  bp->phylink_config.supported_interfaces);
		phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);

		if (bp->caps & MACB_CAPS_PCS)
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  bp->phylink_config.supported_interfaces);

		if (bp->caps & MACB_CAPS_HIGH_SPEED) {
			__set_bit(PHY_INTERFACE_MODE_10GBASER,
				  bp->phylink_config.supported_interfaces);
			bp->phylink_config.mac_capabilities |= MAC_10000FD;
		}
	}

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}

static int macb_mdiobus_register(struct macb *bp)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	/* If we have a child named mdio, probe it instead of looking for PHYs
	 * directly under the MAC node
	 */
	child = of_get_child_by_name(np, "mdio");
	if (child) {
		int ret = of_mdiobus_register(bp->mii_bus, child);

		of_node_put(child);
		return ret;
	}

	if (of_phy_is_fixed_link(np))
		return mdiobus_register(bp->mii_bus);

	/* Only create the PHY from the device tree if at least one PHY is
	 * described. Otherwise scan the entire MDIO bus. We do this to support
	 * old device trees that did not follow the best practices and did not
	 * describe their network PHYs.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* The loop increments the child refcount,
			 * decrement it before returning.
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}

static int macb_mii_init(struct macb *bp)
{
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read_c22;
	bp->mii_bus->write = &macb_mdio_write_c22;
	bp->mii_bus->read_c45 = &macb_mdio_read_c45;
	bp->mii_bus->write_c45 = &macb_mdio_write_c45;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

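/* Set THALT and busy-wait for TSR.TGO (transmit go) to clear, polling in
 * 250 us steps for up to MACB_HALT_TIMEOUT us, so that an in-flight frame
 * can drain before the TX queue pointers are rewritten.
 */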
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		napi_consume_skb(tx_skb->skb, budget);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_MACB_USE_HWSTAMP
	if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
		addr &= ~GEM_BIT(DMA_RXVALID);
#endif
	return addr;
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	bool halt_timeout = false;
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue NAPI TX poll from running, as it calls
	 * macb_tx_complete(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * networking stack that the macb/gem is halted.
	 */
	napi_disable(&queue->napi_tx);
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp)) {
		netdev_err(bp->dev, "BUG: halt tx timed out\n");
		macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
		halt_timeout = true;
	}

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb, 0);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb, 0);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	if (halt_timeout)
		macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
	napi_enable(&queue->napi_tx);
}

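/* Return true only for a PTP one-step SYNC frame, i.e. one the hardware
 * timestamps by itself: the skb must request a hardware timestamp,
 * classify as PTP, carry a parseable header with the two-step flag clear,
 * and have message type SYNC.
 */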
static bool ptp_one_step_sync(struct sk_buff *skb)
{
	struct ptp_header *hdr;
	unsigned int ptp_class;
	u8 msgtype;

	/* No need to parse packet if PTP TS is not involved */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		goto not_oss;

	/* Identify and return whether PTP one step sync is being processed */
	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		goto not_oss;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		goto not_oss;

	if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
		goto not_oss;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	if (msgtype == PTP_MSGTYPE_SYNC)
		return true;

not_oss:
	return false;
}

static int macb_tx_complete(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;
	unsigned int tail;
	unsigned int head;
	int packets = 0;

	spin_lock(&queue->tx_ptr_lock);
	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
				    !ptp_one_step_sync(skb))
					gem_ptp_do_txstamp(bp, skb, desc);

				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
				packets++;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb, budget);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
	spin_unlock(&queue->tx_ptr_lock);

	return packets;
}

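/* Top up the RX ring: allocate and DMA-map an skb for every free slot
 * between rx_prepared_head and rx_tail, handing each descriptor back to
 * hardware by clearing RX_USED last.
 */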
static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
		queue->rx_prepared_head++;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
		  int budget)
{
	struct macb *bp = queue->bp;
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		napi_gro_receive(napi, skb);
	}

	gem_rx_refill(queue);

	return count;
}

static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
			 unsigned int first_frag, unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	napi_gro_receive(napi, skb);

	return 0;
}

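/* Point every RX descriptor at its slice of the queue's contiguous buffer
 * area and mark the last descriptor with RX_WRAP so the controller loops
 * back to the start of the ring.
 */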
static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}

static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
		   int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, napi, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}

static bool macb_rx_pending(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	unsigned int entry;
	struct macb_dma_desc *desc;

	entry = macb_rx_ring_wrap(bp, queue->rx_tail);
	desc = macb_rx_desc(queue, entry);

	/* Make hw descriptor updates visible to CPU */
	rmb();

	return (desc->addr & MACB_BIT(RX_USED)) != 0;
}

static int macb_rx_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
	struct macb *bp = queue->bp;
	int work_done;

	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);

	netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
		    (unsigned int)(queue - bp->queues), work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		queue_writel(queue, IER, bp->rx_intr_mask);

		/* Packet completions only seem to propagate to raise
		 * interrupts when interrupts are enabled at the time, so if
		 * packets were received while interrupts were disabled,
		 * they will not cause another interrupt to be generated when
		 * interrupts are re-enabled.
		 * Check for this case here to avoid losing a wakeup. This can
		 * potentially race with the interrupt handler doing the same
		 * actions if an interrupt is raised just after enabling them,
		 * but this should be harmless.
		 */
		if (macb_rx_pending(queue)) {
			queue_writel(queue, IDR, bp->rx_intr_mask);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
			napi_schedule(napi);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

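/* Kick the transmitter again if the hardware descriptor pointer (TBQP)
 * stopped short of the software head, i.e. queued descriptors remain that
 * the controller has not yet fetched.
 */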
1711 | static void macb_tx_restart(struct macb_queue *queue) |
1712 | { |
1713 | struct macb *bp = queue->bp; |
1714 | unsigned int head_idx, tbqp; |
1715 | |
1716 | spin_lock(lock: &queue->tx_ptr_lock); |
1717 | |
1718 | if (queue->tx_head == queue->tx_tail) |
1719 | goto out_tx_ptr_unlock; |
1720 | |
1721 | tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); |
1722 | tbqp = macb_adj_dma_desc_idx(bp, desc_idx: macb_tx_ring_wrap(bp, index: tbqp)); |
1723 | head_idx = macb_adj_dma_desc_idx(bp, desc_idx: macb_tx_ring_wrap(bp, index: queue->tx_head)); |
1724 | |
1725 | if (tbqp == head_idx) |
1726 | goto out_tx_ptr_unlock; |
1727 | |
1728 | spin_lock_irq(lock: &bp->lock); |
1729 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); |
1730 | spin_unlock_irq(lock: &bp->lock); |
1731 | |
1732 | out_tx_ptr_unlock: |
1733 | spin_unlock(lock: &queue->tx_ptr_lock); |
1734 | } |
1735 | |
1736 | static bool macb_tx_complete_pending(struct macb_queue *queue) |
1737 | { |
1738 | bool retval = false; |
1739 | |
1740 | spin_lock(lock: &queue->tx_ptr_lock); |
1741 | if (queue->tx_head != queue->tx_tail) { |
1742 | /* Make hw descriptor updates visible to CPU */ |
1743 | rmb(); |
1744 | |
1745 | if (macb_tx_desc(queue, index: queue->tx_tail)->ctrl & MACB_BIT(TX_USED)) |
1746 | retval = true; |
1747 | } |
1748 | spin_unlock(lock: &queue->tx_ptr_lock); |
1749 | return retval; |
1750 | } |
1751 | |
1752 | static int macb_tx_poll(struct napi_struct *napi, int budget) |
1753 | { |
1754 | struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx); |
1755 | struct macb *bp = queue->bp; |
1756 | int work_done; |
1757 | |
1758 | work_done = macb_tx_complete(queue, budget); |
1759 | |
1760 | rmb(); // ensure txubr_pending is up to date |
1761 | if (queue->txubr_pending) { |
1762 | queue->txubr_pending = false; |
1763 | netdev_vdbg(bp->dev, "poll: tx restart\n" ); |
1764 | macb_tx_restart(queue); |
1765 | } |
1766 | |
1767 | netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n" , |
1768 | (unsigned int)(queue - bp->queues), work_done, budget); |
1769 | |
1770 | if (work_done < budget && napi_complete_done(n: napi, work_done)) { |
1771 | queue_writel(queue, IER, MACB_BIT(TCOMP)); |
1772 | |
1773 | /* Packet completions only seem to propagate to raise |
1774 | * interrupts when interrupts are enabled at the time, so if |
1775 | * packets were sent while interrupts were disabled, |
1776 | * they will not cause another interrupt to be generated when |
1777 | * interrupts are re-enabled. |
1778 | * Check for this case here to avoid losing a wakeup. This can |
1779 | * potentially race with the interrupt handler doing the same |
1780 | * actions if an interrupt is raised just after enabling them, |
1781 | * but this should be harmless. |
1782 | */ |
1783 | if (macb_tx_complete_pending(queue)) { |
1784 | queue_writel(queue, IDR, MACB_BIT(TCOMP)); |
1785 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
1786 | queue_writel(queue, ISR, MACB_BIT(TCOMP)); |
1787 | netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n" ); |
1788 | napi_schedule(n: napi); |
1789 | } |
1790 | } |
1791 | |
1792 | return work_done; |
1793 | } |

static void macb_hresp_error_task(struct tasklet_struct *t)
{
	struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
	struct net_device *dev = bp->dev;
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, bp->rx_intr_mask |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	macb_init_buffers(bp);

	/* Enable interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		queue_writel(queue, IER,
			     bp->rx_intr_mask |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}

static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	u32 status;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	if (status & MACB_BIT(WOL)) {
		queue_writel(queue, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, MACB_BIT(WOL));
		pm_wakeup_event(&bp->pdev->dev, 0);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	u32 status;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	if (status & GEM_BIT(WOL)) {
		queue_writel(queue, IDR, GEM_BIT(WOL));
		gem_writel(bp, WOL, 0);
		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, GEM_BIT(WOL));
		pm_wakeup_event(&bp->pdev->dev, 0);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & bp->rx_intr_mask) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, bp->rx_intr_mask);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi_rx)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi_rx);
			}
		}

		if (status & (MACB_BIT(TCOMP) |
			      MACB_BIT(TXUBR))) {
			queue_writel(queue, IDR, MACB_BIT(TCOMP));
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(TCOMP) |
							 MACB_BIT(TXUBR));

			if (status & MACB_BIT(TXUBR)) {
				queue->txubr_pending = true;
				wmb(); // ensure softirq can see update
			}

			if (napi_schedule_prep(&queue->napi_tx)) {
				netdev_vdbg(bp->dev, "scheduling TX softirq\n");
				__napi_schedule(&queue->napi_tx);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details. RXUBR is only enabled for
		 * these two versions.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
			if ((bp->dev->features & NETIF_F_HW_CSUM) &&
			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
			    !ptp_one_step_sync(skb))
				ctrl |= MACB_BIT(TX_NOCRC);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb, 0);
	}

	return 0;
}

static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer or protocol is not UDP */
	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);

	/* For UFO only:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
		      skb_is_nonlinear(*skb);
	int padlen = ETH_ZLEN - (*skb)->len;
	int tailroom = skb_tailroom(*skb);
	struct sk_buff *nskb;
	u32 fcs;

	if (!(ndev->features & NETIF_F_HW_CSUM) ||
	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
	    skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
		return 0;

	if (padlen <= 0) {
		/* FCS could be appended to tailroom. */
		if (tailroom >= ETH_FCS_LEN)
			goto add_fcs;
		/* No room for FCS, need to reallocate skb. */
		else
			padlen = ETH_FCS_LEN;
	} else {
		/* Add room for FCS. */
		padlen += ETH_FCS_LEN;
	}

	if (cloned || tailroom < padlen) {
		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		dev_consume_skb_any(*skb);
		*skb = nskb;
	}

	if (padlen > ETH_FCS_LEN)
		skb_put_zero(*skb, padlen - ETH_FCS_LEN);

add_fcs:
	/* set FCS to packet */
	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
	fcs = ~fcs;

	skb_put_u8(*skb, fcs & 0xff);
	skb_put_u8(*skb, (fcs >> 8) & 0xff);
	skb_put_u8(*skb, (fcs >> 16) & 0xff);
	skb_put_u8(*skb, (fcs >> 24) & 0xff);

	return 0;
}
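
/* A worked example of the byte order above (illustrative, not driver code):
 * for a computed fcs of 0x0A0B0C0D, the four skb_put_u8() calls append
 * 0x0D, 0x0C, 0x0B, 0x0A, i.e. least significant byte first, so the frame
 * ends with the software-computed FCS in place of the one the MAC would
 * otherwise have appended in hardware.
 */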

static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (macb_pad_and_fcs(&skb, dev)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

#ifdef CONFIG_MACB_USE_HWSTAMP
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (bp->hw_dma_cap & HW_DMA_CAP_PTP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
#endif

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		/* length of headers */
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_tcp_all_headers(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_bh(&queue->tx_ptr_lock);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		ret = NETDEV_TX_BUSY;
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	spin_lock_irq(&bp->lock);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
	spin_unlock_irq(&bp->lock);

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_bh(&queue->tx_ptr_lock);

	return ret;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
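
/* A worked example of the sizing above (illustrative, not driver code):
 * with the default MTU of 1500, macb_open() requests
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + NET_IP_ALIGN (2 on platforms
 * that define it as 2) = 1520 bytes, which is not a multiple of
 * RX_BUFFER_MULTIPLE (64), so on GEM it is rounded up to a 1536-byte
 * RX buffer.
 */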

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb_queue *queue;
	dma_addr_t addr;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (!queue->rx_skbuff)
			continue;

		for (i = 0; i < bp->rx_ring_size; i++) {
			skb = queue->rx_skbuff[i];

			if (!skb)
				continue;

			desc = macb_rx_desc(queue, i);
			addr = macb_get_addr(bp, desc);

			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			skb = NULL;
		}

		kfree(queue->rx_skbuff);
		queue->rx_skbuff = NULL;
	}
}

static void macb_free_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];

	if (queue->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  queue->rx_buffers, queue->rx_buffers_dma);
		queue->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	bp->macbgem_ops.mog_free_rx_buffers(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
		if (queue->rx_ring) {
			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->rx_ring, queue->rx_ring_dma);
			queue->rx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = bp->rx_ring_size * sizeof(struct sk_buff *);
		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
		if (!queue->rx_skbuff)
			return -ENOMEM;
		else
			netdev_dbg(bp->dev,
				   "Allocated %d RX struct sk_buff entries at %p\n",
				   bp->rx_ring_size, queue->rx_skbuff);
	}
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					       &queue->rx_buffers_dma, GFP_KERNEL);
	if (!queue->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;

		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->rx_ring_dma, GFP_KERNEL);
		if (!queue->rx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
	}
	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;

		queue->rx_tail = 0;
		queue->rx_prepared_head = 0;

		gem_rx_refill(queue);
	}
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(&bp->queues[0]);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}

static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl = macb_readl(bp, NCR);

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));

	/* Clear the stats registers (XXX: Update stats first?) */
	ctrl |= MACB_BIT(CLRSTAT);

	macb_writel(bp, NCR, ctrl);

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable RX partial store and forward and reset watermark value */
	gem_writel(bp, PBUFRXCUT, 0);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else if (pclk_hz <= 240000000)
		config = GEM_BF(CLK, GEM_CLK_DIV96);
	else if (pclk_hz <= 320000000)
		config = GEM_BF(CLK, GEM_CLK_DIV128);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV224);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
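
/* A worked example of the divider ladders above (illustrative, not driver
 * code): a GEM fed by a 125 MHz pclk fails the "<= 120000000" test but
 * passes "<= 160000000", so GEM_CLK_DIV64 is chosen and MDC runs at
 * 125 MHz / 64, roughly 1.95 MHz, below the 2.5 MHz ceiling that
 * IEEE 802.3 specifies for the MDIO management clock.
 */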

/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

		dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
	config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG); /* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF); /* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC); /* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Enable RX partial store and forward and set watermark */
	if (bp->rx_watermark)
		gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU)));
}

/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
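
/* A worked example of the hash function above (illustrative, not driver
 * code): for the broadcast address ff:ff:ff:ff:ff:ff every da[n] is 1, so
 * each hi[j] is the XOR of eight ones, which is 0, and hash_get_index()
 * returns index 0. Matching such a frame through the hash filter therefore
 * requires bit 0 of the hash register (the low bit written via HRB) to be
 * set.
 */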

/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}

static int macb_open(struct net_device *dev)
{
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	err = pm_runtime_resume_and_get(&bp->pdev->dev);
	if (err < 0)
		return err;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		goto pm_exit;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		napi_enable(&queue->napi_rx);
		napi_enable(&queue->napi_tx);
	}

	macb_init_hw(bp);

	err = phy_power_on(bp->sgmii_phy);
	if (err)
		goto reset_hw;

	err = macb_phylink_connect(bp);
	if (err)
		goto phy_off;

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;

phy_off:
	phy_power_off(bp->sgmii_phy);

reset_hw:
	macb_reset_hw(bp);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		napi_disable(&queue->napi_rx);
		napi_disable(&queue->napi_tx);
	}
	macb_free_consistent(bp);
pm_exit:
	pm_runtime_put_sync(&bp->pdev->dev);
	return err;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		napi_disable(&queue->napi_rx);
		napi_disable(&queue->napi_tx);
	}

	phylink_stop(bp->phylink);
	phylink_disconnect_phy(bp->phylink);

	phy_power_off(bp->sgmii_phy);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	pm_runtime_put(&bp->pdev->dev);

	return 0;
}

static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}

static int macb_set_mac_addr(struct net_device *dev, void *addr)
{
	int err;

	err = eth_mac_addr(dev, addr);
	if (err < 0)
		return err;

	macb_set_hwaddr(netdev_priv(dev));
	return 0;
}

static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	if (!netif_running(bp->dev))
		return nstat;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	struct macb *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	char stat_string[ETH_GSTRING_LEN];
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int i;
	unsigned int q;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
					 q, queue_statistics[i].stat_string);
				memcpy(p, stat_string, ETH_GSTRING_LEN);
			}
		}
		break;
	}
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0] = macb_readl(bp, NCR);
	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2] = macb_readl(bp, NSR);
	regs_buff[3] = macb_readl(bp, TSR);
	regs_buff[4] = macb_readl(bp, RBQP);
	regs_buff[5] = macb_readl(bp, TBQP);
	regs_buff[6] = macb_readl(bp, RSR);
	regs_buff[7] = macb_readl(bp, IMR);

	regs_buff[8] = tail;
	regs_buff[9] = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}

static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		phylink_ethtool_get_wol(bp->phylink, wol);
		wol->supported |= WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}

static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	/* Pass the order to phylink layer */
	ret = phylink_ethtool_set_wol(bp->phylink, wol);
	/* Don't manage WoL on MAC if handled by the PHY
	 * or if there's a failure in talking to the PHY
	 */
	if (!ret || ret != -EOPNOTSUPP)
		return ret;

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}

static int macb_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(bp->phylink, kset);
}

static int macb_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(bp->phylink, kset);
}

static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}

static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}
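
/* A worked example of the ring sizing above (illustrative, not driver
 * code): a request such as "ethtool -G ethX rx 1000" is first clamped
 * into [MIN_RX_RING_SIZE, MAX_RX_RING_SIZE] = [64, 8192], then rounded up
 * by roundup_pow_of_two() to 1024, since the ring size must be a power of
 * two for the index-wrapping arithmetic to work.
 */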
3354 | |
3355 | #ifdef CONFIG_MACB_USE_HWSTAMP |
3356 | static unsigned int gem_get_tsu_rate(struct macb *bp) |
3357 | { |
3358 | struct clk *tsu_clk; |
3359 | unsigned int tsu_rate; |
3360 | |
3361 | tsu_clk = devm_clk_get(dev: &bp->pdev->dev, id: "tsu_clk" ); |
3362 | if (!IS_ERR(ptr: tsu_clk)) |
3363 | tsu_rate = clk_get_rate(clk: tsu_clk); |
3364 | /* try pclk instead */ |
3365 | else if (!IS_ERR(ptr: bp->pclk)) { |
3366 | tsu_clk = bp->pclk; |
3367 | tsu_rate = clk_get_rate(clk: tsu_clk); |
3368 | } else |
3369 | return -ENOTSUPP; |
3370 | return tsu_rate; |
3371 | } |
3372 | |
3373 | static s32 gem_get_ptp_max_adj(void) |
3374 | { |
3375 | return 64000000; |
3376 | } |
3377 | |
3378 | static int gem_get_ts_info(struct net_device *dev, |
3379 | struct ethtool_ts_info *info) |
3380 | { |
3381 | struct macb *bp = netdev_priv(dev); |
3382 | |
3383 | if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { |
		ethtool_op_get_ts_info(dev, info);
3385 | return 0; |
3386 | } |
3387 | |
3388 | info->so_timestamping = |
3389 | SOF_TIMESTAMPING_TX_SOFTWARE | |
3390 | SOF_TIMESTAMPING_RX_SOFTWARE | |
3391 | SOF_TIMESTAMPING_SOFTWARE | |
3392 | SOF_TIMESTAMPING_TX_HARDWARE | |
3393 | SOF_TIMESTAMPING_RX_HARDWARE | |
3394 | SOF_TIMESTAMPING_RAW_HARDWARE; |
3395 | info->tx_types = |
3396 | (1 << HWTSTAMP_TX_ONESTEP_SYNC) | |
3397 | (1 << HWTSTAMP_TX_OFF) | |
3398 | (1 << HWTSTAMP_TX_ON); |
3399 | info->rx_filters = |
3400 | (1 << HWTSTAMP_FILTER_NONE) | |
3401 | (1 << HWTSTAMP_FILTER_ALL); |
3402 | |
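	/* Report the PTP hardware clock registered by gem_ptp_init(), or -1
	 * so user space knows that no PHC is available.
	 */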
	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
3404 | |
3405 | return 0; |
3406 | } |
3407 | |
3408 | static struct macb_ptp_info gem_ptp_info = { |
3409 | .ptp_init = gem_ptp_init, |
3410 | .ptp_remove = gem_ptp_remove, |
3411 | .get_ptp_max_adj = gem_get_ptp_max_adj, |
3412 | .get_tsu_rate = gem_get_tsu_rate, |
3413 | .get_ts_info = gem_get_ts_info, |
3414 | .get_hwtst = gem_get_hwtst, |
3415 | .set_hwtst = gem_set_hwtst, |
3416 | }; |
3417 | #endif |
3418 | |
3419 | static int macb_get_ts_info(struct net_device *netdev, |
3420 | struct ethtool_ts_info *info) |
3421 | { |
	struct macb *bp = netdev_priv(netdev);
3423 | |
3424 | if (bp->ptp_info) |
3425 | return bp->ptp_info->get_ts_info(netdev, info); |
3426 | |
	return ethtool_op_get_ts_info(netdev, info);
3428 | } |
3429 | |
3430 | static void gem_enable_flow_filters(struct macb *bp, bool enable) |
3431 | { |
3432 | struct net_device *netdev = bp->dev; |
3433 | struct ethtool_rx_fs_item *item; |
3434 | u32 t2_scr; |
3435 | int num_t2_scr; |
3436 | |
3437 | if (!(netdev->features & NETIF_F_NTUPLE)) |
3438 | return; |
3439 | |
3440 | num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); |
3441 | |
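	/* DCFG8's T2SCR field reports how many type-2 screening registers this
	 * GEM instance was synthesized with; rules beyond that range are
	 * skipped rather than programmed.
	 */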
3442 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
3443 | struct ethtool_rx_flow_spec *fs = &item->fs; |
3444 | struct ethtool_tcpip4_spec *tp4sp_m; |
3445 | |
3446 | if (fs->location >= num_t2_scr) |
3447 | continue; |
3448 | |
3449 | t2_scr = gem_readl_n(bp, SCRT2, fs->location); |
3450 | |
3451 | /* enable/disable screener regs for the flow entry */ |
3452 | t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); |
3453 | |
3454 | /* only enable fields with no masking */ |
3455 | tp4sp_m = &(fs->m_u.tcp_ip4_spec); |
3456 | |
3457 | if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) |
3458 | t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); |
3459 | else |
3460 | t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); |
3461 | |
3462 | if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) |
3463 | t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); |
3464 | else |
3465 | t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); |
3466 | |
3467 | if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) |
3468 | t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); |
3469 | else |
3470 | t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); |
3471 | |
3472 | gem_writel_n(bp, SCRT2, fs->location, t2_scr); |
3473 | } |
3474 | } |
3475 | |
3476 | static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) |
3477 | { |
3478 | struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; |
3479 | uint16_t index = fs->location; |
3480 | u32 w0, w1, t2_scr; |
3481 | bool cmp_a = false; |
3482 | bool cmp_b = false; |
3483 | bool cmp_c = false; |
3484 | |
3485 | if (!macb_is_gem(bp)) |
3486 | return; |
3487 | |
3488 | tp4sp_v = &(fs->h_u.tcp_ip4_spec); |
3489 | tp4sp_m = &(fs->m_u.tcp_ip4_spec); |
3490 | |
3491 | /* ignore field if any masking set */ |
3492 | if (tp4sp_m->ip4src == 0xFFFFFFFF) { |
3493 | /* 1st compare reg - IP source address */ |
3494 | w0 = 0; |
3495 | w1 = 0; |
3496 | w0 = tp4sp_v->ip4src; |
3497 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ |
3498 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); |
3499 | w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); |
3500 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); |
3501 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); |
3502 | cmp_a = true; |
3503 | } |
3504 | |
3505 | /* ignore field if any masking set */ |
3506 | if (tp4sp_m->ip4dst == 0xFFFFFFFF) { |
3507 | /* 2nd compare reg - IP destination address */ |
3508 | w0 = 0; |
3509 | w1 = 0; |
3510 | w0 = tp4sp_v->ip4dst; |
3511 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ |
3512 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); |
3513 | w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); |
3514 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); |
3515 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); |
3516 | cmp_b = true; |
3517 | } |
3518 | |
3519 | /* ignore both port fields if masking set in both */ |
3520 | if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { |
3521 | /* 3rd compare reg - source port, destination port */ |
3522 | w0 = 0; |
3523 | w1 = 0; |
3524 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); |
3525 | if (tp4sp_m->psrc == tp4sp_m->pdst) { |
3526 | w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); |
3527 | w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); |
3528 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ |
3529 | w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); |
3530 | } else { |
3531 | /* only one port definition */ |
3532 | w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ |
3533 | w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); |
3534 | if (tp4sp_m->psrc == 0xFFFF) { /* src port */ |
3535 | w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); |
3536 | w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); |
3537 | } else { /* dst port */ |
3538 | w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); |
3539 | w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); |
3540 | } |
3541 | } |
3542 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); |
3543 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); |
3544 | cmp_c = true; |
3545 | } |
3546 | |
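	/* Compose the screening register: route matching frames to the
	 * requested queue and reference whichever comparers (A/B/C) were
	 * programmed above.
	 */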
3547 | t2_scr = 0; |
3548 | t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); |
3549 | t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); |
3550 | if (cmp_a) |
3551 | t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); |
3552 | if (cmp_b) |
3553 | t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); |
3554 | if (cmp_c) |
3555 | t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); |
3556 | gem_writel_n(bp, SCRT2, index, t2_scr); |
3557 | } |
3558 | |
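/* Illustrative ethtool usage (hypothetical interface name): a rule such as
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 1
 * reaches gem_add_flow_filter() below and is programmed by
 * gem_prog_cmp_regs() above as an IP-destination comparer plus a port
 * comparer, both referenced from the rule's screening register.
 */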
3559 | static int gem_add_flow_filter(struct net_device *netdev, |
3560 | struct ethtool_rxnfc *cmd) |
3561 | { |
	struct macb *bp = netdev_priv(netdev);
3563 | struct ethtool_rx_flow_spec *fs = &cmd->fs; |
3564 | struct ethtool_rx_fs_item *item, *newfs; |
3565 | unsigned long flags; |
3566 | int ret = -EINVAL; |
3567 | bool added = false; |
3568 | |
	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
	if (!newfs)
		return -ENOMEM;
	memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3573 | |
3574 | netdev_dbg(netdev, |
3575 | "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n" , |
3576 | fs->flow_type, (int)fs->ring_cookie, fs->location, |
3577 | htonl(fs->h_u.tcp_ip4_spec.ip4src), |
3578 | htonl(fs->h_u.tcp_ip4_spec.ip4dst), |
3579 | be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc), |
3580 | be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst)); |
3581 | |
3582 | spin_lock_irqsave(&bp->rx_fs_lock, flags); |
3583 | |
3584 | /* find correct place to add in list */ |
3585 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
3586 | if (item->fs.location > newfs->fs.location) { |
			list_add_tail(&newfs->list, &item->list);
3588 | added = true; |
3589 | break; |
3590 | } else if (item->fs.location == fs->location) { |
			netdev_err(netdev, "Rule not added: location %d not free!\n",
3592 | fs->location); |
3593 | ret = -EBUSY; |
3594 | goto err; |
3595 | } |
3596 | } |
3597 | if (!added) |
		list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3599 | |
3600 | gem_prog_cmp_regs(bp, fs); |
3601 | bp->rx_fs_list.count++; |
3602 | /* enable filtering if NTUPLE on */ |
	gem_enable_flow_filters(bp, 1);
3604 | |
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3606 | return 0; |
3607 | |
3608 | err: |
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	kfree(newfs);
3611 | return ret; |
3612 | } |
3613 | |
3614 | static int gem_del_flow_filter(struct net_device *netdev, |
3615 | struct ethtool_rxnfc *cmd) |
3616 | { |
	struct macb *bp = netdev_priv(netdev);
3618 | struct ethtool_rx_fs_item *item; |
3619 | struct ethtool_rx_flow_spec *fs; |
3620 | unsigned long flags; |
3621 | |
3622 | spin_lock_irqsave(&bp->rx_fs_lock, flags); |
3623 | |
3624 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
3625 | if (item->fs.location == cmd->fs.location) { |
3626 | /* disable screener regs for the flow entry */ |
3627 | fs = &(item->fs); |
3628 | netdev_dbg(netdev, |
3629 | "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n" , |
3630 | fs->flow_type, (int)fs->ring_cookie, fs->location, |
3631 | htonl(fs->h_u.tcp_ip4_spec.ip4src), |
3632 | htonl(fs->h_u.tcp_ip4_spec.ip4dst), |
3633 | be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc), |
3634 | be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst)); |
3635 | |
3636 | gem_writel_n(bp, SCRT2, fs->location, 0); |
3637 | |
			list_del(&item->list);
			bp->rx_fs_list.count--;
			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
			kfree(item);
3642 | return 0; |
3643 | } |
3644 | } |
3645 | |
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3647 | return -EINVAL; |
3648 | } |
3649 | |
3650 | static int gem_get_flow_entry(struct net_device *netdev, |
3651 | struct ethtool_rxnfc *cmd) |
3652 | { |
	struct macb *bp = netdev_priv(netdev);
3654 | struct ethtool_rx_fs_item *item; |
3655 | |
3656 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
3657 | if (item->fs.location == cmd->fs.location) { |
3658 | memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); |
3659 | return 0; |
3660 | } |
3661 | } |
3662 | return -EINVAL; |
3663 | } |
3664 | |
3665 | static int gem_get_all_flow_entries(struct net_device *netdev, |
3666 | struct ethtool_rxnfc *cmd, u32 *rule_locs) |
3667 | { |
	struct macb *bp = netdev_priv(netdev);
3669 | struct ethtool_rx_fs_item *item; |
3670 | uint32_t cnt = 0; |
3671 | |
3672 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
3673 | if (cnt == cmd->rule_cnt) |
3674 | return -EMSGSIZE; |
3675 | rule_locs[cnt] = item->fs.location; |
3676 | cnt++; |
3677 | } |
3678 | cmd->data = bp->max_tuples; |
3679 | cmd->rule_cnt = cnt; |
3680 | |
3681 | return 0; |
3682 | } |
3683 | |
3684 | static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, |
3685 | u32 *rule_locs) |
3686 | { |
	struct macb *bp = netdev_priv(netdev);
3688 | int ret = 0; |
3689 | |
3690 | switch (cmd->cmd) { |
3691 | case ETHTOOL_GRXRINGS: |
3692 | cmd->data = bp->num_queues; |
3693 | break; |
3694 | case ETHTOOL_GRXCLSRLCNT: |
3695 | cmd->rule_cnt = bp->rx_fs_list.count; |
3696 | break; |
3697 | case ETHTOOL_GRXCLSRULE: |
3698 | ret = gem_get_flow_entry(netdev, cmd); |
3699 | break; |
3700 | case ETHTOOL_GRXCLSRLALL: |
3701 | ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); |
3702 | break; |
3703 | default: |
		netdev_err(netdev,
			   "Command parameter %d is not supported\n", cmd->cmd);
3706 | ret = -EOPNOTSUPP; |
3707 | } |
3708 | |
3709 | return ret; |
3710 | } |
3711 | |
3712 | static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) |
3713 | { |
	struct macb *bp = netdev_priv(netdev);
3715 | int ret; |
3716 | |
3717 | switch (cmd->cmd) { |
3718 | case ETHTOOL_SRXCLSRLINS: |
		if (cmd->fs.location >= bp->max_tuples ||
		    cmd->fs.ring_cookie >= bp->num_queues) {
3721 | ret = -EINVAL; |
3722 | break; |
3723 | } |
3724 | ret = gem_add_flow_filter(netdev, cmd); |
3725 | break; |
3726 | case ETHTOOL_SRXCLSRLDEL: |
3727 | ret = gem_del_flow_filter(netdev, cmd); |
3728 | break; |
3729 | default: |
		netdev_err(netdev,
			   "Command parameter %d is not supported\n", cmd->cmd);
3732 | ret = -EOPNOTSUPP; |
3733 | } |
3734 | |
3735 | return ret; |
3736 | } |
3737 | |
3738 | static const struct ethtool_ops macb_ethtool_ops = { |
3739 | .get_regs_len = macb_get_regs_len, |
3740 | .get_regs = macb_get_regs, |
3741 | .get_link = ethtool_op_get_link, |
3742 | .get_ts_info = ethtool_op_get_ts_info, |
3743 | .get_wol = macb_get_wol, |
3744 | .set_wol = macb_set_wol, |
3745 | .get_link_ksettings = macb_get_link_ksettings, |
3746 | .set_link_ksettings = macb_set_link_ksettings, |
3747 | .get_ringparam = macb_get_ringparam, |
3748 | .set_ringparam = macb_set_ringparam, |
3749 | }; |
3750 | |
3751 | static const struct ethtool_ops gem_ethtool_ops = { |
3752 | .get_regs_len = macb_get_regs_len, |
3753 | .get_regs = macb_get_regs, |
3754 | .get_wol = macb_get_wol, |
3755 | .set_wol = macb_set_wol, |
3756 | .get_link = ethtool_op_get_link, |
3757 | .get_ts_info = macb_get_ts_info, |
3758 | .get_ethtool_stats = gem_get_ethtool_stats, |
3759 | .get_strings = gem_get_ethtool_strings, |
3760 | .get_sset_count = gem_get_sset_count, |
3761 | .get_link_ksettings = macb_get_link_ksettings, |
3762 | .set_link_ksettings = macb_set_link_ksettings, |
3763 | .get_ringparam = macb_get_ringparam, |
3764 | .set_ringparam = macb_set_ringparam, |
3765 | .get_rxnfc = gem_get_rxnfc, |
3766 | .set_rxnfc = gem_set_rxnfc, |
3767 | }; |
3768 | |
3769 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
3770 | { |
3771 | struct macb *bp = netdev_priv(dev); |
3772 | |
3773 | if (!netif_running(dev)) |
3774 | return -EINVAL; |
3775 | |
3776 | return phylink_mii_ioctl(bp->phylink, rq, cmd); |
3777 | } |
3778 | |
3779 | static int macb_hwtstamp_get(struct net_device *dev, |
3780 | struct kernel_hwtstamp_config *cfg) |
3781 | { |
3782 | struct macb *bp = netdev_priv(dev); |
3783 | |
3784 | if (!netif_running(dev)) |
3785 | return -EINVAL; |
3786 | |
3787 | if (!bp->ptp_info) |
3788 | return -EOPNOTSUPP; |
3789 | |
3790 | return bp->ptp_info->get_hwtst(dev, cfg); |
3791 | } |
3792 | |
3793 | static int macb_hwtstamp_set(struct net_device *dev, |
3794 | struct kernel_hwtstamp_config *cfg, |
3795 | struct netlink_ext_ack *extack) |
3796 | { |
3797 | struct macb *bp = netdev_priv(dev); |
3798 | |
3799 | if (!netif_running(dev)) |
3800 | return -EINVAL; |
3801 | |
3802 | if (!bp->ptp_info) |
3803 | return -EOPNOTSUPP; |
3804 | |
3805 | return bp->ptp_info->set_hwtst(dev, cfg, extack); |
3806 | } |
3807 | |
3808 | static inline void macb_set_txcsum_feature(struct macb *bp, |
3809 | netdev_features_t features) |
3810 | { |
3811 | u32 val; |
3812 | |
3813 | if (!macb_is_gem(bp)) |
3814 | return; |
3815 | |
3816 | val = gem_readl(bp, DMACFG); |
3817 | if (features & NETIF_F_HW_CSUM) |
3818 | val |= GEM_BIT(TXCOEN); |
3819 | else |
3820 | val &= ~GEM_BIT(TXCOEN); |
3821 | |
3822 | gem_writel(bp, DMACFG, val); |
3823 | } |
3824 | |
3825 | static inline void macb_set_rxcsum_feature(struct macb *bp, |
3826 | netdev_features_t features) |
3827 | { |
3828 | struct net_device *netdev = bp->dev; |
3829 | u32 val; |
3830 | |
3831 | if (!macb_is_gem(bp)) |
3832 | return; |
3833 | |
3834 | val = gem_readl(bp, NCFGR); |
3835 | if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) |
3836 | val |= GEM_BIT(RXCOEN); |
3837 | else |
3838 | val &= ~GEM_BIT(RXCOEN); |
3839 | |
3840 | gem_writel(bp, NCFGR, val); |
3841 | } |
3842 | |
3843 | static inline void macb_set_rxflow_feature(struct macb *bp, |
3844 | netdev_features_t features) |
3845 | { |
3846 | if (!macb_is_gem(bp)) |
3847 | return; |
3848 | |
	gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3850 | } |
3851 | |
3852 | static int macb_set_features(struct net_device *netdev, |
3853 | netdev_features_t features) |
3854 | { |
	struct macb *bp = netdev_priv(netdev);
3856 | netdev_features_t changed = features ^ netdev->features; |
3857 | |
3858 | /* TX checksum offload */ |
3859 | if (changed & NETIF_F_HW_CSUM) |
3860 | macb_set_txcsum_feature(bp, features); |
3861 | |
3862 | /* RX checksum offload */ |
3863 | if (changed & NETIF_F_RXCSUM) |
3864 | macb_set_rxcsum_feature(bp, features); |
3865 | |
3866 | /* RX Flow Filters */ |
3867 | if (changed & NETIF_F_NTUPLE) |
3868 | macb_set_rxflow_feature(bp, features); |
3869 | |
3870 | return 0; |
3871 | } |
3872 | |
3873 | static void macb_restore_features(struct macb *bp) |
3874 | { |
3875 | struct net_device *netdev = bp->dev; |
3876 | netdev_features_t features = netdev->features; |
3877 | struct ethtool_rx_fs_item *item; |
3878 | |
3879 | /* TX checksum offload */ |
3880 | macb_set_txcsum_feature(bp, features); |
3881 | |
3882 | /* RX checksum offload */ |
3883 | macb_set_rxcsum_feature(bp, features); |
3884 | |
3885 | /* RX Flow Filters */ |
3886 | list_for_each_entry(item, &bp->rx_fs_list.list, list) |
		gem_prog_cmp_regs(bp, &item->fs);
3888 | |
3889 | macb_set_rxflow_feature(bp, features); |
3890 | } |
3891 | |
3892 | static const struct net_device_ops macb_netdev_ops = { |
3893 | .ndo_open = macb_open, |
3894 | .ndo_stop = macb_close, |
3895 | .ndo_start_xmit = macb_start_xmit, |
3896 | .ndo_set_rx_mode = macb_set_rx_mode, |
3897 | .ndo_get_stats = macb_get_stats, |
3898 | .ndo_eth_ioctl = macb_ioctl, |
3899 | .ndo_validate_addr = eth_validate_addr, |
3900 | .ndo_change_mtu = macb_change_mtu, |
3901 | .ndo_set_mac_address = macb_set_mac_addr, |
3902 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3903 | .ndo_poll_controller = macb_poll_controller, |
3904 | #endif |
3905 | .ndo_set_features = macb_set_features, |
3906 | .ndo_features_check = macb_features_check, |
3907 | .ndo_hwtstamp_set = macb_hwtstamp_set, |
3908 | .ndo_hwtstamp_get = macb_hwtstamp_get, |
3909 | }; |
3910 | |
3911 | /* Configure peripheral capabilities according to device tree |
3912 | * and integration options used |
3913 | */ |
3914 | static void macb_configure_caps(struct macb *bp, |
3915 | const struct macb_config *dt_conf) |
3916 | { |
3917 | u32 dcfg; |
3918 | |
3919 | if (dt_conf) |
3920 | bp->caps = dt_conf->caps; |
3921 | |
	if (hw_is_gem(bp->regs, bp->native_io)) {
3923 | bp->caps |= MACB_CAPS_MACB_IS_GEM; |
3924 | |
3925 | dcfg = gem_readl(bp, DCFG1); |
3926 | if (GEM_BFEXT(IRQCOR, dcfg) == 0) |
3927 | bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; |
3928 | if (GEM_BFEXT(NO_PCS, dcfg) == 0) |
3929 | bp->caps |= MACB_CAPS_PCS; |
3930 | dcfg = gem_readl(bp, DCFG12); |
3931 | if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1) |
3932 | bp->caps |= MACB_CAPS_HIGH_SPEED; |
3933 | dcfg = gem_readl(bp, DCFG2); |
3934 | if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) |
3935 | bp->caps |= MACB_CAPS_FIFO_MODE; |
3936 | if (gem_has_ptp(bp)) { |
3937 | if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) |
				dev_err(&bp->pdev->dev,
					"GEM doesn't support hardware ptp.\n");
3940 | else { |
3941 | #ifdef CONFIG_MACB_USE_HWSTAMP |
3942 | bp->hw_dma_cap |= HW_DMA_CAP_PTP; |
3943 | bp->ptp_info = &gem_ptp_info; |
3944 | #endif |
3945 | } |
3946 | } |
3947 | } |
3948 | |
	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3950 | } |
3951 | |
3952 | static void macb_probe_queues(void __iomem *mem, |
3953 | bool native_io, |
3954 | unsigned int *queue_mask, |
3955 | unsigned int *num_queues) |
3956 | { |
3957 | *queue_mask = 0x1; |
3958 | *num_queues = 1; |
3959 | |
	/* Is it MACB or GEM?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag set yet.
	 */
	if (!hw_is_gem(mem, native_io))
3967 | return; |
3968 | |
3969 | /* bit 0 is never set but queue 0 always exists */ |
3970 | *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; |
3971 | *num_queues = hweight32(*queue_mask); |
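	/* Example: a DCFG6 low byte of 0x06 yields a queue mask of 0x07 and
	 * hence three usable queues.
	 */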
3972 | } |
3973 | |
3974 | static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk, |
3975 | struct clk *rx_clk, struct clk *tsu_clk) |
3976 | { |
3977 | struct clk_bulk_data clks[] = { |
3978 | { .clk = tsu_clk, }, |
3979 | { .clk = rx_clk, }, |
3980 | { .clk = pclk, }, |
3981 | { .clk = hclk, }, |
3982 | { .clk = tx_clk }, |
3983 | }; |
3984 | |
3985 | clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks); |
3986 | } |
3987 | |
3988 | static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, |
3989 | struct clk **hclk, struct clk **tx_clk, |
3990 | struct clk **rx_clk, struct clk **tsu_clk) |
3991 | { |
3992 | struct macb_platform_data *pdata; |
3993 | int err; |
3994 | |
	pdata = dev_get_platdata(&pdev->dev);
3996 | if (pdata) { |
3997 | *pclk = pdata->pclk; |
3998 | *hclk = pdata->hclk; |
3999 | } else { |
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
4002 | } |
4003 | |
	if (IS_ERR_OR_NULL(*pclk))
		return dev_err_probe(&pdev->dev,
				     IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
				     "failed to get pclk\n");

	if (IS_ERR_OR_NULL(*hclk))
		return dev_err_probe(&pdev->dev,
				     IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
				     "failed to get hclk\n");
4013 | |
	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		return PTR_ERR(*rx_clk);

	*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
	if (IS_ERR(*tsu_clk))
		return PTR_ERR(*tsu_clk);
4025 | |
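	/* clk_prepare_enable() treats a NULL clk as a no-op, so the optional
	 * tx/rx/tsu clocks (NULL when absent) are safe to enable here.
	 */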
	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4029 | return err; |
4030 | } |
4031 | |
	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
4035 | goto err_disable_pclk; |
4036 | } |
4037 | |
	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4041 | goto err_disable_hclk; |
4042 | } |
4043 | |
	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
4047 | goto err_disable_txclk; |
4048 | } |
4049 | |
	err = clk_prepare_enable(*tsu_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
4053 | goto err_disable_rxclk; |
4054 | } |
4055 | |
4056 | return 0; |
4057 | |
err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);
4069 | |
4070 | return err; |
4071 | } |
4072 | |
4073 | static int macb_init(struct platform_device *pdev) |
4074 | { |
4075 | struct net_device *dev = platform_get_drvdata(pdev); |
4076 | unsigned int hw_q, q; |
4077 | struct macb *bp = netdev_priv(dev); |
4078 | struct macb_queue *queue; |
4079 | int err; |
4080 | u32 val, reg; |
4081 | |
4082 | bp->tx_ring_size = DEFAULT_TX_RING_SIZE; |
4083 | bp->rx_ring_size = DEFAULT_RX_RING_SIZE; |
4084 | |
	/* Set the queue register mapping once and for all: queue0 has a
	 * special register layout, but we don't want to test the queue index
	 * and then compute the corresponding register offset at run time.
	 */
4089 | for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { |
4090 | if (!(bp->queue_mask & (1 << hw_q))) |
4091 | continue; |
4092 | |
4093 | queue = &bp->queues[q]; |
4094 | queue->bp = bp; |
4095 | spin_lock_init(&queue->tx_ptr_lock); |
		netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
		netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
4098 | if (hw_q) { |
4099 | queue->ISR = GEM_ISR(hw_q - 1); |
4100 | queue->IER = GEM_IER(hw_q - 1); |
4101 | queue->IDR = GEM_IDR(hw_q - 1); |
4102 | queue->IMR = GEM_IMR(hw_q - 1); |
4103 | queue->TBQP = GEM_TBQP(hw_q - 1); |
4104 | queue->RBQP = GEM_RBQP(hw_q - 1); |
4105 | queue->RBQS = GEM_RBQS(hw_q - 1); |
4106 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
4107 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
4108 | queue->TBQPH = GEM_TBQPH(hw_q - 1); |
4109 | queue->RBQPH = GEM_RBQPH(hw_q - 1); |
4110 | } |
4111 | #endif |
4112 | } else { |
4113 | /* queue0 uses legacy registers */ |
4114 | queue->ISR = MACB_ISR; |
4115 | queue->IER = MACB_IER; |
4116 | queue->IDR = MACB_IDR; |
4117 | queue->IMR = MACB_IMR; |
4118 | queue->TBQP = MACB_TBQP; |
4119 | queue->RBQP = MACB_RBQP; |
4120 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
4121 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
4122 | queue->TBQPH = MACB_TBQPH; |
4123 | queue->RBQPH = MACB_RBQPH; |
4124 | } |
4125 | #endif |
4126 | } |
4127 | |
4128 | /* get irq: here we use the linux queue index, not the hardware |
4129 | * queue index. the queue irq definitions in the device tree |
4130 | * must remove the optional gaps that could exist in the |
4131 | * hardware queue mask. |
4132 | */ |
4133 | queue->irq = platform_get_irq(pdev, q); |
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
4139 | queue->irq, err); |
4140 | return err; |
4141 | } |
4142 | |
4143 | INIT_WORK(&queue->tx_error_task, macb_tx_error_task); |
4144 | q++; |
4145 | } |
4146 | |
4147 | dev->netdev_ops = &macb_netdev_ops; |
4148 | |
	/* setup appropriate routines according to adapter type */
4150 | if (macb_is_gem(bp)) { |
4151 | bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; |
4152 | bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; |
4153 | bp->macbgem_ops.mog_init_rings = gem_init_rings; |
4154 | bp->macbgem_ops.mog_rx = gem_rx; |
4155 | dev->ethtool_ops = &gem_ethtool_ops; |
4156 | } else { |
4157 | bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; |
4158 | bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; |
4159 | bp->macbgem_ops.mog_init_rings = macb_init_rings; |
4160 | bp->macbgem_ops.mog_rx = macb_rx; |
4161 | dev->ethtool_ops = &macb_ethtool_ops; |
4162 | } |
4163 | |
4164 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
4165 | |
4166 | /* Set features */ |
4167 | dev->hw_features = NETIF_F_SG; |
4168 | |
4169 | /* Check LSO capability */ |
4170 | if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) |
4171 | dev->hw_features |= MACB_NETIF_LSO; |
4172 | |
4173 | /* Checksum offload is only available on gem with packet buffer */ |
4174 | if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) |
4175 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; |
4176 | if (bp->caps & MACB_CAPS_SG_DISABLED) |
4177 | dev->hw_features &= ~NETIF_F_SG; |
4178 | dev->features = dev->hw_features; |
4179 | |
4180 | /* Check RX Flow Filters support. |
4181 | * Max Rx flows set by availability of screeners & compare regs: |
4182 | * each 4-tuple define requires 1 T2 screener reg + 3 compare regs |
4183 | */ |
4184 | reg = gem_readl(bp, DCFG8); |
4185 | bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), |
4186 | GEM_BFEXT(T2SCR, reg)); |
	INIT_LIST_HEAD(&bp->rx_fs_list.list);
4188 | if (bp->max_tuples > 0) { |
4189 | /* also needs one ethtype match to check IPv4 */ |
4190 | if (GEM_BFEXT(SCR2ETH, reg) > 0) { |
4191 | /* program this reg now */ |
4192 | reg = 0; |
4193 | reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); |
4194 | gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); |
4195 | /* Filtering is supported in hw but don't enable it in kernel now */ |
4196 | dev->hw_features |= NETIF_F_NTUPLE; |
4197 | /* init Rx flow definitions */ |
4198 | bp->rx_fs_list.count = 0; |
4199 | spin_lock_init(&bp->rx_fs_lock); |
4200 | } else |
4201 | bp->max_tuples = 0; |
4202 | } |
4203 | |
4204 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { |
4205 | val = 0; |
		if (phy_interface_mode_is_rgmii(bp->phy_interface))
4207 | val = bp->usrio->rgmii; |
4208 | else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && |
4209 | (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) |
4210 | val = bp->usrio->rmii; |
4211 | else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) |
4212 | val = bp->usrio->mii; |
4213 | |
4214 | if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) |
4215 | val |= bp->usrio->refclk; |
4216 | |
4217 | macb_or_gem_writel(bp, USRIO, val); |
4218 | } |
4219 | |
4220 | /* Set MII management clock divider */ |
4221 | val = macb_mdc_clk_div(bp); |
4222 | val |= macb_dbw(bp); |
4223 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) |
4224 | val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); |
4225 | macb_writel(bp, NCFGR, val); |
4226 | |
4227 | return 0; |
4228 | } |
4229 | |
4230 | static const struct macb_usrio_config macb_default_usrio = { |
4231 | .mii = MACB_BIT(MII), |
4232 | .rmii = MACB_BIT(RMII), |
4233 | .rgmii = GEM_BIT(RGMII), |
4234 | .refclk = MACB_BIT(CLKEN), |
4235 | }; |
4236 | |
4237 | #if defined(CONFIG_OF) |
4238 | /* 1518 rounded up */ |
4239 | #define AT91ETHER_MAX_RBUFF_SZ 0x600 |
4240 | /* max number of receive buffers */ |
4241 | #define AT91ETHER_MAX_RX_DESCR 9 |
4242 | |
4243 | static struct sifive_fu540_macb_mgmt *mgmt; |
4244 | |
4245 | static int at91ether_alloc_coherent(struct macb *lp) |
4246 | { |
4247 | struct macb_queue *q = &lp->queues[0]; |
4248 | |
	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					(AT91ETHER_MAX_RX_DESCR *
					 macb_dma_desc_get_size(lp)),
					&q->rx_ring_dma, GFP_KERNEL);
4253 | if (!q->rx_ring) |
4254 | return -ENOMEM; |
4255 | |
	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					   AT91ETHER_MAX_RX_DESCR *
					   AT91ETHER_MAX_RBUFF_SZ,
					   &q->rx_buffers_dma, GFP_KERNEL);
	if (!q->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  q->rx_ring, q->rx_ring_dma);
4265 | q->rx_ring = NULL; |
4266 | return -ENOMEM; |
4267 | } |
4268 | |
4269 | return 0; |
4270 | } |
4271 | |
4272 | static void at91ether_free_coherent(struct macb *lp) |
4273 | { |
4274 | struct macb_queue *q = &lp->queues[0]; |
4275 | |
4276 | if (q->rx_ring) { |
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  q->rx_ring, q->rx_ring_dma);
		q->rx_ring = NULL;
	}

	if (q->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  AT91ETHER_MAX_RBUFF_SZ,
				  q->rx_buffers, q->rx_buffers_dma);
4289 | q->rx_buffers = NULL; |
4290 | } |
4291 | } |
4292 | |
4293 | /* Initialize and start the Receiver and Transmit subsystems */ |
4294 | static int at91ether_start(struct macb *lp) |
4295 | { |
4296 | struct macb_queue *q = &lp->queues[0]; |
4297 | struct macb_dma_desc *desc; |
4298 | dma_addr_t addr; |
4299 | u32 ctl; |
4300 | int i, ret; |
4301 | |
4302 | ret = at91ether_alloc_coherent(lp); |
4303 | if (ret) |
4304 | return ret; |
4305 | |
4306 | addr = q->rx_buffers_dma; |
4307 | for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { |
		desc = macb_rx_desc(q, i);
		macb_set_addr(lp, desc, addr);
4310 | desc->ctrl = 0; |
4311 | addr += AT91ETHER_MAX_RBUFF_SZ; |
4312 | } |
4313 | |
4314 | /* Set the Wrap bit on the last descriptor */ |
4315 | desc->addr |= MACB_BIT(RX_WRAP); |
4316 | |
4317 | /* Reset buffer index */ |
4318 | q->rx_tail = 0; |
4319 | |
4320 | /* Program address of descriptor list in Rx Buffer Queue register */ |
4321 | macb_writel(lp, RBQP, q->rx_ring_dma); |
4322 | |
4323 | /* Enable Receive and Transmit */ |
4324 | ctl = macb_readl(lp, NCR); |
4325 | macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); |
4326 | |
4327 | /* Enable MAC interrupts */ |
4328 | macb_writel(lp, IER, MACB_BIT(RCOMP) | |
4329 | MACB_BIT(RXUBR) | |
4330 | MACB_BIT(ISR_TUND) | |
4331 | MACB_BIT(ISR_RLE) | |
4332 | MACB_BIT(TCOMP) | |
4333 | MACB_BIT(ISR_ROVR) | |
4334 | MACB_BIT(HRESP)); |
4335 | |
4336 | return 0; |
4337 | } |
4338 | |
4339 | static void at91ether_stop(struct macb *lp) |
4340 | { |
4341 | u32 ctl; |
4342 | |
4343 | /* Disable MAC interrupts */ |
4344 | macb_writel(lp, IDR, MACB_BIT(RCOMP) | |
4345 | MACB_BIT(RXUBR) | |
4346 | MACB_BIT(ISR_TUND) | |
4347 | MACB_BIT(ISR_RLE) | |
4348 | MACB_BIT(TCOMP) | |
4349 | MACB_BIT(ISR_ROVR) | |
4350 | MACB_BIT(HRESP)); |
4351 | |
4352 | /* Disable Receiver and Transmitter */ |
4353 | ctl = macb_readl(lp, NCR); |
4354 | macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); |
4355 | |
4356 | /* Free resources. */ |
4357 | at91ether_free_coherent(lp); |
4358 | } |
4359 | |
4360 | /* Open the ethernet interface */ |
4361 | static int at91ether_open(struct net_device *dev) |
4362 | { |
4363 | struct macb *lp = netdev_priv(dev); |
4364 | u32 ctl; |
4365 | int ret; |
4366 | |
	ret = pm_runtime_resume_and_get(&lp->pdev->dev);
4368 | if (ret < 0) |
4369 | return ret; |
4370 | |
4371 | /* Clear internal statistics */ |
4372 | ctl = macb_readl(lp, NCR); |
4373 | macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); |
4374 | |
	macb_set_hwaddr(lp);
4376 | |
4377 | ret = at91ether_start(lp); |
4378 | if (ret) |
4379 | goto pm_exit; |
4380 | |
	ret = macb_phylink_connect(lp);
4382 | if (ret) |
4383 | goto stop; |
4384 | |
4385 | netif_start_queue(dev); |
4386 | |
4387 | return 0; |
4388 | |
4389 | stop: |
4390 | at91ether_stop(lp); |
4391 | pm_exit: |
	pm_runtime_put_sync(&lp->pdev->dev);
4393 | return ret; |
4394 | } |
4395 | |
4396 | /* Close the interface */ |
4397 | static int at91ether_close(struct net_device *dev) |
4398 | { |
4399 | struct macb *lp = netdev_priv(dev); |
4400 | |
4401 | netif_stop_queue(dev); |
4402 | |
4403 | phylink_stop(lp->phylink); |
4404 | phylink_disconnect_phy(lp->phylink); |
4405 | |
4406 | at91ether_stop(lp); |
4407 | |
	return pm_runtime_put(&lp->pdev->dev);
4409 | } |
4410 | |
4411 | /* Transmit packet */ |
4412 | static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, |
4413 | struct net_device *dev) |
4414 | { |
4415 | struct macb *lp = netdev_priv(dev); |
4416 | |
4417 | if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { |
4418 | int desc = 0; |
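		/* The RM9200 EMAC handles a single in-flight frame, so only
		 * slot 0 of rm9200_txq is used and the queue stays stopped
		 * until the TCOMP interrupt wakes it again.
		 */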
4419 | |
4420 | netif_stop_queue(dev); |
4421 | |
4422 | /* Store packet information (to free when Tx completed) */ |
4423 | lp->rm9200_txq[desc].skb = skb; |
4424 | lp->rm9200_txq[desc].size = skb->len; |
4425 | lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, |
4426 | skb->len, DMA_TO_DEVICE); |
		if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
4432 | } |
4433 | |
4434 | /* Set address of the data in the Transmit Address register */ |
4435 | macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); |
4436 | /* Set length of the packet in the Transmit Control register */ |
4437 | macb_writel(lp, TCR, skb->len); |
4438 | |
4439 | } else { |
4440 | netdev_err(dev, format: "%s called, but device is busy!\n" , __func__); |
4441 | return NETDEV_TX_BUSY; |
4442 | } |
4443 | |
4444 | return NETDEV_TX_OK; |
4445 | } |
4446 | |
/* Extract received frames from the buffer descriptors and send them to
 * the upper layers. (Called from interrupt context.)
 */
4450 | static void at91ether_rx(struct net_device *dev) |
4451 | { |
4452 | struct macb *lp = netdev_priv(dev); |
4453 | struct macb_queue *q = &lp->queues[0]; |
4454 | struct macb_dma_desc *desc; |
4455 | unsigned char *p_recv; |
4456 | struct sk_buff *skb; |
4457 | unsigned int pktlen; |
4458 | |
	desc = macb_rx_desc(q, q->rx_tail);
4460 | while (desc->addr & MACB_BIT(RX_USED)) { |
4461 | p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; |
4462 | pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); |
		skb = netdev_alloc_skb(dev, pktlen + 2);
4464 | if (skb) { |
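			/* Reserve 2 bytes so the IP header is 32-bit aligned
			 * after the 14-byte Ethernet header.
			 */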
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);
4467 | |
4468 | skb->protocol = eth_type_trans(skb, dev); |
4469 | dev->stats.rx_packets++; |
4470 | dev->stats.rx_bytes += pktlen; |
4471 | netif_rx(skb); |
4472 | } else { |
4473 | dev->stats.rx_dropped++; |
4474 | } |
4475 | |
4476 | if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) |
4477 | dev->stats.multicast++; |
4478 | |
4479 | /* reset ownership bit */ |
4480 | desc->addr &= ~MACB_BIT(RX_USED); |
4481 | |
4482 | /* wrap after last buffer */ |
4483 | if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) |
4484 | q->rx_tail = 0; |
4485 | else |
4486 | q->rx_tail++; |
4487 | |
		desc = macb_rx_desc(q, q->rx_tail);
4489 | } |
4490 | } |
4491 | |
4492 | /* MAC interrupt handler */ |
4493 | static irqreturn_t at91ether_interrupt(int irq, void *dev_id) |
4494 | { |
4495 | struct net_device *dev = dev_id; |
4496 | struct macb *lp = netdev_priv(dev); |
4497 | u32 intstatus, ctl; |
4498 | unsigned int desc; |
4499 | |
4500 | /* MAC Interrupt Status register indicates what interrupts are pending. |
4501 | * It is automatically cleared once read. |
4502 | */ |
4503 | intstatus = macb_readl(lp, ISR); |
4504 | |
4505 | /* Receive complete */ |
4506 | if (intstatus & MACB_BIT(RCOMP)) |
4507 | at91ether_rx(dev); |
4508 | |
4509 | /* Transmit complete */ |
4510 | if (intstatus & MACB_BIT(TCOMP)) { |
4511 | /* The TCOM bit is set even if the transmission failed */ |
4512 | if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) |
4513 | dev->stats.tx_errors++; |
4514 | |
4515 | desc = 0; |
4516 | if (lp->rm9200_txq[desc].skb) { |
			dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
4518 | lp->rm9200_txq[desc].skb = NULL; |
4519 | dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, |
4520 | lp->rm9200_txq[desc].size, DMA_TO_DEVICE); |
4521 | dev->stats.tx_packets++; |
4522 | dev->stats.tx_bytes += lp->rm9200_txq[desc].size; |
4523 | } |
4524 | netif_wake_queue(dev); |
4525 | } |
4526 | |
4527 | /* Work-around for EMAC Errata section 41.3.1 */ |
4528 | if (intstatus & MACB_BIT(RXUBR)) { |
4529 | ctl = macb_readl(lp, NCR); |
4530 | macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); |
4531 | wmb(); |
4532 | macb_writel(lp, NCR, ctl | MACB_BIT(RE)); |
4533 | } |
4534 | |
4535 | if (intstatus & MACB_BIT(ISR_ROVR)) |
4536 | netdev_err(dev, format: "ROVR error\n" ); |
4537 | |
4538 | return IRQ_HANDLED; |
4539 | } |
4540 | |
4541 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4542 | static void at91ether_poll_controller(struct net_device *dev) |
4543 | { |
4544 | unsigned long flags; |
4545 | |
4546 | local_irq_save(flags); |
	at91ether_interrupt(dev->irq, dev);
4548 | local_irq_restore(flags); |
4549 | } |
4550 | #endif |
4551 | |
4552 | static const struct net_device_ops at91ether_netdev_ops = { |
4553 | .ndo_open = at91ether_open, |
4554 | .ndo_stop = at91ether_close, |
4555 | .ndo_start_xmit = at91ether_start_xmit, |
4556 | .ndo_get_stats = macb_get_stats, |
4557 | .ndo_set_rx_mode = macb_set_rx_mode, |
4558 | .ndo_set_mac_address = eth_mac_addr, |
4559 | .ndo_eth_ioctl = macb_ioctl, |
4560 | .ndo_validate_addr = eth_validate_addr, |
4561 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4562 | .ndo_poll_controller = at91ether_poll_controller, |
4563 | #endif |
4564 | .ndo_hwtstamp_set = macb_hwtstamp_set, |
4565 | .ndo_hwtstamp_get = macb_hwtstamp_get, |
4566 | }; |
4567 | |
4568 | static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, |
4569 | struct clk **hclk, struct clk **tx_clk, |
4570 | struct clk **rx_clk, struct clk **tsu_clk) |
4571 | { |
4572 | int err; |
4573 | |
4574 | *hclk = NULL; |
4575 | *tx_clk = NULL; |
4576 | *rx_clk = NULL; |
4577 | *tsu_clk = NULL; |
4578 | |
	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);
4582 | |
	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4586 | return err; |
4587 | } |
4588 | |
4589 | return 0; |
4590 | } |
4591 | |
4592 | static int at91ether_init(struct platform_device *pdev) |
4593 | { |
4594 | struct net_device *dev = platform_get_drvdata(pdev); |
4595 | struct macb *bp = netdev_priv(dev); |
4596 | int err; |
4597 | |
4598 | bp->queues[0].bp = bp; |
4599 | |
4600 | dev->netdev_ops = &at91ether_netdev_ops; |
4601 | dev->ethtool_ops = &macb_ethtool_ops; |
4602 | |
	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
4605 | if (err) |
4606 | return err; |
4607 | |
4608 | macb_writel(bp, NCR, 0); |
4609 | |
4610 | macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); |
4611 | |
4612 | return 0; |
4613 | } |
4614 | |
4615 | static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw, |
4616 | unsigned long parent_rate) |
4617 | { |
4618 | return mgmt->rate; |
4619 | } |
4620 | |
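/* The GEMGXL TX clock can only run at 2.5, 25 or 125 MHz (for 10/100/1000
 * Mbit/s links); requests are rounded to the nearest supported rate, with a
 * warning on anything that is not an exact match.
 */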
4621 | static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate, |
4622 | unsigned long *parent_rate) |
4623 | { |
4624 | if (WARN_ON(rate < 2500000)) |
4625 | return 2500000; |
4626 | else if (rate == 2500000) |
4627 | return 2500000; |
4628 | else if (WARN_ON(rate < 13750000)) |
4629 | return 2500000; |
4630 | else if (WARN_ON(rate < 25000000)) |
4631 | return 25000000; |
4632 | else if (rate == 25000000) |
4633 | return 25000000; |
4634 | else if (WARN_ON(rate < 75000000)) |
4635 | return 25000000; |
4636 | else if (WARN_ON(rate < 125000000)) |
4637 | return 125000000; |
4638 | else if (rate == 125000000) |
4639 | return 125000000; |
4640 | |
4641 | WARN_ON(rate > 125000000); |
4642 | |
4643 | return 125000000; |
4644 | } |
4645 | |
4646 | static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate, |
4647 | unsigned long parent_rate) |
4648 | { |
	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
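	/* The management register presumably acts as a clock mux: writing 1
	 * selects the low-speed (2.5/25 MHz) path and 0 the 125 MHz path.
	 * This reading is inferred from the write below, not from FU540
	 * documentation.
	 */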
4650 | if (rate != 125000000) |
4651 | iowrite32(1, mgmt->reg); |
4652 | else |
4653 | iowrite32(0, mgmt->reg); |
4654 | mgmt->rate = rate; |
4655 | |
4656 | return 0; |
4657 | } |
4658 | |
4659 | static const struct clk_ops fu540_c000_ops = { |
4660 | .recalc_rate = fu540_macb_tx_recalc_rate, |
4661 | .round_rate = fu540_macb_tx_round_rate, |
4662 | .set_rate = fu540_macb_tx_set_rate, |
4663 | }; |
4664 | |
4665 | static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, |
4666 | struct clk **hclk, struct clk **tx_clk, |
4667 | struct clk **rx_clk, struct clk **tsu_clk) |
4668 | { |
4669 | struct clk_init_data init; |
4670 | int err = 0; |
4671 | |
4672 | err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk); |
4673 | if (err) |
4674 | return err; |
4675 | |
	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4677 | if (!mgmt) { |
4678 | err = -ENOMEM; |
4679 | goto err_disable_clks; |
4680 | } |
4681 | |
	init.name = "sifive-gemgxl-mgmt";
4683 | init.ops = &fu540_c000_ops; |
4684 | init.flags = 0; |
4685 | init.num_parents = 0; |
4686 | |
4687 | mgmt->rate = 0; |
4688 | mgmt->hw.init = &init; |
4689 | |
	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk)) {
		err = PTR_ERR(*tx_clk);
4693 | goto err_disable_clks; |
4694 | } |
4695 | |
	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		*tx_clk = NULL;
		goto err_disable_clks;
	} else {
		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
	}
4704 | |
4705 | return 0; |
4706 | |
4707 | err_disable_clks: |
	macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
4709 | |
4710 | return err; |
4711 | } |
4712 | |
4713 | static int fu540_c000_init(struct platform_device *pdev) |
4714 | { |
	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mgmt->reg))
		return PTR_ERR(mgmt->reg);
4718 | |
4719 | return macb_init(pdev); |
4720 | } |
4721 | |
4722 | static int init_reset_optional(struct platform_device *pdev) |
4723 | { |
4724 | struct net_device *dev = platform_get_drvdata(pdev); |
4725 | struct macb *bp = netdev_priv(dev); |
4726 | int ret; |
4727 | |
4728 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
4729 | /* Ensure PHY device used in SGMII mode is ready */ |
		bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);

		if (IS_ERR(bp->sgmii_phy))
			return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
					     "failed to get SGMII PHY\n");

		ret = phy_init(bp->sgmii_phy);
		if (ret)
			return dev_err_probe(&pdev->dev, ret,
					     "failed to init SGMII PHY\n");
4740 | |
		ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
4742 | if (!ret) { |
4743 | u32 pm_info[2]; |
4744 | |
			ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
							 pm_info, ARRAY_SIZE(pm_info));
			if (ret) {
				dev_err(&pdev->dev, "Failed to read power management information\n");
4749 | goto err_out_phy_exit; |
4750 | } |
			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
4752 | if (ret) |
4753 | goto err_out_phy_exit; |
4754 | |
			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
4756 | if (ret) |
4757 | goto err_out_phy_exit; |
4758 | } |
4759 | |
4760 | } |
4761 | |
4762 | /* Fully reset controller at hardware level if mapped in device tree */ |
	ret = device_reset_optional(&pdev->dev);
	if (ret) {
		phy_exit(bp->sgmii_phy);
		return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
4767 | } |
4768 | |
4769 | ret = macb_init(pdev); |
4770 | |
4771 | err_out_phy_exit: |
4772 | if (ret) |
		phy_exit(bp->sgmii_phy);
4774 | |
4775 | return ret; |
4776 | } |
4777 | |
4778 | static const struct macb_usrio_config sama7g5_usrio = { |
4779 | .mii = 0, |
4780 | .rmii = 1, |
4781 | .rgmii = 2, |
4782 | .refclk = BIT(2), |
4783 | .hdfctlen = BIT(6), |
4784 | }; |
4785 | |
4786 | static const struct macb_config fu540_c000_config = { |
4787 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | |
4788 | MACB_CAPS_GEM_HAS_PTP, |
4789 | .dma_burst_length = 16, |
4790 | .clk_init = fu540_c000_clk_init, |
4791 | .init = fu540_c000_init, |
4792 | .jumbo_max_len = 10240, |
4793 | .usrio = &macb_default_usrio, |
4794 | }; |
4795 | |
4796 | static const struct macb_config at91sam9260_config = { |
4797 | .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
4798 | .clk_init = macb_clk_init, |
4799 | .init = macb_init, |
4800 | .usrio = &macb_default_usrio, |
4801 | }; |
4802 | |
4803 | static const struct macb_config sama5d3macb_config = { |
4804 | .caps = MACB_CAPS_SG_DISABLED | |
4805 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
4806 | .clk_init = macb_clk_init, |
4807 | .init = macb_init, |
4808 | .usrio = &macb_default_usrio, |
4809 | }; |
4810 | |
4811 | static const struct macb_config pc302gem_config = { |
4812 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
4813 | .dma_burst_length = 16, |
4814 | .clk_init = macb_clk_init, |
4815 | .init = macb_init, |
4816 | .usrio = &macb_default_usrio, |
4817 | }; |
4818 | |
4819 | static const struct macb_config sama5d2_config = { |
4820 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
4821 | .dma_burst_length = 16, |
4822 | .clk_init = macb_clk_init, |
4823 | .init = macb_init, |
4824 | .usrio = &macb_default_usrio, |
4825 | }; |
4826 | |
4827 | static const struct macb_config sama5d29_config = { |
4828 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP, |
4829 | .dma_burst_length = 16, |
4830 | .clk_init = macb_clk_init, |
4831 | .init = macb_init, |
4832 | .usrio = &macb_default_usrio, |
4833 | }; |
4834 | |
4835 | static const struct macb_config sama5d3_config = { |
4836 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
4837 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, |
4838 | .dma_burst_length = 16, |
4839 | .clk_init = macb_clk_init, |
4840 | .init = macb_init, |
4841 | .jumbo_max_len = 10240, |
4842 | .usrio = &macb_default_usrio, |
4843 | }; |
4844 | |
4845 | static const struct macb_config sama5d4_config = { |
4846 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
4847 | .dma_burst_length = 4, |
4848 | .clk_init = macb_clk_init, |
4849 | .init = macb_init, |
4850 | .usrio = &macb_default_usrio, |
4851 | }; |
4852 | |
4853 | static const struct macb_config emac_config = { |
4854 | .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC, |
4855 | .clk_init = at91ether_clk_init, |
4856 | .init = at91ether_init, |
4857 | .usrio = &macb_default_usrio, |
4858 | }; |
4859 | |
4860 | static const struct macb_config np4_config = { |
4861 | .caps = MACB_CAPS_USRIO_DISABLED, |
4862 | .clk_init = macb_clk_init, |
4863 | .init = macb_init, |
4864 | .usrio = &macb_default_usrio, |
4865 | }; |
4866 | |
4867 | static const struct macb_config zynqmp_config = { |
4868 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
4869 | MACB_CAPS_JUMBO | |
4870 | MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, |
4871 | .dma_burst_length = 16, |
4872 | .clk_init = macb_clk_init, |
4873 | .init = init_reset_optional, |
4874 | .jumbo_max_len = 10240, |
4875 | .usrio = &macb_default_usrio, |
4876 | }; |
4877 | |
4878 | static const struct macb_config zynq_config = { |
4879 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | |
4880 | MACB_CAPS_NEEDS_RSTONUBR, |
4881 | .dma_burst_length = 16, |
4882 | .clk_init = macb_clk_init, |
4883 | .init = macb_init, |
4884 | .usrio = &macb_default_usrio, |
4885 | }; |
4886 | |
4887 | static const struct macb_config mpfs_config = { |
4888 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
4889 | MACB_CAPS_JUMBO | |
4890 | MACB_CAPS_GEM_HAS_PTP, |
4891 | .dma_burst_length = 16, |
4892 | .clk_init = macb_clk_init, |
4893 | .init = init_reset_optional, |
4894 | .usrio = &macb_default_usrio, |
4895 | .max_tx_length = 4040, /* Cadence Erratum 1686 */ |
4896 | .jumbo_max_len = 4040, |
4897 | }; |
4898 | |
4899 | static const struct macb_config sama7g5_gem_config = { |
4900 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG | |
4901 | MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP, |
4902 | .dma_burst_length = 16, |
4903 | .clk_init = macb_clk_init, |
4904 | .init = macb_init, |
4905 | .usrio = &sama7g5_usrio, |
4906 | }; |
4907 | |
4908 | static const struct macb_config sama7g5_emac_config = { |
4909 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | |
4910 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII | |
4911 | MACB_CAPS_GEM_HAS_PTP, |
4912 | .dma_burst_length = 16, |
4913 | .clk_init = macb_clk_init, |
4914 | .init = macb_init, |
4915 | .usrio = &sama7g5_usrio, |
4916 | }; |
4917 | |
4918 | static const struct macb_config versal_config = { |
4919 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | |
4920 | MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK, |
4921 | .dma_burst_length = 16, |
4922 | .clk_init = macb_clk_init, |
4923 | .init = init_reset_optional, |
4924 | .jumbo_max_len = 10240, |
4925 | .usrio = &macb_default_usrio, |
4926 | }; |
4927 | |
4928 | static const struct of_device_id macb_dt_ids[] = { |
4929 | { .compatible = "cdns,at91sam9260-macb" , .data = &at91sam9260_config }, |
4930 | { .compatible = "cdns,macb" }, |
4931 | { .compatible = "cdns,np4-macb" , .data = &np4_config }, |
4932 | { .compatible = "cdns,pc302-gem" , .data = &pc302gem_config }, |
4933 | { .compatible = "cdns,gem" , .data = &pc302gem_config }, |
4934 | { .compatible = "cdns,sam9x60-macb" , .data = &at91sam9260_config }, |
4935 | { .compatible = "atmel,sama5d2-gem" , .data = &sama5d2_config }, |
4936 | { .compatible = "atmel,sama5d29-gem" , .data = &sama5d29_config }, |
4937 | { .compatible = "atmel,sama5d3-gem" , .data = &sama5d3_config }, |
4938 | { .compatible = "atmel,sama5d3-macb" , .data = &sama5d3macb_config }, |
4939 | { .compatible = "atmel,sama5d4-gem" , .data = &sama5d4_config }, |
4940 | { .compatible = "cdns,at91rm9200-emac" , .data = &emac_config }, |
4941 | { .compatible = "cdns,emac" , .data = &emac_config }, |
4942 | { .compatible = "cdns,zynqmp-gem" , .data = &zynqmp_config}, /* deprecated */ |
4943 | { .compatible = "cdns,zynq-gem" , .data = &zynq_config }, /* deprecated */ |
4944 | { .compatible = "sifive,fu540-c000-gem" , .data = &fu540_c000_config }, |
4945 | { .compatible = "microchip,mpfs-macb" , .data = &mpfs_config }, |
4946 | { .compatible = "microchip,sama7g5-gem" , .data = &sama7g5_gem_config }, |
4947 | { .compatible = "microchip,sama7g5-emac" , .data = &sama7g5_emac_config }, |
4948 | { .compatible = "xlnx,zynqmp-gem" , .data = &zynqmp_config}, |
4949 | { .compatible = "xlnx,zynq-gem" , .data = &zynq_config }, |
4950 | { .compatible = "xlnx,versal-gem" , .data = &versal_config}, |
4951 | { /* sentinel */ } |
4952 | }; |
4953 | MODULE_DEVICE_TABLE(of, macb_dt_ids); |
4954 | #endif /* CONFIG_OF */ |
4955 | |
4956 | static const struct macb_config default_gem_config = { |
4957 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
4958 | MACB_CAPS_JUMBO | |
4959 | MACB_CAPS_GEM_HAS_PTP, |
4960 | .dma_burst_length = 16, |
4961 | .clk_init = macb_clk_init, |
4962 | .init = macb_init, |
4963 | .usrio = &macb_default_usrio, |
4964 | .jumbo_max_len = 10240, |
4965 | }; |
4966 | |
4967 | static int macb_probe(struct platform_device *pdev) |
4968 | { |
4969 | const struct macb_config *macb_config = &default_gem_config; |
4970 | int (*clk_init)(struct platform_device *, struct clk **, |
4971 | struct clk **, struct clk **, struct clk **, |
4972 | struct clk **) = macb_config->clk_init; |
4973 | int (*init)(struct platform_device *) = macb_config->init; |
4974 | struct device_node *np = pdev->dev.of_node; |
4975 | struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; |
4976 | struct clk *tsu_clk = NULL; |
4977 | unsigned int queue_mask, num_queues; |
4978 | bool native_io; |
4979 | phy_interface_t interface; |
4980 | struct net_device *dev; |
4981 | struct resource *regs; |
4982 | u32 wtrmrk_rst_val; |
4983 | void __iomem *mem; |
4984 | struct macb *bp; |
4985 | int err, val; |
4986 | |
	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
4990 | |
4991 | if (np) { |
4992 | const struct of_device_id *match; |
4993 | |
		match = of_match_node(macb_dt_ids, np);
4995 | if (match && match->data) { |
4996 | macb_config = match->data; |
4997 | clk_init = macb_config->clk_init; |
4998 | init = macb_config->init; |
4999 | } |
5000 | } |
5001 | |
5002 | err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); |
5003 | if (err) |
5004 | return err; |
5005 | |
	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);
5012 | |
	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
5014 | dev = alloc_etherdev_mq(sizeof(*bp), num_queues); |
5015 | if (!dev) { |
5016 | err = -ENOMEM; |
5017 | goto err_disable_clocks; |
5018 | } |
5019 | |
5020 | dev->base_addr = regs->start; |
5021 | |
5022 | SET_NETDEV_DEV(dev, &pdev->dev); |
5023 | |
5024 | bp = netdev_priv(dev); |
5025 | bp->pdev = pdev; |
5026 | bp->dev = dev; |
5027 | bp->regs = mem; |
5028 | bp->native_io = native_io; |
5029 | if (native_io) { |
5030 | bp->macb_reg_readl = hw_readl_native; |
5031 | bp->macb_reg_writel = hw_writel_native; |
5032 | } else { |
5033 | bp->macb_reg_readl = hw_readl; |
5034 | bp->macb_reg_writel = hw_writel; |
5035 | } |
5036 | bp->num_queues = num_queues; |
5037 | bp->queue_mask = queue_mask; |
5038 | if (macb_config) |
5039 | bp->dma_burst_length = macb_config->dma_burst_length; |
5040 | bp->pclk = pclk; |
5041 | bp->hclk = hclk; |
5042 | bp->tx_clk = tx_clk; |
5043 | bp->rx_clk = rx_clk; |
5044 | bp->tsu_clk = tsu_clk; |
5045 | if (macb_config) |
5046 | bp->jumbo_max_len = macb_config->jumbo_max_len; |
5047 | |
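	/* Pick the maximum TX frame length: MACB cores are limited by the
	 * frame-length field; GEM cores use the per-SoC value when one is
	 * set, otherwise the conservative GEM default.
	 */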
	if (!hw_is_gem(bp->regs, bp->native_io))
		bp->max_tx_length = MACB_MAX_TX_LEN;
	else if (macb_config->max_tx_length)
		bp->max_tx_length = macb_config->max_tx_length;
	else
		bp->max_tx_length = GEM_MAX_TX_LEN;

	bp->wol = 0;
	if (of_property_read_bool(np, "magic-packet"))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	bp->usrio = macb_config->usrio;

	/* Partial store and forward is the default on packet buffers with
	 * cut-through support (e.g. ZynqMP); disable it unless a valid
	 * watermark is given in the devicetree.
	 */
	if (GEM_BFEXT(PBUF_CUTTHRU, gem_readl(bp, DCFG6))) {
		err = of_property_read_u32(bp->pdev->dev.of_node,
					   "cdns,rx-watermark",
					   &bp->rx_watermark);

		if (!err) {
			/* Disable partial store and forward in case of error or
			 * invalid watermark value
			 */
			wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1;
			if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) {
				dev_info(&bp->pdev->dev, "Invalid watermark value\n");
				bp->rx_watermark = 0;
			}
		}
	}
	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

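	/* GEM revisions reporting DAW64 support 44-bit DMA addressing and
	 * the extended (64-bit) descriptor layout.
	 */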
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

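	/* DCFG10 encodes how many descriptors the DMA engine prefetches;
	 * convert that to the number of bytes of descriptor data read
	 * ahead of the ring position.
	 */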
	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

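	/* Prefer a MAC address from the device tree; fall back to whatever
	 * the bootloader left in the hardware registers.
	 */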
	err = of_get_ethdev_address(np, bp->dev);
	if (err == -EPROBE_DEFER)
		goto err_out_free_netdev;
	else if (err)
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_phy_exit;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

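	/* Error paths unwind in reverse order of the setup above. */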
err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_phy_exit:
	phy_exit(bp->sgmii_phy);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

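/* Detach from the networking core and release everything probe acquired. */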
static void macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		phy_exit(bp->sgmii_phy);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
					  bp->rx_clk, bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}
}

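/*
 * System suspend: with WoL armed, switch queue 0 over to a wake-up
 * interrupt handler and enable magic-packet detection; without WoL, stop
 * the link and reset the MAC.
 */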
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	int err;

	if (!device_may_wakeup(&bp->dev->dev))
		phy_exit(bp->sgmii_phy);

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		spin_lock_irqsave(&bp->lock, flags);
		/* Flush all status bits */
		macb_writel(bp, TSR, -1);
		macb_writel(bp, RSR, -1);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue) {
			/* Disable all interrupts */
			queue_writel(queue, IDR, -1);
			queue_readl(queue, ISR);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
		}
		/* Change the interrupt handler and enable the WoL IRQ
		 * on queue 0
		 */
		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
		if (macb_is_gem(bp)) {
			err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
					       IRQF_SHARED, netdev->name, bp->queues);
			if (err) {
				dev_err(dev,
					"Unable to request IRQ %d (error %d)\n",
					bp->queues[0].irq, err);
				spin_unlock_irqrestore(&bp->lock, flags);
				return err;
			}
			queue_writel(bp->queues, IER, GEM_BIT(WOL));
			gem_writel(bp, WOL, MACB_BIT(MAG));
		} else {
			err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
					       IRQF_SHARED, netdev->name, bp->queues);
			if (err) {
				dev_err(dev,
					"Unable to request IRQ %d (error %d)\n",
					bp->queues[0].irq, err);
				spin_unlock_irqrestore(&bp->lock, flags);
				return err;
			}
			queue_writel(bp->queues, IER, MACB_BIT(WOL));
			macb_writel(bp, WOL, MACB_BIT(MAG));
		}
		spin_unlock_irqrestore(&bp->lock, flags);

		enable_irq_wake(bp->queues[0].irq);
	}

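	/* Take the interface offline and quiesce NAPI processing. */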
	netif_device_detach(netdev);
	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue) {
		napi_disable(&queue->napi_rx);
		napi_disable(&queue->napi_tx);
	}

	if (!(bp->wol & MACB_WOL_ENABLED)) {
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);
	}

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

	if (netdev->hw_features & NETIF_F_NTUPLE)
		bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	if (!device_may_wakeup(dev))
		pm_runtime_force_suspend(dev);

	return 0;
}

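/*
 * System resume: undo the WoL setup if it was armed, then reinitialize
 * the controller, restore the saved state and restart the link.
 */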
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	int err;

	if (!device_may_wakeup(&bp->dev->dev))
		phy_init(bp->sgmii_phy);

	if (!netif_running(netdev))
		return 0;

	if (!device_may_wakeup(dev))
		pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		spin_lock_irqsave(&bp->lock, flags);
		/* Disable WoL */
		if (macb_is_gem(bp)) {
			queue_writel(bp->queues, IDR, GEM_BIT(WOL));
			gem_writel(bp, WOL, 0);
		} else {
			queue_writel(bp->queues, IDR, MACB_BIT(WOL));
			macb_writel(bp, WOL, 0);
		}
		/* Clear ISR on queue 0 */
		queue_readl(bp->queues, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(bp->queues, ISR, -1);
		/* Replace interrupt handler on queue 0 */
		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
		err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
				       IRQF_SHARED, netdev->name, bp->queues);
		if (err) {
			dev_err(dev,
				"Unable to request IRQ %d (error %d)\n",
				bp->queues[0].irq, err);
			spin_unlock_irqrestore(&bp->lock, flags);
			return err;
		}
		spin_unlock_irqrestore(&bp->lock, flags);

		disable_irq_wake(bp->queues[0].irq);

		/* Make sure the PHY is stopped before moving on to the
		 * common restore path
		 */
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue) {
		napi_enable(&queue->napi_rx);
		napi_enable(&queue->napi_tx);
	}

	if (netdev->hw_features & NETIF_F_NTUPLE)
		gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

	macb_writel(bp, NCR, MACB_BIT(MPE));
	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	rtnl_lock();
	phylink_start(bp->phylink);
	rtnl_unlock();

	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

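/* Runtime PM: gate all clocks when the device cannot wake the system;
 * when it can, gate only the TSU clock, and only if the hardware does
 * not require it to keep running.
 */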
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!device_may_wakeup(dev))
		macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
	else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
		macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!device_may_wakeup(dev)) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
		clk_prepare_enable(bp->tsu_clk);
	} else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
		clk_prepare_enable(bp->tsu_clk);
	}

	return 0;
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe = macb_probe,
	.remove_new = macb_remove,
	.driver = {
		.name = "macb",
		.of_match_table = of_match_ptr(macb_dt_ids),
		.pm = &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");