1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Xilinx Axi Ethernet device driver |
4 | * |
5 | * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi |
6 | * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> |
7 | * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. |
8 | * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> |
9 | * Copyright (c) 2010 - 2011 PetaLogix |
10 | * Copyright (c) 2019 - 2022 Calian Advanced Technologies |
11 | * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. |
12 | * |
13 | * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 |
14 | * and Spartan6. |
15 | * |
16 | * TODO: |
17 | * - Add Axi Fifo support. |
18 | * - Factor out Axi DMA code into separate driver. |
19 | * - Test and fix basic multicast filtering. |
20 | * - Add support for extended multicast filtering. |
21 | * - Test basic VLAN support. |
22 | * - Add support for extended VLAN support. |
23 | */ |
24 | |
25 | #include <linux/clk.h> |
26 | #include <linux/delay.h> |
27 | #include <linux/etherdevice.h> |
28 | #include <linux/module.h> |
29 | #include <linux/netdevice.h> |
30 | #include <linux/of.h> |
31 | #include <linux/of_mdio.h> |
32 | #include <linux/of_net.h> |
33 | #include <linux/of_irq.h> |
34 | #include <linux/of_address.h> |
35 | #include <linux/platform_device.h> |
36 | #include <linux/skbuff.h> |
37 | #include <linux/math64.h> |
38 | #include <linux/phy.h> |
39 | #include <linux/mii.h> |
40 | #include <linux/ethtool.h> |
41 | #include <linux/dmaengine.h> |
42 | #include <linux/dma-mapping.h> |
43 | #include <linux/dma/xilinx_dma.h> |
44 | #include <linux/circ_buf.h> |
45 | #include <net/netdev_queues.h> |
46 | |
47 | #include "xilinx_axienet.h" |
48 | |
49 | /* Descriptors defines for Tx and Rx DMA */ |
50 | #define TX_BD_NUM_DEFAULT 128 |
51 | #define RX_BD_NUM_DEFAULT 1024 |
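 | /* TX_BD_NUM_MIN keeps room for one maximally fragmented skb: one BD for |
 | * the linear header plus one BD per page fragment (see axienet_start_xmit). |
 | */ |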
52 | #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) |
53 | #define TX_BD_NUM_MAX 4096 |
54 | #define RX_BD_NUM_MAX 4096 |
55 | #define DMA_NUM_APP_WORDS 5 |
56 | #define LEN_APP 4 |
57 | #define RX_BUF_NUM_DEFAULT 128 |
58 | |
59 | /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ |
60 | #define DRIVER_NAME "xaxienet" |
61 | #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" |
62 | #define DRIVER_VERSION "1.00a" |
63 | |
64 | #define AXIENET_REGS_N 40 |
65 | |
66 | static void axienet_rx_submit_desc(struct net_device *ndev); |
67 | |
68 | /* Match table for of_platform binding */ |
69 | static const struct of_device_id axienet_of_match[] = { |
70 | { .compatible = "xlnx,axi-ethernet-1.00.a", }, |
71 | { .compatible = "xlnx,axi-ethernet-1.01.a", }, |
72 | { .compatible = "xlnx,axi-ethernet-2.01.a", }, |
73 | {}, |
74 | }; |
75 | |
76 | MODULE_DEVICE_TABLE(of, axienet_of_match); |
77 | |
78 | /* Option table for setting up Axi Ethernet hardware options */ |
79 | static struct axienet_option axienet_options[] = { |
80 | /* Turn on jumbo packet support for both Rx and Tx */ |
81 | { |
82 | .opt = XAE_OPTION_JUMBO, |
83 | .reg = XAE_TC_OFFSET, |
84 | .m_or = XAE_TC_JUM_MASK, |
85 | }, { |
86 | .opt = XAE_OPTION_JUMBO, |
87 | .reg = XAE_RCW1_OFFSET, |
88 | .m_or = XAE_RCW1_JUM_MASK, |
89 | }, { /* Turn on VLAN packet support for both Rx and Tx */ |
90 | .opt = XAE_OPTION_VLAN, |
91 | .reg = XAE_TC_OFFSET, |
92 | .m_or = XAE_TC_VLAN_MASK, |
93 | }, { |
94 | .opt = XAE_OPTION_VLAN, |
95 | .reg = XAE_RCW1_OFFSET, |
96 | .m_or = XAE_RCW1_VLAN_MASK, |
97 | }, { /* Turn on FCS stripping on receive packets */ |
98 | .opt = XAE_OPTION_FCS_STRIP, |
99 | .reg = XAE_RCW1_OFFSET, |
100 | .m_or = XAE_RCW1_FCS_MASK, |
101 | }, { /* Turn on FCS insertion on transmit packets */ |
102 | .opt = XAE_OPTION_FCS_INSERT, |
103 | .reg = XAE_TC_OFFSET, |
104 | .m_or = XAE_TC_FCS_MASK, |
105 | }, { /* Turn off length/type field checking on receive packets */ |
106 | .opt = XAE_OPTION_LENTYPE_ERR, |
107 | .reg = XAE_RCW1_OFFSET, |
108 | .m_or = XAE_RCW1_LT_DIS_MASK, |
109 | }, { /* Turn on Rx flow control */ |
110 | .opt = XAE_OPTION_FLOW_CONTROL, |
111 | .reg = XAE_FCC_OFFSET, |
112 | .m_or = XAE_FCC_FCRX_MASK, |
113 | }, { /* Turn on Tx flow control */ |
114 | .opt = XAE_OPTION_FLOW_CONTROL, |
115 | .reg = XAE_FCC_OFFSET, |
116 | .m_or = XAE_FCC_FCTX_MASK, |
117 | }, { /* Turn on promiscuous frame filtering */ |
118 | .opt = XAE_OPTION_PROMISC, |
119 | .reg = XAE_FMI_OFFSET, |
120 | .m_or = XAE_FMI_PM_MASK, |
121 | }, { /* Enable transmitter */ |
122 | .opt = XAE_OPTION_TXEN, |
123 | .reg = XAE_TC_OFFSET, |
124 | .m_or = XAE_TC_TX_MASK, |
125 | }, { /* Enable receiver */ |
126 | .opt = XAE_OPTION_RXEN, |
127 | .reg = XAE_RCW1_OFFSET, |
128 | .m_or = XAE_RCW1_RX_MASK, |
129 | }, |
130 | {} |
131 | }; |
132 | |
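 | /* The skb ring heads/tails are free-running indices; the masking below |
 | * relies on RX_BUF_NUM_DEFAULT and TX_BD_NUM_MAX being powers of two. |
 | */ |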
133 | static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i) |
134 | { |
135 | return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)]; |
136 | } |
137 | |
138 | static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i) |
139 | { |
140 | return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)]; |
141 | } |
142 | |
143 | /** |
144 | * axienet_dma_in32 - Memory mapped Axi DMA register read |
145 | * @lp: Pointer to axienet local structure |
146 | * @reg: Address offset from the base address of the Axi DMA core |
147 | * |
148 | * Return: The contents of the Axi DMA register |
149 | * |
150 | * This function returns the contents of the corresponding Axi DMA register. |
151 | */ |
152 | static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) |
153 | { |
154 | return ioread32(lp->dma_regs + reg); |
155 | } |
156 | |
157 | static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, |
158 | struct axidma_bd *desc) |
159 | { |
160 | desc->phys = lower_32_bits(addr); |
161 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
162 | desc->phys_msb = upper_32_bits(addr); |
163 | } |
164 | |
165 | static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, |
166 | struct axidma_bd *desc) |
167 | { |
168 | dma_addr_t ret = desc->phys; |
169 | |
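 | /* The two 16-bit shifts keep the expression well-defined even when |
 | * dma_addr_t is only 32 bits wide (a single shift by 32 would be |
 | * undefined behaviour). |
 | */ |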
170 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
171 | ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; |
172 | |
173 | return ret; |
174 | } |
175 | |
176 | /** |
177 | * axienet_dma_bd_release - Release buffer descriptor rings |
178 | * @ndev: Pointer to the net_device structure |
179 | * |
180 | * This function is used to release the descriptors allocated in |
181 | * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi |
182 | * Ethernet driver's stop routine is called. |
183 | */ |
184 | static void axienet_dma_bd_release(struct net_device *ndev) |
185 | { |
186 | int i; |
187 | struct axienet_local *lp = netdev_priv(ndev); |
188 |  |
189 | /* If we end up here, tx_bd_v must have been DMA allocated. */ |
190 | dma_free_coherent(lp->dev, |
191 | sizeof(*lp->tx_bd_v) * lp->tx_bd_num, |
192 | lp->tx_bd_v, |
193 | lp->tx_bd_p); |
194 | |
195 | if (!lp->rx_bd_v) |
196 | return; |
197 | |
198 | for (i = 0; i < lp->rx_bd_num; i++) { |
199 | dma_addr_t phys; |
200 | |
201 | /* A NULL skb means this descriptor has not been initialised |
202 | * at all. |
203 | */ |
204 | if (!lp->rx_bd_v[i].skb) |
205 | break; |
206 | |
207 | dev_kfree_skb(lp->rx_bd_v[i].skb); |
208 | |
209 | /* For each descriptor, we programmed cntrl with the (non-zero) |
210 | * descriptor size, after it had been successfully allocated. |
211 | * So a non-zero value in there means we need to unmap it. |
212 | */ |
213 | if (lp->rx_bd_v[i].cntrl) { |
214 | phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); |
215 | dma_unmap_single(lp->dev, phys, |
216 | lp->max_frm_size, DMA_FROM_DEVICE); |
217 | } |
218 | } |
219 | |
220 | dma_free_coherent(lp->dev, |
221 | sizeof(*lp->rx_bd_v) * lp->rx_bd_num, |
222 | lp->rx_bd_v, |
223 | lp->rx_bd_p); |
224 | } |
225 | |
226 | static u64 axienet_dma_rate(struct axienet_local *lp) |
227 | { |
228 | if (lp->axi_clk) |
229 | return clk_get_rate(lp->axi_clk); |
230 | return 125000000; /* arbitrary guess if no clock rate set */ |
231 | } |
232 | |
233 | /** |
234 | * axienet_calc_cr() - Calculate control register value |
235 | * @lp: Device private data |
236 | * @count: Number of completions before an interrupt |
237 | * @usec: Microseconds after the last completion before an interrupt |
238 | * |
239 | * Calculate a control register value based on the coalescing settings. The |
240 | * run/stop bit is not set. |
241 | */ |
242 | static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec) |
243 | { |
244 | u32 cr; |
245 | |
246 | cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK | |
247 | XAXIDMA_IRQ_ERROR_MASK; |
248 | /* Only set interrupt delay timer if not generating an interrupt on |
249 | * the first packet. Otherwise leave at 0 to disable delay interrupt. |
250 | */ |
251 | if (count > 1) { |
252 | u64 clk_rate = axienet_dma_rate(lp); |
253 | u32 timer; |
254 | |
255 | /* 1 Timeout Interval = 125 * (clock period of SG clock) */ |
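 | /* Worked example (assuming XAXIDMA_DELAY_SCALE is 125 * USEC_PER_SEC): |
 | * with a 125 MHz SG clock and usec = 50, timer = 50 * 125000000 / |
 | * 125000000 = 50 timeout intervals, i.e. 50 * 125 clock periods = 50 us |
 | * of idle time before the delay interrupt fires. |
 | */ |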
256 | timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate, |
257 | XAXIDMA_DELAY_SCALE); |
258 | |
259 | timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK)); |
260 | cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) | |
261 | XAXIDMA_IRQ_DELAY_MASK; |
262 | } |
263 | |
264 | return cr; |
265 | } |
266 | |
267 | /** |
268 | * axienet_coalesce_params() - Extract coalesce parameters from the CR |
269 | * @lp: Device private data |
270 | * @cr: The control register to parse |
271 | * @count: Number of packets before an interrupt |
272 | * @usec: Idle time (in usec) before an interrupt |
273 | */ |
274 | static void axienet_coalesce_params(struct axienet_local *lp, u32 cr, |
275 | u32 *count, u32 *usec) |
276 | { |
277 | u64 clk_rate = axienet_dma_rate(lp); |
278 | u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr); |
279 | |
280 | *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr); |
281 | *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate); |
282 | } |
283 | |
284 | /** |
285 | * axienet_dma_start - Set up DMA registers and start DMA operation |
286 | * @lp: Pointer to the axienet_local structure |
287 | */ |
288 | static void axienet_dma_start(struct axienet_local *lp) |
289 | { |
290 | spin_lock_irq(&lp->rx_cr_lock); |
291 |  |
292 | /* Start updating the Rx channel control register */ |
293 | lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; |
294 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); |
295 |  |
296 | /* Populate the tail pointer and bring the Rx Axi DMA engine out of |
297 | * halted state. This will make the Rx side ready for reception. |
298 | */ |
299 | axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); |
300 | lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; |
301 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); |
302 | axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + |
303 | (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); |
304 | lp->rx_dma_started = true; |
305 |  |
306 | spin_unlock_irq(&lp->rx_cr_lock); |
307 | spin_lock_irq(&lp->tx_cr_lock); |
308 |  |
309 | /* Start updating the Tx channel control register */ |
310 | lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; |
311 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); |
312 |  |
313 | /* Write to the RS (Run-stop) bit in the Tx channel control register. |
314 | * Tx channel is now ready to run. But only after we write to the |
315 | * tail pointer register that the Tx channel will start transmitting. |
316 | */ |
317 | axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); |
318 | lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; |
319 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); |
320 | lp->tx_dma_started = true; |
321 |  |
322 | spin_unlock_irq(&lp->tx_cr_lock); |
323 | } |
324 | |
325 | /** |
326 | * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA |
327 | * @ndev: Pointer to the net_device structure |
328 | * |
329 | * Return: 0 on success, -ENOMEM on failure |
330 | * |
331 | * This function is called to initialize the Rx and Tx DMA descriptor |
332 | * rings. This initializes the descriptors with required default values |
333 | * and is called when Axi Ethernet driver reset is called. |
334 | */ |
335 | static int axienet_dma_bd_init(struct net_device *ndev) |
336 | { |
337 | int i; |
338 | struct sk_buff *skb; |
339 | struct axienet_local *lp = netdev_priv(ndev); |
340 | |
341 | /* Reset the indexes which are used for accessing the BDs */ |
342 | lp->tx_bd_ci = 0; |
343 | lp->tx_bd_tail = 0; |
344 | lp->rx_bd_ci = 0; |
345 | |
346 | /* Allocate the Tx and Rx buffer descriptors. */ |
347 | lp->tx_bd_v = dma_alloc_coherent(lp->dev, |
348 | sizeof(*lp->tx_bd_v) * lp->tx_bd_num, |
349 | &lp->tx_bd_p, GFP_KERNEL); |
350 | if (!lp->tx_bd_v) |
351 | return -ENOMEM; |
352 |  |
353 | lp->rx_bd_v = dma_alloc_coherent(lp->dev, |
354 | sizeof(*lp->rx_bd_v) * lp->rx_bd_num, |
355 | &lp->rx_bd_p, GFP_KERNEL); |
356 | if (!lp->rx_bd_v) |
357 | goto out; |
358 | |
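 | /* Link each BD to the next one, wrapping the last BD back to the first, |
 | * so the DMA engine sees a circular ring. |
 | */ |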
359 | for (i = 0; i < lp->tx_bd_num; i++) { |
360 | dma_addr_t addr = lp->tx_bd_p + |
361 | sizeof(*lp->tx_bd_v) * |
362 | ((i + 1) % lp->tx_bd_num); |
363 | |
364 | lp->tx_bd_v[i].next = lower_32_bits(addr); |
365 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
366 | lp->tx_bd_v[i].next_msb = upper_32_bits(addr); |
367 | } |
368 | |
369 | for (i = 0; i < lp->rx_bd_num; i++) { |
370 | dma_addr_t addr; |
371 | |
372 | addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * |
373 | ((i + 1) % lp->rx_bd_num); |
374 | lp->rx_bd_v[i].next = lower_32_bits(addr); |
375 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
376 | lp->rx_bd_v[i].next_msb = upper_32_bits(addr); |
377 | |
378 | skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); |
379 | if (!skb) |
380 | goto out; |
381 | |
382 | lp->rx_bd_v[i].skb = skb; |
383 | addr = dma_map_single(lp->dev, skb->data, |
384 | lp->max_frm_size, DMA_FROM_DEVICE); |
385 | if (dma_mapping_error(lp->dev, addr)) { |
386 | netdev_err(ndev, "DMA mapping error\n"); |
387 | goto out; |
388 | } |
389 | desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); |
390 | |
391 | lp->rx_bd_v[i].cntrl = lp->max_frm_size; |
392 | } |
393 | |
394 | axienet_dma_start(lp); |
395 | |
396 | return 0; |
397 | out: |
398 | axienet_dma_bd_release(ndev); |
399 | return -ENOMEM; |
400 | } |
401 | |
402 | /** |
403 | * axienet_set_mac_address - Write the MAC address |
404 | * @ndev: Pointer to the net_device structure |
405 | * @address: 6 byte Address to be written as MAC address |
406 | * |
407 | * This function is called to initialize the MAC address of the Axi Ethernet |
408 | * core. It writes to the UAW0 and UAW1 registers of the core. |
409 | */ |
410 | static void axienet_set_mac_address(struct net_device *ndev, |
411 | const void *address) |
412 | { |
413 | struct axienet_local *lp = netdev_priv(ndev); |
414 |  |
415 | if (address) |
416 | eth_hw_addr_set(ndev, address); |
417 | if (!is_valid_ether_addr(ndev->dev_addr)) |
418 | eth_hw_addr_random(ndev); |
419 |  |
420 | /* Set up the unicast MAC address filter with the given MAC address */ |
421 | axienet_iow(lp, XAE_UAW0_OFFSET, |
422 | (ndev->dev_addr[0]) | |
423 | (ndev->dev_addr[1] << 8) | |
424 | (ndev->dev_addr[2] << 16) | |
425 | (ndev->dev_addr[3] << 24)); |
426 | axienet_iow(lp, XAE_UAW1_OFFSET, |
427 | (((axienet_ior(lp, XAE_UAW1_OFFSET)) & |
428 | ~XAE_UAW1_UNICASTADDR_MASK) | |
429 | (ndev->dev_addr[4] | |
430 | (ndev->dev_addr[5] << 8)))); |
431 | } |
432 | |
433 | /** |
434 | * netdev_set_mac_address - Write the MAC address (from outside the driver) |
435 | * @ndev: Pointer to the net_device structure |
436 | * @p: 6 byte Address to be written as MAC address |
437 | * |
438 | * Return: 0 for all conditions. Presently, there is no failure case. |
439 | * |
440 | * This function is called to initialize the MAC address of the Axi Ethernet |
441 | * core. It calls the core specific axienet_set_mac_address. This is the |
442 | * function that goes into net_device_ops structure entry ndo_set_mac_address. |
443 | */ |
444 | static int netdev_set_mac_address(struct net_device *ndev, void *p) |
445 | { |
446 | struct sockaddr *addr = p; |
447 | |
448 | axienet_set_mac_address(ndev, addr->sa_data); |
449 | return 0; |
450 | } |
451 | |
452 | /** |
453 | * axienet_set_multicast_list - Prepare the multicast table |
454 | * @ndev: Pointer to the net_device structure |
455 | * |
456 | * This function is called to initialize the multicast table during |
457 | * initialization. The Axi Ethernet basic multicast support has a four-entry |
458 | * multicast table which is initialized here. Additionally this function |
459 | * goes into the net_device_ops structure entry ndo_set_multicast_list. This |
460 | * means whenever the multicast table entries need to be updated this |
461 | * function gets called. |
462 | */ |
463 | static void axienet_set_multicast_list(struct net_device *ndev) |
464 | { |
465 | int i = 0; |
466 | u32 reg, af0reg, af1reg; |
467 | struct axienet_local *lp = netdev_priv(ndev); |
468 | |
469 | reg = axienet_ior(lp, XAE_FMI_OFFSET); |
470 | reg &= ~XAE_FMI_PM_MASK; |
471 | if (ndev->flags & IFF_PROMISC) |
472 | reg |= XAE_FMI_PM_MASK; |
473 | else |
474 | reg &= ~XAE_FMI_PM_MASK; |
475 | axienet_iow(lp, XAE_FMI_OFFSET, reg); |
476 | |
477 | if (ndev->flags & IFF_ALLMULTI || |
478 | netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { |
479 | reg &= 0xFFFFFF00; |
480 | axienet_iow(lp, XAE_FMI_OFFSET, reg); |
481 | axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */ |
482 | axienet_iow(lp, XAE_AF1_OFFSET, 0); |
483 | axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */ |
484 | axienet_iow(lp, XAE_AM1_OFFSET, 0); |
485 | axienet_iow(lp, XAE_FFE_OFFSET, 1); |
486 | i = 1; |
487 | } else if (!netdev_mc_empty(ndev)) { |
488 | struct netdev_hw_addr *ha; |
489 | |
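 | /* The low byte of the FMI register appears to select which CAM entry the |
 | * AF0/AF1/AM0/AM1/FFE writes below target (an assumption based on how the |
 | * index is programmed here and in the flush loop at the end). |
 | */ |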
490 | netdev_for_each_mc_addr(ha, ndev) { |
491 | if (i >= XAE_MULTICAST_CAM_TABLE_NUM) |
492 | break; |
493 | |
494 | af0reg = (ha->addr[0]); |
495 | af0reg |= (ha->addr[1] << 8); |
496 | af0reg |= (ha->addr[2] << 16); |
497 | af0reg |= (ha->addr[3] << 24); |
498 | |
499 | af1reg = (ha->addr[4]); |
500 | af1reg |= (ha->addr[5] << 8); |
501 | |
502 | reg &= 0xFFFFFF00; |
503 | reg |= i; |
504 | |
505 | axienet_iow(lp, XAE_FMI_OFFSET, reg); |
506 | axienet_iow(lp, XAE_AF0_OFFSET, af0reg); |
507 | axienet_iow(lp, XAE_AF1_OFFSET, af1reg); |
508 | axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff); |
509 | axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff); |
510 | axienet_iow(lp, XAE_FFE_OFFSET, 1); |
511 | i++; |
512 | } |
513 | } |
514 | |
515 | for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { |
516 | reg &= 0xFFFFFF00; |
517 | reg |= i; |
518 | axienet_iow(lp, XAE_FMI_OFFSET, reg); |
519 | axienet_iow(lp, XAE_FFE_OFFSET, 0); |
520 | } |
521 | } |
522 | |
523 | /** |
524 | * axienet_setoptions - Set an Axi Ethernet option |
525 | * @ndev: Pointer to the net_device structure |
526 | * @options: Option to be enabled/disabled |
527 | * |
528 | * The Axi Ethernet core has multiple features which can be selectively turned |
529 | * on or off. The typical options could be jumbo frame option, basic VLAN |
530 | * option, promiscuous mode option etc. This function is used to set or clear |
531 | * these options in the Axi Ethernet hardware. This is done through |
532 | the axienet_option structure. |
533 | */ |
534 | static void axienet_setoptions(struct net_device *ndev, u32 options) |
535 | { |
536 | int reg; |
537 | struct axienet_local *lp = netdev_priv(ndev); |
538 | struct axienet_option *tp = &axienet_options[0]; |
539 |  |
540 | while (tp->opt) { |
541 | reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); |
542 | if (options & tp->opt) |
543 | reg |= tp->m_or; |
544 | axienet_iow(lp, tp->reg, reg); |
545 | tp++; |
546 | } |
547 | |
548 | lp->options |= options; |
549 | } |
550 | |
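 | /* The hardware statistics counters are only 32 bits wide. axienet_stat() |
 | * and axienet_stats_update() keep a running base (hw_stat_base) plus the |
 | * last raw reading (hw_last_counter) so the reported 64-bit totals keep |
 | * accumulating across counter wrap and device resets. |
 | */ |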
551 | static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat) |
552 | { |
553 | u32 counter; |
554 | |
555 | if (lp->reset_in_progress) |
556 | return lp->hw_stat_base[stat]; |
557 | |
558 | counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); |
559 | return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]); |
560 | } |
561 | |
562 | static void axienet_stats_update(struct axienet_local *lp, bool reset) |
563 | { |
564 | enum temac_stat stat; |
565 | |
566 | write_seqcount_begin(&lp->hw_stats_seqcount); |
567 | lp->reset_in_progress = reset; |
568 | for (stat = 0; stat < STAT_COUNT; stat++) { |
569 | u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); |
570 | |
571 | lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat]; |
572 | lp->hw_last_counter[stat] = counter; |
573 | } |
574 | write_seqcount_end(&lp->hw_stats_seqcount); |
575 | } |
576 | |
577 | static void axienet_refresh_stats(struct work_struct *work) |
578 | { |
579 | struct axienet_local *lp = container_of(work, struct axienet_local, |
580 | stats_work.work); |
581 | |
582 | mutex_lock(&lp->stats_lock); |
583 | axienet_stats_update(lp, false); |
584 | mutex_unlock(&lp->stats_lock); |
585 | |
586 | /* Just less than 2^32 bytes at 2.5 GBit/s */ |
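 | /* 2^32 bytes at 2.5 Gbit/s (312.5 MB/s) takes about 13.7 seconds, so a |
 | * 13 second refresh observes each 32-bit counter before it can wrap. |
 | */ |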
587 | schedule_delayed_work(&lp->stats_work, 13 * HZ); |
588 | } |
589 | |
590 | static int __axienet_device_reset(struct axienet_local *lp) |
591 | { |
592 | u32 value; |
593 | int ret; |
594 | |
595 | /* Save statistics counters in case they will be reset */ |
596 | mutex_lock(&lp->stats_lock); |
597 | if (lp->features & XAE_FEATURE_STATS) |
598 | axienet_stats_update(lp, reset: true); |
599 | |
600 | /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset |
601 | * process of Axi DMA takes a while to complete as all pending |
602 | * commands/transfers will be flushed or completed during this |
603 | * reset process. |
604 | * Note that even though both TX and RX have their own reset register, |
605 | * they both reset the entire DMA core, so only one needs to be used. |
606 | */ |
607 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); |
608 | ret = read_poll_timeout(axienet_dma_in32, value, |
609 | !(value & XAXIDMA_CR_RESET_MASK), |
610 | DELAY_OF_ONE_MILLISEC, 50000, false, lp, |
611 | XAXIDMA_TX_CR_OFFSET); |
612 | if (ret) { |
613 | dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); |
614 | goto out; |
615 | } |
616 | |
617 | /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ |
618 | ret = read_poll_timeout(axienet_ior, value, |
619 | value & XAE_INT_PHYRSTCMPLT_MASK, |
620 | DELAY_OF_ONE_MILLISEC, 50000, false, lp, |
621 | XAE_IS_OFFSET); |
622 | if (ret) { |
623 | dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); |
624 | goto out; |
625 | } |
626 | |
627 | /* Update statistics counters with new values */ |
628 | if (lp->features & XAE_FEATURE_STATS) { |
629 | enum temac_stat stat; |
630 | |
631 | write_seqcount_begin(&lp->hw_stats_seqcount); |
632 | lp->reset_in_progress = false; |
633 | for (stat = 0; stat < STAT_COUNT; stat++) { |
634 | u32 counter = |
635 | axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); |
636 | |
637 | lp->hw_stat_base[stat] += |
638 | lp->hw_last_counter[stat] - counter; |
639 | lp->hw_last_counter[stat] = counter; |
640 | } |
641 | write_seqcount_end(&lp->hw_stats_seqcount); |
642 | } |
643 | |
644 | out: |
645 | mutex_unlock(lock: &lp->stats_lock); |
646 | return ret; |
647 | } |
648 | |
649 | /** |
650 | * axienet_dma_stop - Stop DMA operation |
651 | * @lp: Pointer to the axienet_local structure |
652 | */ |
653 | static void axienet_dma_stop(struct axienet_local *lp) |
654 | { |
655 | int count; |
656 | u32 cr, sr; |
657 | |
658 | spin_lock_irq(lock: &lp->rx_cr_lock); |
659 | |
660 | cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); |
661 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, value: cr); |
662 | lp->rx_dma_started = false; |
663 | |
664 | spin_unlock_irq(lock: &lp->rx_cr_lock); |
665 | synchronize_irq(irq: lp->rx_irq); |
666 | |
667 | spin_lock_irq(lock: &lp->tx_cr_lock); |
668 | |
669 | cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); |
670 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, value: cr); |
671 | lp->tx_dma_started = false; |
672 | |
673 | spin_unlock_irq(lock: &lp->tx_cr_lock); |
674 | synchronize_irq(irq: lp->tx_irq); |
675 | |
676 | /* Give DMAs a chance to halt gracefully */ |
677 | sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
678 | for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { |
679 | msleep(msecs: 20); |
680 | sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
681 | } |
682 | |
683 | sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
684 | for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { |
685 | msleep(msecs: 20); |
686 | sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
687 | } |
688 | |
689 | /* Do a reset to ensure DMA is really stopped */ |
690 | axienet_lock_mii(lp); |
691 | __axienet_device_reset(lp); |
692 | axienet_unlock_mii(lp); |
693 | } |
694 | |
695 | /** |
696 | * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. |
697 | * @ndev: Pointer to the net_device structure |
698 | * |
699 | * This function is called to reset and initialize the Axi Ethernet core. This |
700 | * is typically called during initialization. It does a reset of the Axi DMA |
701 | * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines |
702 | * are connected to Axi Ethernet reset lines, this in turn resets the Axi |
703 | * Ethernet core. No separate hardware reset is done for the Axi Ethernet |
704 | * core. |
705 | * Returns 0 on success or a negative error number otherwise. |
706 | */ |
707 | static int axienet_device_reset(struct net_device *ndev) |
708 | { |
709 | u32 axienet_status; |
710 | struct axienet_local *lp = netdev_priv(dev: ndev); |
711 | int ret; |
712 | |
713 | lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; |
714 | lp->options |= XAE_OPTION_VLAN; |
715 | lp->options &= (~XAE_OPTION_JUMBO); |
716 | |
717 | if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) { |
718 | lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + |
719 | XAE_TRL_SIZE; |
720 | |
721 | if (lp->max_frm_size <= lp->rxmem) |
722 | lp->options |= XAE_OPTION_JUMBO; |
723 | } |
724 | |
725 | if (!lp->use_dmaengine) { |
726 | ret = __axienet_device_reset(lp); |
727 | if (ret) |
728 | return ret; |
729 | |
730 | ret = axienet_dma_bd_init(ndev); |
731 | if (ret) { |
732 | netdev_err(dev: ndev, format: "%s: descriptor allocation failed\n", |
733 | __func__); |
734 | return ret; |
735 | } |
736 | } |
737 | |
738 | axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); |
739 | axienet_status &= ~XAE_RCW1_RX_MASK; |
740 | axienet_iow(lp, XAE_RCW1_OFFSET, value: axienet_status); |
741 | |
742 | axienet_status = axienet_ior(lp, XAE_IP_OFFSET); |
743 | if (axienet_status & XAE_INT_RXRJECT_MASK) |
744 | axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); |
745 | axienet_iow(lp, XAE_IE_OFFSET, value: lp->eth_irq > 0 ? |
746 | XAE_INT_RECV_ERROR_MASK : 0); |
747 | |
748 | axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); |
749 | |
750 | /* Sync default options with HW but leave receiver and |
751 | * transmitter disabled. |
752 | */ |
753 | axienet_setoptions(ndev, options: lp->options & |
754 | ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); |
755 | axienet_set_mac_address(ndev, NULL); |
756 | axienet_set_multicast_list(ndev); |
757 | axienet_setoptions(ndev, options: lp->options); |
758 | |
759 | netif_trans_update(dev: ndev); |
760 | |
761 | return 0; |
762 | } |
763 | |
764 | /** |
765 | * axienet_free_tx_chain - Clean up a series of linked TX descriptors. |
766 | * @lp: Pointer to the axienet_local structure |
767 | * @first_bd: Index of first descriptor to clean up |
768 | * @nr_bds: Max number of descriptors to clean up |
769 | * @force: Whether to clean descriptors even if not complete |
770 | * @sizep: Pointer to a u32 filled with the total sum of all bytes |
771 | * in all cleaned-up descriptors. Ignored if NULL. |
772 | * @budget: NAPI budget (use 0 when not called from NAPI poll) |
773 | * |
774 | * Would either be called after a successful transmit operation, or after |
775 | * there was an error when setting up the chain. |
776 | * Returns the number of packets handled. |
777 | */ |
778 | static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, |
779 | int nr_bds, bool force, u32 *sizep, int budget) |
780 | { |
781 | struct axidma_bd *cur_p; |
782 | unsigned int status; |
783 | int i, packets = 0; |
784 | dma_addr_t phys; |
785 | |
786 | for (i = 0; i < nr_bds; i++) { |
787 | cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; |
788 | status = cur_p->status; |
789 | |
790 | /* If force is not specified, clean up only descriptors |
791 | * that have been completed by the MAC. |
792 | */ |
793 | if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) |
794 | break; |
795 | |
796 | /* Ensure we see complete descriptor update */ |
797 | dma_rmb(); |
798 | phys = desc_get_phys_addr(lp, desc: cur_p); |
799 | dma_unmap_single(lp->dev, phys, |
800 | (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), |
801 | DMA_TO_DEVICE); |
802 | |
803 | if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) { |
804 | napi_consume_skb(skb: cur_p->skb, budget); |
805 | packets++; |
806 | } |
807 | |
808 | cur_p->app0 = 0; |
809 | cur_p->app1 = 0; |
810 | cur_p->app2 = 0; |
811 | cur_p->app4 = 0; |
812 | cur_p->skb = NULL; |
813 | /* ensure our transmit path and device don't prematurely see status cleared */ |
814 | wmb(); |
815 | cur_p->cntrl = 0; |
816 | cur_p->status = 0; |
817 | |
818 | if (sizep) |
819 | *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; |
820 | } |
821 | |
822 | if (!force) { |
823 | lp->tx_bd_ci += i; |
824 | if (lp->tx_bd_ci >= lp->tx_bd_num) |
825 | lp->tx_bd_ci %= lp->tx_bd_num; |
826 | } |
827 | |
828 | return packets; |
829 | } |
830 | |
831 | /** |
832 | * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy |
833 | * @lp: Pointer to the axienet_local structure |
834 | * @num_frag: The number of BDs to check for |
835 | * |
836 | * Return: 0, on success |
837 | * NETDEV_TX_BUSY, if any of the descriptors are not free |
838 | * |
839 | * This function is invoked before BDs are allocated and transmission starts. |
840 | * This function returns 0 if a BD or group of BDs can be allocated for |
841 | * transmission. If the BD or any of the BDs are not free the function |
842 | * returns a busy status. |
843 | */ |
844 | static inline int axienet_check_tx_bd_space(struct axienet_local *lp, |
845 | int num_frag) |
846 | { |
847 | struct axidma_bd *cur_p; |
848 | |
849 | /* Ensure we see all descriptor updates from device or TX polling */ |
850 | rmb(); |
851 | cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % |
852 | lp->tx_bd_num]; |
853 | if (cur_p->cntrl) |
854 | return NETDEV_TX_BUSY; |
855 | return 0; |
856 | } |
857 | |
858 | /** |
859 | * axienet_dma_tx_cb - DMA engine callback for TX channel. |
860 | * @data: Pointer to the axienet_local structure. |
861 | * @result: error reporting through dmaengine_result. |
862 | * This function is called by dmaengine driver for TX channel to notify |
863 | * that the transmit is done. |
864 | */ |
865 | static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result) |
866 | { |
867 | struct skbuf_dma_descriptor *skbuf_dma; |
868 | struct axienet_local *lp = data; |
869 | struct netdev_queue *txq; |
870 | int len; |
871 | |
872 | skbuf_dma = axienet_get_tx_desc(lp, i: lp->tx_ring_tail++); |
873 | len = skbuf_dma->skb->len; |
874 | txq = skb_get_tx_queue(dev: lp->ndev, skb: skbuf_dma->skb); |
875 | u64_stats_update_begin(syncp: &lp->tx_stat_sync); |
876 | u64_stats_add(p: &lp->tx_bytes, val: len); |
877 | u64_stats_add(p: &lp->tx_packets, val: 1); |
878 | u64_stats_update_end(syncp: &lp->tx_stat_sync); |
879 | dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE); |
880 | dev_consume_skb_any(skb: skbuf_dma->skb); |
881 | netif_txq_completed_wake(txq, 1, len, |
882 | CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), |
883 | 2); |
884 | } |
885 | |
886 | /** |
887 | * axienet_start_xmit_dmaengine - Starts the transmission. |
888 | * @skb: sk_buff pointer that contains data to be Txed. |
889 | * @ndev: Pointer to net_device structure. |
890 | * |
891 | * Return: NETDEV_TX_OK on success, or when the packet is dropped on a |
892 | * non-space error. NETDEV_TX_BUSY when no free element is |
893 | * available in the TX skb ring buffer. |
894 | * |
895 | * This function is invoked to initiate transmission. The |
896 | * function maps the skb, registers the DMA callback and submits |
897 | * the DMA transaction. |
898 | * Additionally if checksum offloading is supported, |
899 | * it populates AXI Stream Control fields with appropriate values. |
900 | */ |
901 | static netdev_tx_t |
902 | axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) |
903 | { |
904 | struct dma_async_tx_descriptor *dma_tx_desc = NULL; |
905 | struct axienet_local *lp = netdev_priv(dev: ndev); |
906 | u32 app_metadata[DMA_NUM_APP_WORDS] = {0}; |
907 | struct skbuf_dma_descriptor *skbuf_dma; |
908 | struct dma_device *dma_dev; |
909 | struct netdev_queue *txq; |
910 | u32 csum_start_off; |
911 | u32 csum_index_off; |
912 | int sg_len; |
913 | int ret; |
914 | |
915 | dma_dev = lp->tx_chan->device; |
916 | sg_len = skb_shinfo(skb)->nr_frags + 1; |
917 | if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) { |
918 | netif_stop_queue(dev: ndev); |
919 | if (net_ratelimit()) |
920 | netdev_warn(dev: ndev, format: "TX ring unexpectedly full\n"); |
921 | return NETDEV_TX_BUSY; |
922 | } |
923 | |
924 | skbuf_dma = axienet_get_tx_desc(lp, i: lp->tx_ring_head); |
925 | if (!skbuf_dma) |
926 | goto xmit_error_drop_skb; |
927 | |
928 | lp->tx_ring_head++; |
929 | sg_init_table(skbuf_dma->sgl, sg_len); |
930 | ret = skb_to_sgvec(skb, sg: skbuf_dma->sgl, offset: 0, len: skb->len); |
931 | if (ret < 0) |
932 | goto xmit_error_drop_skb; |
933 | |
934 | ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); |
935 | if (!ret) |
936 | goto xmit_error_drop_skb; |
937 | |
938 | /* Fill up app fields for checksum */ |
939 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
940 | if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { |
941 | /* Tx Full Checksum Offload Enabled */ |
942 | app_metadata[0] |= 2; |
943 | } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { |
944 | csum_start_off = skb_transport_offset(skb); |
945 | csum_index_off = csum_start_off + skb->csum_offset; |
946 | /* Tx Partial Checksum Offload Enabled */ |
947 | app_metadata[0] |= 1; |
948 | app_metadata[1] = (csum_start_off << 16) | csum_index_off; |
949 | } |
950 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
951 | app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */ |
952 | } |
953 | |
954 | dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl, |
955 | sg_len, DMA_MEM_TO_DEV, |
956 | DMA_PREP_INTERRUPT, (void *)app_metadata); |
957 | if (!dma_tx_desc) |
958 | goto xmit_error_unmap_sg; |
959 | |
960 | skbuf_dma->skb = skb; |
961 | skbuf_dma->sg_len = sg_len; |
962 | dma_tx_desc->callback_param = lp; |
963 | dma_tx_desc->callback_result = axienet_dma_tx_cb; |
964 | txq = skb_get_tx_queue(dev: lp->ndev, skb); |
965 | netdev_tx_sent_queue(dev_queue: txq, bytes: skb->len); |
966 | netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), |
967 | 1, 2); |
968 | |
969 | dmaengine_submit(desc: dma_tx_desc); |
970 | dma_async_issue_pending(chan: lp->tx_chan); |
971 | return NETDEV_TX_OK; |
972 | |
973 | xmit_error_unmap_sg: |
974 | dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); |
975 | xmit_error_drop_skb: |
976 | dev_kfree_skb_any(skb); |
977 | return NETDEV_TX_OK; |
978 | } |
979 | |
980 | /** |
981 | * axienet_tx_poll - Invoked once a transmit is completed by the |
982 | * Axi DMA Tx channel. |
983 | * @napi: Pointer to NAPI structure. |
984 | * @budget: Max number of TX packets to process. |
985 | * |
986 | * Return: Number of TX packets processed. |
987 | * |
988 | * This function is invoked from the NAPI processing to notify the completion |
989 | * of transmit operation. It clears fields in the corresponding Tx BDs and |
990 | * unmaps the corresponding buffer so that CPU can regain ownership of the |
991 | * buffer. It finally invokes "netif_wake_queue" to restart transmission if |
992 | * required. |
993 | */ |
994 | static int axienet_tx_poll(struct napi_struct *napi, int budget) |
995 | { |
996 | struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); |
997 | struct net_device *ndev = lp->ndev; |
998 | u32 size = 0; |
999 | int packets; |
1000 | |
1001 | packets = axienet_free_tx_chain(lp, first_bd: lp->tx_bd_ci, nr_bds: lp->tx_bd_num, force: false, |
1002 | sizep: &size, budget); |
1003 | |
1004 | if (packets) { |
1005 | netdev_completed_queue(dev: ndev, pkts: packets, bytes: size); |
1006 | u64_stats_update_begin(syncp: &lp->tx_stat_sync); |
1007 | u64_stats_add(p: &lp->tx_packets, val: packets); |
1008 | u64_stats_add(p: &lp->tx_bytes, val: size); |
1009 | u64_stats_update_end(syncp: &lp->tx_stat_sync); |
1010 | |
1011 | /* Matches barrier in axienet_start_xmit */ |
1012 | smp_mb(); |
1013 | |
1014 | if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) |
1015 | netif_wake_queue(dev: ndev); |
1016 | } |
1017 | |
1018 | if (packets < budget && napi_complete_done(n: napi, work_done: packets)) { |
1019 | /* Re-enable TX completion interrupts. This should |
1020 | * cause an immediate interrupt if any TX packets are |
1021 | * already pending. |
1022 | */ |
1023 | spin_lock_irq(lock: &lp->tx_cr_lock); |
1024 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, value: lp->tx_dma_cr); |
1025 | spin_unlock_irq(lock: &lp->tx_cr_lock); |
1026 | } |
1027 | return packets; |
1028 | } |
1029 | |
1030 | /** |
1031 | * axienet_start_xmit - Starts the transmission. |
1032 | * @skb: sk_buff pointer that contains data to be Txed. |
1033 | * @ndev: Pointer to net_device structure. |
1034 | * |
1035 | * Return: NETDEV_TX_OK, on success |
1036 | * NETDEV_TX_BUSY, if any of the descriptors are not free |
1037 | * |
1038 | * This function is invoked from upper layers to initiate transmission. The |
1039 | * function uses the next available free BDs and populates their fields to |
1040 | * start the transmission. Additionally if checksum offloading is supported, |
1041 | * it populates AXI Stream Control fields with appropriate values. |
1042 | */ |
1043 | static netdev_tx_t |
1044 | axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
1045 | { |
1046 | u32 ii; |
1047 | u32 num_frag; |
1048 | u32 csum_start_off; |
1049 | u32 csum_index_off; |
1050 | skb_frag_t *frag; |
1051 | dma_addr_t tail_p, phys; |
1052 | u32 orig_tail_ptr, new_tail_ptr; |
1053 | struct axienet_local *lp = netdev_priv(dev: ndev); |
1054 | struct axidma_bd *cur_p; |
1055 | |
1056 | orig_tail_ptr = lp->tx_bd_tail; |
1057 | new_tail_ptr = orig_tail_ptr; |
1058 | |
1059 | num_frag = skb_shinfo(skb)->nr_frags; |
1060 | cur_p = &lp->tx_bd_v[orig_tail_ptr]; |
1061 | |
1062 | if (axienet_check_tx_bd_space(lp, num_frag: num_frag + 1)) { |
1063 | /* Should not happen as last start_xmit call should have |
1064 | * checked for sufficient space and queue should only be |
1065 | * woken when sufficient space is available. |
1066 | */ |
1067 | netif_stop_queue(dev: ndev); |
1068 | if (net_ratelimit()) |
1069 | netdev_warn(dev: ndev, format: "TX ring unexpectedly full\n"); |
1070 | return NETDEV_TX_BUSY; |
1071 | } |
1072 | |
1073 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1074 | if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { |
1075 | /* Tx Full Checksum Offload Enabled */ |
1076 | cur_p->app0 |= 2; |
1077 | } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { |
1078 | csum_start_off = skb_transport_offset(skb); |
1079 | csum_index_off = csum_start_off + skb->csum_offset; |
1080 | /* Tx Partial Checksum Offload Enabled */ |
1081 | cur_p->app0 |= 1; |
1082 | cur_p->app1 = (csum_start_off << 16) | csum_index_off; |
1083 | } |
1084 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
1085 | cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ |
1086 | } |
1087 | |
1088 | phys = dma_map_single(lp->dev, skb->data, |
1089 | skb_headlen(skb), DMA_TO_DEVICE); |
1090 | if (unlikely(dma_mapping_error(lp->dev, phys))) { |
1091 | if (net_ratelimit()) |
1092 | netdev_err(dev: ndev, format: "TX DMA mapping error\n"); |
1093 | ndev->stats.tx_dropped++; |
1094 | dev_kfree_skb_any(skb); |
1095 | return NETDEV_TX_OK; |
1096 | } |
1097 | desc_set_phys_addr(lp, addr: phys, desc: cur_p); |
1098 | cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; |
1099 | |
1100 | for (ii = 0; ii < num_frag; ii++) { |
1101 | if (++new_tail_ptr >= lp->tx_bd_num) |
1102 | new_tail_ptr = 0; |
1103 | cur_p = &lp->tx_bd_v[new_tail_ptr]; |
1104 | frag = &skb_shinfo(skb)->frags[ii]; |
1105 | phys = dma_map_single(lp->dev, |
1106 | skb_frag_address(frag), |
1107 | skb_frag_size(frag), |
1108 | DMA_TO_DEVICE); |
1109 | if (unlikely(dma_mapping_error(lp->dev, phys))) { |
1110 | if (net_ratelimit()) |
1111 | netdev_err(dev: ndev, format: "TX DMA mapping error\n"); |
1112 | ndev->stats.tx_dropped++; |
1113 | axienet_free_tx_chain(lp, first_bd: orig_tail_ptr, nr_bds: ii + 1, |
1114 | force: true, NULL, budget: 0); |
1115 | dev_kfree_skb_any(skb); |
1116 | return NETDEV_TX_OK; |
1117 | } |
1118 | desc_set_phys_addr(lp, addr: phys, desc: cur_p); |
1119 | cur_p->cntrl = skb_frag_size(frag); |
1120 | } |
1121 | |
1122 | cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; |
1123 | cur_p->skb = skb; |
1124 | |
1125 | tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; |
1126 | if (++new_tail_ptr >= lp->tx_bd_num) |
1127 | new_tail_ptr = 0; |
1128 | WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); |
1129 | netdev_sent_queue(dev: ndev, bytes: skb->len); |
1130 | |
1131 | /* Start the transfer */ |
1132 | axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, addr: tail_p); |
1133 | |
1134 | /* Stop queue if next transmit may not have space */ |
1135 | if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { |
1136 | netif_stop_queue(dev: ndev); |
1137 | |
1138 | /* Matches barrier in axienet_tx_poll */ |
1139 | smp_mb(); |
1140 | |
1141 | /* Space might have just been freed - check again */ |
1142 | if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) |
1143 | netif_wake_queue(dev: ndev); |
1144 | } |
1145 | |
1146 | return NETDEV_TX_OK; |
1147 | } |
1148 | |
1149 | /** |
1150 | * axienet_dma_rx_cb - DMA engine callback for RX channel. |
1151 | * @data: Pointer to the skbuf_dma_descriptor structure. |
1152 | * @result: error reporting through dmaengine_result. |
1153 | * This function is called by dmaengine driver for RX channel to notify |
1154 | * that the packet is received. |
1155 | */ |
1156 | static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result) |
1157 | { |
1158 | struct skbuf_dma_descriptor *skbuf_dma; |
1159 | size_t meta_len, meta_max_len, rx_len; |
1160 | struct axienet_local *lp = data; |
1161 | struct sk_buff *skb; |
1162 | u32 *app_metadata; |
1163 | |
1164 | skbuf_dma = axienet_get_rx_desc(lp, i: lp->rx_ring_tail++); |
1165 | skb = skbuf_dma->skb; |
1166 | app_metadata = dmaengine_desc_get_metadata_ptr(desc: skbuf_dma->desc, payload_len: &meta_len, |
1167 | max_len: &meta_max_len); |
1168 | dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size, |
1169 | DMA_FROM_DEVICE); |
1170 | /* TODO: Derive app word index programmatically */ |
1171 | rx_len = (app_metadata[LEN_APP] & 0xFFFF); |
1172 | skb_put(skb, len: rx_len); |
1173 | skb->protocol = eth_type_trans(skb, dev: lp->ndev); |
1174 | skb->ip_summed = CHECKSUM_NONE; |
1175 | |
1176 | __netif_rx(skb); |
1177 | u64_stats_update_begin(syncp: &lp->rx_stat_sync); |
1178 | u64_stats_add(p: &lp->rx_packets, val: 1); |
1179 | u64_stats_add(p: &lp->rx_bytes, val: rx_len); |
1180 | u64_stats_update_end(syncp: &lp->rx_stat_sync); |
1181 | axienet_rx_submit_desc(ndev: lp->ndev); |
1182 | dma_async_issue_pending(chan: lp->rx_chan); |
1183 | } |
1184 | |
1185 | /** |
1186 | * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. |
1187 | * @napi: Pointer to NAPI structure. |
1188 | * @budget: Max number of RX packets to process. |
1189 | * |
1190 | * Return: Number of RX packets processed. |
1191 | */ |
1192 | static int axienet_rx_poll(struct napi_struct *napi, int budget) |
1193 | { |
1194 | u32 length; |
1195 | u32 csumstatus; |
1196 | u32 size = 0; |
1197 | int packets = 0; |
1198 | dma_addr_t tail_p = 0; |
1199 | struct axidma_bd *cur_p; |
1200 | struct sk_buff *skb, *new_skb; |
1201 | struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); |
1202 | |
1203 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; |
1204 | |
1205 | while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { |
1206 | dma_addr_t phys; |
1207 | |
1208 | /* Ensure we see complete descriptor update */ |
1209 | dma_rmb(); |
1210 | |
1211 | skb = cur_p->skb; |
1212 | cur_p->skb = NULL; |
1213 | |
1214 | /* skb could be NULL if a previous pass already received the |
1215 | * packet for this slot in the ring, but failed to refill it |
1216 | * with a newly allocated buffer. In this case, don't try to |
1217 | * receive it again. |
1218 | */ |
1219 | if (likely(skb)) { |
1220 | length = cur_p->app4 & 0x0000FFFF; |
1221 | |
1222 | phys = desc_get_phys_addr(lp, desc: cur_p); |
1223 | dma_unmap_single(lp->dev, phys, lp->max_frm_size, |
1224 | DMA_FROM_DEVICE); |
1225 | |
1226 | skb_put(skb, len: length); |
1227 | skb->protocol = eth_type_trans(skb, dev: lp->ndev); |
1228 | /*skb_checksum_none_assert(skb);*/ |
1229 | skb->ip_summed = CHECKSUM_NONE; |
1230 | |
1231 | /* if we're doing Rx csum offload, set it up */ |
1232 | if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { |
1233 | csumstatus = (cur_p->app2 & |
1234 | XAE_FULL_CSUM_STATUS_MASK) >> 3; |
1235 | if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || |
1236 | csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { |
1237 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1238 | } |
1239 | } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { |
1240 | skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); |
1241 | skb->ip_summed = CHECKSUM_COMPLETE; |
1242 | } |
1243 | |
1244 | napi_gro_receive(napi, skb); |
1245 | |
1246 | size += length; |
1247 | packets++; |
1248 | } |
1249 | |
1250 | new_skb = napi_alloc_skb(napi, length: lp->max_frm_size); |
1251 | if (!new_skb) |
1252 | break; |
1253 | |
1254 | phys = dma_map_single(lp->dev, new_skb->data, |
1255 | lp->max_frm_size, |
1256 | DMA_FROM_DEVICE); |
1257 | if (unlikely(dma_mapping_error(lp->dev, phys))) { |
1258 | if (net_ratelimit()) |
1259 | netdev_err(dev: lp->ndev, format: "RX DMA mapping error\n"); |
1260 | dev_kfree_skb(new_skb); |
1261 | break; |
1262 | } |
1263 | desc_set_phys_addr(lp, addr: phys, desc: cur_p); |
1264 | |
1265 | cur_p->cntrl = lp->max_frm_size; |
1266 | cur_p->status = 0; |
1267 | cur_p->skb = new_skb; |
1268 | |
1269 | /* Only update tail_p to mark this slot as usable after it has |
1270 | * been successfully refilled. |
1271 | */ |
1272 | tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; |
1273 | |
1274 | if (++lp->rx_bd_ci >= lp->rx_bd_num) |
1275 | lp->rx_bd_ci = 0; |
1276 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; |
1277 | } |
1278 | |
1279 | u64_stats_update_begin(syncp: &lp->rx_stat_sync); |
1280 | u64_stats_add(p: &lp->rx_packets, val: packets); |
1281 | u64_stats_add(p: &lp->rx_bytes, val: size); |
1282 | u64_stats_update_end(syncp: &lp->rx_stat_sync); |
1283 | |
1284 | if (tail_p) |
1285 | axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, addr: tail_p); |
1286 | |
1287 | if (packets < budget && napi_complete_done(n: napi, work_done: packets)) { |
1288 | if (READ_ONCE(lp->rx_dim_enabled)) { |
1289 | struct dim_sample sample = { |
1290 | .time = ktime_get(), |
1291 | /* Safe because we are the only writer */ |
1292 | .pkt_ctr = u64_stats_read(p: &lp->rx_packets), |
1293 | .byte_ctr = u64_stats_read(p: &lp->rx_bytes), |
1294 | .event_ctr = READ_ONCE(lp->rx_irqs), |
1295 | }; |
1296 | |
1297 | net_dim(dim: &lp->rx_dim, end_sample: &sample); |
1298 | } |
1299 | |
1300 | /* Re-enable RX completion interrupts. This should |
1301 | * cause an immediate interrupt if any RX packets are |
1302 | * already pending. |
1303 | */ |
1304 | spin_lock_irq(lock: &lp->rx_cr_lock); |
1305 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, value: lp->rx_dma_cr); |
1306 | spin_unlock_irq(lock: &lp->rx_cr_lock); |
1307 | } |
1308 | return packets; |
1309 | } |
1310 | |
1311 | /** |
1312 | * axienet_tx_irq - Tx Done Isr. |
1313 | * @irq: irq number |
1314 | * @_ndev: net_device pointer |
1315 | * |
1316 | * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. |
1317 | * |
1318 | * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the |
1319 | * TX BD processing. |
1320 | */ |
1321 | static irqreturn_t axienet_tx_irq(int irq, void *_ndev) |
1322 | { |
1323 | unsigned int status; |
1324 | struct net_device *ndev = _ndev; |
1325 | struct axienet_local *lp = netdev_priv(dev: ndev); |
1326 | |
1327 | status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
1328 | |
1329 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
1330 | return IRQ_NONE; |
1331 | |
1332 | axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, value: status); |
1333 | |
1334 | if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { |
1335 | netdev_err(dev: ndev, format: "DMA Tx error 0x%x\n", status); |
1336 | netdev_err(dev: ndev, format: "Current BD is at: 0x%x%08x\n", |
1337 | (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, |
1338 | (lp->tx_bd_v[lp->tx_bd_ci]).phys); |
1339 | schedule_work(work: &lp->dma_err_task); |
1340 | } else { |
1341 | /* Disable further TX completion interrupts and schedule |
1342 | * NAPI to handle the completions. |
1343 | */ |
1344 | if (napi_schedule_prep(n: &lp->napi_tx)) { |
1345 | u32 cr; |
1346 | |
1347 | spin_lock(lock: &lp->tx_cr_lock); |
1348 | cr = lp->tx_dma_cr; |
1349 | cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); |
1350 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, value: cr); |
1351 | spin_unlock(lock: &lp->tx_cr_lock); |
1352 | __napi_schedule(n: &lp->napi_tx); |
1353 | } |
1354 | } |
1355 | |
1356 | return IRQ_HANDLED; |
1357 | } |
1358 | |
1359 | /** |
1360 | * axienet_rx_irq - Rx Isr. |
1361 | * @irq: irq number |
1362 | * @_ndev: net_device pointer |
1363 | * |
1364 | * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. |
1365 | * |
1366 | * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD |
1367 | * processing. |
1368 | */ |
1369 | static irqreturn_t axienet_rx_irq(int irq, void *_ndev) |
1370 | { |
1371 | unsigned int status; |
1372 | struct net_device *ndev = _ndev; |
1373 | struct axienet_local *lp = netdev_priv(dev: ndev); |
1374 | |
1375 | status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
1376 | |
1377 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
1378 | return IRQ_NONE; |
1379 | |
1380 | axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, value: status); |
1381 | |
1382 | if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { |
1383 | netdev_err(dev: ndev, format: "DMA Rx error 0x%x\n", status); |
1384 | netdev_err(dev: ndev, format: "Current BD is at: 0x%x%08x\n", |
1385 | (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, |
1386 | (lp->rx_bd_v[lp->rx_bd_ci]).phys); |
1387 | schedule_work(work: &lp->dma_err_task); |
1388 | } else { |
1389 | /* Disable further RX completion interrupts and schedule |
1390 | * NAPI receive. |
1391 | */ |
1392 | WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1); |
1393 | if (napi_schedule_prep(n: &lp->napi_rx)) { |
1394 | u32 cr; |
1395 | |
1396 | spin_lock(lock: &lp->rx_cr_lock); |
1397 | cr = lp->rx_dma_cr; |
1398 | cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); |
1399 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, value: cr); |
1400 | spin_unlock(lock: &lp->rx_cr_lock); |
1401 | |
1402 | __napi_schedule(n: &lp->napi_rx); |
1403 | } |
1404 | } |
1405 | |
1406 | return IRQ_HANDLED; |
1407 | } |
1408 | |
1409 | /** |
1410 | * axienet_eth_irq - Ethernet core Isr. |
1411 | * @irq: irq number |
1412 | * @_ndev: net_device pointer |
1413 | * |
1414 | * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. |
1415 | * |
1416 | * Handle miscellaneous conditions indicated by Ethernet core IRQ. |
1417 | */ |
1418 | static irqreturn_t axienet_eth_irq(int irq, void *_ndev) |
1419 | { |
1420 | struct net_device *ndev = _ndev; |
1421 | struct axienet_local *lp = netdev_priv(dev: ndev); |
1422 | unsigned int pending; |
1423 | |
1424 | pending = axienet_ior(lp, XAE_IP_OFFSET); |
1425 | if (!pending) |
1426 | return IRQ_NONE; |
1427 | |
1428 | if (pending & XAE_INT_RXFIFOOVR_MASK) |
1429 | ndev->stats.rx_missed_errors++; |
1430 | |
1431 | if (pending & XAE_INT_RXRJECT_MASK) |
1432 | ndev->stats.rx_dropped++; |
1433 | |
1434 | axienet_iow(lp, XAE_IS_OFFSET, value: pending); |
1435 | return IRQ_HANDLED; |
1436 | } |
1437 | |
1438 | static void axienet_dma_err_handler(struct work_struct *work); |
1439 | |
1440 | /** |
1441 | * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine: |
1442 | * allocate an skbuff, map it into the scatterlist, obtain a descriptor, |
1443 | * then add the callback information and submit the descriptor. |
1444 | * |
1445 | * @ndev: net_device pointer |
1446 | * |
1447 | */ |
1448 | static void axienet_rx_submit_desc(struct net_device *ndev) |
1449 | { |
1450 | struct dma_async_tx_descriptor *dma_rx_desc = NULL; |
1451 | struct axienet_local *lp = netdev_priv(dev: ndev); |
1452 | struct skbuf_dma_descriptor *skbuf_dma; |
1453 | struct sk_buff *skb; |
1454 | dma_addr_t addr; |
1455 | |
1456 | skbuf_dma = axienet_get_rx_desc(lp, i: lp->rx_ring_head); |
1457 | if (!skbuf_dma) |
1458 | return; |
1459 | |
1460 | lp->rx_ring_head++; |
1461 | skb = netdev_alloc_skb(dev: ndev, length: lp->max_frm_size); |
1462 | if (!skb) |
1463 | return; |
1464 | |
1465 | sg_init_table(skbuf_dma->sgl, 1); |
1466 | addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); |
1467 | if (unlikely(dma_mapping_error(lp->dev, addr))) { |
1468 | if (net_ratelimit()) |
1469 | netdev_err(dev: ndev, format: "DMA mapping error\n"); |
1470 | goto rx_submit_err_free_skb; |
1471 | } |
1472 | sg_dma_address(skbuf_dma->sgl) = addr; |
1473 | sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size; |
1474 | dma_rx_desc = dmaengine_prep_slave_sg(chan: lp->rx_chan, sgl: skbuf_dma->sgl, |
1475 | sg_len: 1, dir: DMA_DEV_TO_MEM, |
1476 | flags: DMA_PREP_INTERRUPT); |
1477 | if (!dma_rx_desc) |
1478 | goto rx_submit_err_unmap_skb; |
1479 | |
1480 | skbuf_dma->skb = skb; |
1481 | skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl); |
1482 | skbuf_dma->desc = dma_rx_desc; |
1483 | dma_rx_desc->callback_param = lp; |
1484 | dma_rx_desc->callback_result = axienet_dma_rx_cb; |
1485 | dmaengine_submit(desc: dma_rx_desc); |
1486 | |
1487 | return; |
1488 | |
1489 | rx_submit_err_unmap_skb: |
1490 | dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE); |
1491 | rx_submit_err_free_skb: |
1492 | dev_kfree_skb(skb); |
1493 | } |
1494 | |
1495 | /** |
1496 | * axienet_init_dmaengine - init the dmaengine code. |
1497 | * @ndev: Pointer to net_device structure |
1498 | * |
1499 | * Return: 0, on success. |
1500 | * non-zero error value on failure |
1501 | * |
1502 | * This is the dmaengine initialization code. |
1503 | */ |
1504 | static int axienet_init_dmaengine(struct net_device *ndev) |
1505 | { |
1506 | struct axienet_local *lp = netdev_priv(dev: ndev); |
1507 | struct skbuf_dma_descriptor *skbuf_dma; |
1508 | int i, ret; |
1509 | |
1510 | lp->tx_chan = dma_request_chan(dev: lp->dev, name: "tx_chan0"); |
1511 | if (IS_ERR(ptr: lp->tx_chan)) { |
1512 | dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n"); |
1513 | return PTR_ERR(ptr: lp->tx_chan); |
1514 | } |
1515 | |
1516 | lp->rx_chan = dma_request_chan(dev: lp->dev, name: "rx_chan0"); |
1517 | if (IS_ERR(ptr: lp->rx_chan)) { |
1518 | ret = PTR_ERR(ptr: lp->rx_chan); |
1519 | dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n"); |
1520 | goto err_dma_release_tx; |
1521 | } |
1522 | |
1523 | lp->tx_ring_tail = 0; |
1524 | lp->tx_ring_head = 0; |
1525 | lp->rx_ring_tail = 0; |
1526 | lp->rx_ring_head = 0; |
1527 | lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring), |
1528 | GFP_KERNEL); |
1529 | if (!lp->tx_skb_ring) { |
1530 | ret = -ENOMEM; |
1531 | goto err_dma_release_rx; |
1532 | } |
1533 | for (i = 0; i < TX_BD_NUM_MAX; i++) { |
1534 | skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); |
1535 | if (!skbuf_dma) { |
1536 | ret = -ENOMEM; |
1537 | goto err_free_tx_skb_ring; |
1538 | } |
1539 | lp->tx_skb_ring[i] = skbuf_dma; |
1540 | } |
1541 | |
1542 | lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring), |
1543 | GFP_KERNEL); |
1544 | if (!lp->rx_skb_ring) { |
1545 | ret = -ENOMEM; |
1546 | goto err_free_tx_skb_ring; |
1547 | } |
1548 | for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) { |
1549 | skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); |
1550 | if (!skbuf_dma) { |
1551 | ret = -ENOMEM; |
1552 | goto err_free_rx_skb_ring; |
1553 | } |
1554 | lp->rx_skb_ring[i] = skbuf_dma; |
1555 | } |
1556 | /* TODO: Instead of BD_NUM_DEFAULT use runtime support */ |
1557 | for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) |
1558 | axienet_rx_submit_desc(ndev); |
1559 | dma_async_issue_pending(lp->rx_chan); |
1560 | |
1561 | return 0; |
1562 | |
1563 | err_free_rx_skb_ring: |
1564 | for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) |
1565 | kfree(lp->rx_skb_ring[i]); |
1566 | kfree(lp->rx_skb_ring); |
1567 | err_free_tx_skb_ring: |
1568 | for (i = 0; i < TX_BD_NUM_MAX; i++) |
1569 | kfree(lp->tx_skb_ring[i]); |
1570 | kfree(lp->tx_skb_ring); |
1571 | err_dma_release_rx: |
1572 | dma_release_channel(lp->rx_chan); |
1573 | err_dma_release_tx: |
1574 | dma_release_channel(lp->tx_chan); |
1575 | return ret; |
1576 | } |
1577 | |
1578 | /** |
1579 | * axienet_init_legacy_dma - init the dma legacy code. |
1580 | * @ndev: Pointer to net_device structure |
1581 | * |
1582 | * Return: 0, on success. |
1583 | * non-zero error value on failure |
1584 | * |
1585 | * This is the dma initialization code. It also allocates interrupt |
1586 | * service routines, enables the interrupt lines and ISR handling. |
1587 | * |
1588 | */ |
1589 | static int axienet_init_legacy_dma(struct net_device *ndev) |
1590 | { |
1591 | int ret; |
1592 | struct axienet_local *lp = netdev_priv(ndev); |
1593 | |
1594 | /* Enable worker thread for Axi DMA error handling */ |
1595 | lp->stopping = false; |
1596 | INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); |
1597 | |
1598 | napi_enable(&lp->napi_rx); |
1599 | napi_enable(&lp->napi_tx); |
1600 | |
1601 | /* Enable interrupts for Axi DMA Tx */ |
1602 | ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, |
1603 | ndev->name, ndev); |
1604 | if (ret) |
1605 | goto err_tx_irq; |
1606 | /* Enable interrupts for Axi DMA Rx */ |
1607 | ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, |
1608 | ndev->name, ndev); |
1609 | if (ret) |
1610 | goto err_rx_irq; |
1611 | /* Enable interrupts for Axi Ethernet core (if defined) */ |
1612 | if (lp->eth_irq > 0) { |
1613 | ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, |
1614 | ndev->name, ndev); |
1615 | if (ret) |
1616 | goto err_eth_irq; |
1617 | } |
1618 | |
1619 | return 0; |
1620 | |
1621 | err_eth_irq: |
1622 | free_irq(lp->rx_irq, ndev); |
1623 | err_rx_irq: |
1624 | free_irq(lp->tx_irq, ndev); |
1625 | err_tx_irq: |
1626 | napi_disable(&lp->napi_tx); |
1627 | napi_disable(&lp->napi_rx); |
1628 | cancel_work_sync(&lp->dma_err_task); |
1629 | dev_err(lp->dev, "request_irq() failed\n"); |
1630 | return ret; |
1631 | } |
1632 | |
1633 | /** |
1634 | * axienet_open - Driver open routine. |
1635 | * @ndev: Pointer to net_device structure |
1636 | * |
1637 | * Return: 0, on success. |
1638 | * non-zero error value on failure |
1639 | * |
1640 | * This is the driver open routine. It calls phylink_start to start the |
1641 | * PHY device. |
1642 | * It also allocates interrupt service routines, enables the interrupt lines |
1643 | * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer |
1644 | * descriptors are initialized. |
1645 | */ |
1646 | static int axienet_open(struct net_device *ndev) |
1647 | { |
1648 | int ret; |
1649 | struct axienet_local *lp = netdev_priv(ndev); |
1650 | |
1651 | /* When we do an Axi Ethernet reset, it resets the complete core |
1652 | * including the MDIO. MDIO must be disabled before resetting. |
1653 | * Hold MDIO bus lock to avoid MDIO accesses during the reset. |
1654 | */ |
1655 | axienet_lock_mii(lp); |
1656 | ret = axienet_device_reset(ndev); |
1657 | axienet_unlock_mii(lp); |
1658 | |
1659 | ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); |
1660 | if (ret) { |
1661 | dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); |
1662 | return ret; |
1663 | } |
1664 | |
1665 | phylink_start(lp->phylink); |
1666 | |
1667 | /* Start the statistics refresh work */ |
1668 | schedule_delayed_work(&lp->stats_work, 0); |
1669 | |
1670 | if (lp->use_dmaengine) { |
1671 | /* Enable interrupts for Axi Ethernet core (if defined) */ |
1672 | if (lp->eth_irq > 0) { |
1673 | ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, |
1674 | ndev->name, ndev); |
1675 | if (ret) |
1676 | goto err_phy; |
1677 | } |
1678 | |
1679 | ret = axienet_init_dmaengine(ndev); |
1680 | if (ret < 0) |
1681 | goto err_free_eth_irq; |
1682 | } else { |
1683 | ret = axienet_init_legacy_dma(ndev); |
1684 | if (ret) |
1685 | goto err_phy; |
1686 | } |
1687 | |
1688 | return 0; |
1689 | |
1690 | err_free_eth_irq: |
1691 | if (lp->eth_irq > 0) |
1692 | free_irq(lp->eth_irq, ndev); |
1693 | err_phy: |
1694 | cancel_work_sync(&lp->rx_dim.work); |
1695 | cancel_delayed_work_sync(&lp->stats_work); |
1696 | phylink_stop(lp->phylink); |
1697 | phylink_disconnect_phy(lp->phylink); |
1698 | return ret; |
1699 | } |
1700 | |
1701 | /** |
1702 | * axienet_stop - Driver stop routine. |
1703 | * @ndev: Pointer to net_device structure |
1704 | * |
1705 | * Return: 0, on success. |
1706 | * |
1707 | * This is the driver stop routine. It calls phylink_disconnect to stop the PHY |
1708 | * device. It also removes the interrupt handlers and disables the interrupts. |
1709 | * The Axi DMA Tx/Rx BDs are released. |
1710 | */ |
1711 | static int axienet_stop(struct net_device *ndev) |
1712 | { |
1713 | struct axienet_local *lp = netdev_priv(ndev); |
1714 | int i; |
1715 | |
1716 | if (!lp->use_dmaengine) { |
1717 | WRITE_ONCE(lp->stopping, true); |
1718 | flush_work(&lp->dma_err_task); |
1719 | |
1720 | napi_disable(&lp->napi_tx); |
1721 | napi_disable(&lp->napi_rx); |
1722 | } |
1723 | |
1724 | cancel_work_sync(&lp->rx_dim.work); |
1725 | cancel_delayed_work_sync(&lp->stats_work); |
1726 | |
1727 | phylink_stop(lp->phylink); |
1728 | phylink_disconnect_phy(lp->phylink); |
1729 | |
1730 | axienet_setoptions(ndev, lp->options & |
1731 | ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); |
1732 | |
1733 | if (!lp->use_dmaengine) { |
1734 | axienet_dma_stop(lp); |
1735 | cancel_work_sync(&lp->dma_err_task); |
1736 | free_irq(lp->tx_irq, ndev); |
1737 | free_irq(lp->rx_irq, ndev); |
1738 | axienet_dma_bd_release(ndev); |
1739 | } else { |
1740 | dmaengine_terminate_sync(lp->tx_chan); |
1741 | dmaengine_synchronize(lp->tx_chan); |
1742 | dmaengine_terminate_sync(lp->rx_chan); |
1743 | dmaengine_synchronize(lp->rx_chan); |
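| /* At this point all in-flight descriptors and their completion callbacks have finished, so the skb bookkeeping rings below can be freed safely. */ |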
1744 | |
1745 | for (i = 0; i < TX_BD_NUM_MAX; i++) |
1746 | kfree(lp->tx_skb_ring[i]); |
1747 | kfree(lp->tx_skb_ring); |
1748 | for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) |
1749 | kfree(lp->rx_skb_ring[i]); |
1750 | kfree(lp->rx_skb_ring); |
1751 | |
1752 | dma_release_channel(lp->rx_chan); |
1753 | dma_release_channel(lp->tx_chan); |
1754 | } |
1755 | |
1756 | netdev_reset_queue(ndev); |
1757 | axienet_iow(lp, XAE_IE_OFFSET, 0); |
1758 | |
1759 | if (lp->eth_irq > 0) |
1760 | free_irq(lp->eth_irq, ndev); |
1761 | return 0; |
1762 | } |
1763 | |
1764 | /** |
1765 | * axienet_change_mtu - Driver change mtu routine. |
1766 | * @ndev: Pointer to net_device structure |
1767 | * @new_mtu: New mtu value to be applied |
1768 | * |
1769 | * Return: Always returns 0 (success). |
1770 | * |
1771 | * This is the change mtu driver routine. It checks if the Axi Ethernet |
1772 | * hardware supports jumbo frames before changing the mtu. This can be |
1773 | * called only when the device is not up. |
1774 | */ |
1775 | static int axienet_change_mtu(struct net_device *ndev, int new_mtu) |
1776 | { |
1777 | struct axienet_local *lp = netdev_priv(ndev); |
1778 | |
1779 | if (netif_running(ndev)) |
1780 | return -EBUSY; |
1781 | |
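| /* The full frame (new MTU plus VLAN Ethernet header and trailer) must fit in the Rx memory configured in hardware (xlnx,rxmem). */ |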
1782 | if ((new_mtu + VLAN_ETH_HLEN + |
1783 | XAE_TRL_SIZE) > lp->rxmem) |
1784 | return -EINVAL; |
1785 | |
1786 | WRITE_ONCE(ndev->mtu, new_mtu); |
1787 | |
1788 | return 0; |
1789 | } |
1790 | |
1791 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1792 | /** |
1793 | * axienet_poll_controller - Axi Ethernet poll mechanism. |
1794 | * @ndev: Pointer to net_device structure |
1795 | * |
1796 | * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior |
1797 | * to polling the ISRs and are enabled back after the polling is done. |
1798 | */ |
1799 | static void axienet_poll_controller(struct net_device *ndev) |
1800 | { |
1801 | struct axienet_local *lp = netdev_priv(ndev); |
1802 | |
1803 | disable_irq(lp->tx_irq); |
1804 | disable_irq(lp->rx_irq); |
1805 | axienet_rx_irq(lp->tx_irq, ndev); |
1806 | axienet_tx_irq(lp->rx_irq, ndev); |
1807 | enable_irq(lp->tx_irq); |
1808 | enable_irq(lp->rx_irq); |
1809 | } |
1810 | #endif |
1811 | |
1812 | static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1813 | { |
1814 | struct axienet_local *lp = netdev_priv(dev); |
1815 | |
1816 | if (!netif_running(dev)) |
1817 | return -EINVAL; |
1818 | |
1819 | return phylink_mii_ioctl(lp->phylink, rq, cmd); |
1820 | } |
1821 | |
1822 | static void |
1823 | axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
1824 | { |
1825 | struct axienet_local *lp = netdev_priv(dev); |
1826 | unsigned int start; |
1827 | |
1828 | netdev_stats_to_stats64(stats, &dev->stats); |
1829 | |
1830 | do { |
1831 | start = u64_stats_fetch_begin(&lp->rx_stat_sync); |
1832 | stats->rx_packets = u64_stats_read(&lp->rx_packets); |
1833 | stats->rx_bytes = u64_stats_read(&lp->rx_bytes); |
1834 | } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); |
1835 | |
1836 | do { |
1837 | start = u64_stats_fetch_begin(&lp->tx_stat_sync); |
1838 | stats->tx_packets = u64_stats_read(&lp->tx_packets); |
1839 | stats->tx_bytes = u64_stats_read(&lp->tx_bytes); |
1840 | } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); |
1841 | |
1842 | if (!(lp->features & XAE_FEATURE_STATS)) |
1843 | return; |
1844 | |
1845 | do { |
1846 | start = read_seqcount_begin(&lp->hw_stats_seqcount); |
1847 | stats->rx_length_errors = |
1848 | axienet_stat(lp, STAT_RX_LENGTH_ERRORS); |
1849 | stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS); |
1850 | stats->rx_frame_errors = |
1851 | axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); |
1852 | stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) + |
1853 | axienet_stat(lp, STAT_FRAGMENT_FRAMES) + |
1854 | stats->rx_length_errors + |
1855 | stats->rx_crc_errors + |
1856 | stats->rx_frame_errors; |
1857 | stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); |
1858 | |
1859 | stats->tx_aborted_errors = |
1860 | axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); |
1861 | stats->tx_fifo_errors = |
1862 | axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS); |
1863 | stats->tx_window_errors = |
1864 | axienet_stat(lp, STAT_TX_LATE_COLLISIONS); |
1865 | stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) + |
1866 | stats->tx_aborted_errors + |
1867 | stats->tx_fifo_errors + |
1868 | stats->tx_window_errors; |
1869 | } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); |
1870 | } |
1871 | |
1872 | static const struct net_device_ops axienet_netdev_ops = { |
1873 | .ndo_open = axienet_open, |
1874 | .ndo_stop = axienet_stop, |
1875 | .ndo_start_xmit = axienet_start_xmit, |
1876 | .ndo_get_stats64 = axienet_get_stats64, |
1877 | .ndo_change_mtu = axienet_change_mtu, |
1878 | .ndo_set_mac_address = netdev_set_mac_address, |
1879 | .ndo_validate_addr = eth_validate_addr, |
1880 | .ndo_eth_ioctl = axienet_ioctl, |
1881 | .ndo_set_rx_mode = axienet_set_multicast_list, |
1882 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1883 | .ndo_poll_controller = axienet_poll_controller, |
1884 | #endif |
1885 | }; |
1886 | |
1887 | static const struct net_device_ops axienet_netdev_dmaengine_ops = { |
1888 | .ndo_open = axienet_open, |
1889 | .ndo_stop = axienet_stop, |
1890 | .ndo_start_xmit = axienet_start_xmit_dmaengine, |
1891 | .ndo_get_stats64 = axienet_get_stats64, |
1892 | .ndo_change_mtu = axienet_change_mtu, |
1893 | .ndo_set_mac_address = netdev_set_mac_address, |
1894 | .ndo_validate_addr = eth_validate_addr, |
1895 | .ndo_eth_ioctl = axienet_ioctl, |
1896 | .ndo_set_rx_mode = axienet_set_multicast_list, |
1897 | }; |
1898 | |
1899 | /** |
1900 | * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. |
1901 | * @ndev: Pointer to net_device structure |
1902 | * @ed: Pointer to ethtool_drvinfo structure |
1903 | * |
1904 | * This implements ethtool command for getting the driver information. |
1905 | * Issue "ethtool -i ethX" under linux prompt to execute this function. |
1906 | */ |
1907 | static void axienet_ethtools_get_drvinfo(struct net_device *ndev, |
1908 | struct ethtool_drvinfo *ed) |
1909 | { |
1910 | strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); |
1911 | strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); |
1912 | } |
1913 | |
1914 | /** |
1915 | * axienet_ethtools_get_regs_len - Get the total regs length present in the |
1916 | * AxiEthernet core. |
1917 | * @ndev: Pointer to net_device structure |
1918 | * |
1919 | * This implements ethtool command for getting the total register length |
1920 | * information. |
1921 | * |
1922 | * Return: the total regs length |
1923 | */ |
1924 | static int axienet_ethtools_get_regs_len(struct net_device *ndev) |
1925 | { |
1926 | return sizeof(u32) * AXIENET_REGS_N; |
1927 | } |
1928 | |
1929 | /** |
1930 | * axienet_ethtools_get_regs - Dump the contents of all registers present |
1931 | * in AxiEthernet core. |
1932 | * @ndev: Pointer to net_device structure |
1933 | * @regs: Pointer to ethtool_regs structure |
1934 | * @ret: Void pointer used to return the contents of the registers. |
1935 | * |
1936 | * This implements ethtool command for getting the Axi Ethernet register dump. |
1937 | * Issue "ethtool -d ethX" to execute this function. |
1938 | */ |
1939 | static void axienet_ethtools_get_regs(struct net_device *ndev, |
1940 | struct ethtool_regs *regs, void *ret) |
1941 | { |
1942 | u32 *data = (u32 *)ret; |
1943 | size_t len = sizeof(u32) * AXIENET_REGS_N; |
1944 | struct axienet_local *lp = netdev_priv(ndev); |
1945 | |
1946 | regs->version = 0; |
1947 | regs->len = len; |
1948 | |
1949 | memset(data, 0, len); |
1950 | data[0] = axienet_ior(lp, XAE_RAF_OFFSET); |
1951 | data[1] = axienet_ior(lp, XAE_TPF_OFFSET); |
1952 | data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); |
1953 | data[3] = axienet_ior(lp, XAE_IS_OFFSET); |
1954 | data[4] = axienet_ior(lp, XAE_IP_OFFSET); |
1955 | data[5] = axienet_ior(lp, XAE_IE_OFFSET); |
1956 | data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); |
1957 | data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); |
1958 | data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); |
1959 | data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); |
1960 | data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); |
1961 | data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); |
1962 | data[12] = axienet_ior(lp, XAE_PPST_OFFSET); |
1963 | data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); |
1964 | data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); |
1965 | data[15] = axienet_ior(lp, XAE_TC_OFFSET); |
1966 | data[16] = axienet_ior(lp, XAE_FCC_OFFSET); |
1967 | data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); |
1968 | data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); |
1969 | data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); |
1970 | data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); |
1971 | data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); |
1972 | data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); |
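| /* data[23] through data[26] are not populated and remain zero from the memset above. */ |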
1973 | data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); |
1974 | data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); |
1975 | data[29] = axienet_ior(lp, XAE_FMI_OFFSET); |
1976 | data[30] = axienet_ior(lp, XAE_AF0_OFFSET); |
1977 | data[31] = axienet_ior(lp, XAE_AF1_OFFSET); |
1978 | if (!lp->use_dmaengine) { |
1979 | data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
1980 | data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
1981 | data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); |
1982 | data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); |
1983 | data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
1984 | data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
1985 | data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); |
1986 | data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); |
1987 | } |
1988 | } |
1989 | |
1990 | static void |
1991 | axienet_ethtools_get_ringparam(struct net_device *ndev, |
1992 | struct ethtool_ringparam *ering, |
1993 | struct kernel_ethtool_ringparam *kernel_ering, |
1994 | struct netlink_ext_ack *extack) |
1995 | { |
1996 | struct axienet_local *lp = netdev_priv(ndev); |
1997 | |
1998 | ering->rx_max_pending = RX_BD_NUM_MAX; |
1999 | ering->rx_mini_max_pending = 0; |
2000 | ering->rx_jumbo_max_pending = 0; |
2001 | ering->tx_max_pending = TX_BD_NUM_MAX; |
2002 | ering->rx_pending = lp->rx_bd_num; |
2003 | ering->rx_mini_pending = 0; |
2004 | ering->rx_jumbo_pending = 0; |
2005 | ering->tx_pending = lp->tx_bd_num; |
2006 | } |
2007 | |
2008 | static int |
2009 | axienet_ethtools_set_ringparam(struct net_device *ndev, |
2010 | struct ethtool_ringparam *ering, |
2011 | struct kernel_ethtool_ringparam *kernel_ering, |
2012 | struct netlink_ext_ack *extack) |
2013 | { |
2014 | struct axienet_local *lp = netdev_priv(ndev); |
2015 | |
2016 | if (ering->rx_pending > RX_BD_NUM_MAX || |
2017 | ering->rx_mini_pending || |
2018 | ering->rx_jumbo_pending || |
2019 | ering->tx_pending < TX_BD_NUM_MIN || |
2020 | ering->tx_pending > TX_BD_NUM_MAX) |
2021 | return -EINVAL; |
2022 | |
2023 | if (netif_running(ndev)) |
2024 | return -EBUSY; |
2025 | |
2026 | lp->rx_bd_num = ering->rx_pending; |
2027 | lp->tx_bd_num = ering->tx_pending; |
2028 | return 0; |
2029 | } |
2030 | |
2031 | /** |
2032 | * axienet_ethtools_get_pauseparam - Get the pause parameter setting for |
2033 | * Tx and Rx paths. |
2034 | * @ndev: Pointer to net_device structure |
2035 | * @epauseparm: Pointer to ethtool_pauseparam structure. |
2036 | * |
2037 | * This implements ethtool command for getting axi ethernet pause frame |
2038 | * setting. Issue "ethtool -a ethX" to execute this function. |
2039 | */ |
2040 | static void |
2041 | axienet_ethtools_get_pauseparam(struct net_device *ndev, |
2042 | struct ethtool_pauseparam *epauseparm) |
2043 | { |
2044 | struct axienet_local *lp = netdev_priv(ndev); |
2045 | |
2046 | phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); |
2047 | } |
2048 | |
2049 | /** |
2050 | * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) |
2051 | * settings. |
2052 | * @ndev: Pointer to net_device structure |
2053 | * @epauseparm: Pointer to ethtool_pauseparam structure |
2054 | * |
2055 | * This implements ethtool command for enabling flow control on Rx and Tx |
2056 | * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this |
2057 | * function. |
2058 | * |
2059 | * Return: 0 on success, -EFAULT if device is running |
2060 | */ |
2061 | static int |
2062 | axienet_ethtools_set_pauseparam(struct net_device *ndev, |
2063 | struct ethtool_pauseparam *epauseparm) |
2064 | { |
2065 | struct axienet_local *lp = netdev_priv(ndev); |
2066 | |
2067 | return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); |
2068 | } |
2069 | |
2070 | /** |
2071 | * axienet_update_coalesce_rx() - Set RX CR |
2072 | * @lp: Device private data |
2073 | * @cr: Value to write to the RX CR |
2074 | * @mask: Bits to set from @cr |
2075 | */ |
2076 | static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr, |
2077 | u32 mask) |
2078 | { |
2079 | spin_lock_irq(&lp->rx_cr_lock); |
2080 | lp->rx_dma_cr &= ~mask; |
2081 | lp->rx_dma_cr |= cr; |
2082 | /* If DMA isn't started, then the settings will be applied the next |
2083 | * time dma_start() is called. |
2084 | */ |
2085 | if (lp->rx_dma_started) { |
2086 | u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
2087 | |
2088 | /* Don't enable IRQs if they are disabled by NAPI */ |
2089 | if (reg & XAXIDMA_IRQ_ALL_MASK) |
2090 | cr = lp->rx_dma_cr; |
2091 | else |
2092 | cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; |
2093 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); |
2094 | } |
2095 | spin_unlock_irq(&lp->rx_cr_lock); |
2096 | } |
2097 | |
2098 | /** |
2099 | * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM |
2100 | * @lp: Device private data |
2101 | */ |
2102 | static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp) |
2103 | { |
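| /* profile_ix selects 4^ix frames per interrupt (1, 4, 16, 64, ...), capped at 255, the largest count the coalesce field accepts. */ |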
2104 | return min(1 << (lp->rx_dim.profile_ix << 1), 255); |
2105 | } |
2106 | |
2107 | /** |
2108 | * axienet_rx_dim_work() - Adjust RX DIM settings |
2109 | * @work: The work struct |
2110 | */ |
2111 | static void axienet_rx_dim_work(struct work_struct *work) |
2112 | { |
2113 | struct axienet_local *lp = |
2114 | container_of(work, struct axienet_local, rx_dim.work); |
2115 | u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0); |
2116 | u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK | |
2117 | XAXIDMA_IRQ_ERROR_MASK; |
2118 | |
2119 | axienet_update_coalesce_rx(lp, cr, mask); |
2120 | lp->rx_dim.state = DIM_START_MEASURE; |
2121 | } |
2122 | |
2123 | /** |
2124 | * axienet_update_coalesce_tx() - Set TX CR |
2125 | * @lp: Device private data |
2126 | * @cr: Value to write to the TX CR |
2127 | * @mask: Bits to set from @cr |
2128 | */ |
2129 | static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr, |
2130 | u32 mask) |
2131 | { |
2132 | spin_lock_irq(&lp->tx_cr_lock); |
2133 | lp->tx_dma_cr &= ~mask; |
2134 | lp->tx_dma_cr |= cr; |
2135 | /* If DMA isn't started, then the settings will be applied the next |
2136 | * time dma_start() is called. |
2137 | */ |
2138 | if (lp->tx_dma_started) { |
2139 | u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
2140 | |
2141 | /* Don't enable IRQs if they are disabled by NAPI */ |
2142 | if (reg & XAXIDMA_IRQ_ALL_MASK) |
2143 | cr = lp->tx_dma_cr; |
2144 | else |
2145 | cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; |
2146 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); |
2147 | } |
2148 | spin_unlock_irq(&lp->tx_cr_lock); |
2149 | } |
2150 | |
2151 | /** |
2152 | * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. |
2153 | * @ndev: Pointer to net_device structure |
2154 | * @ecoalesce: Pointer to ethtool_coalesce structure |
2155 | * @kernel_coal: ethtool CQE mode setting structure |
2156 | * @extack: extack for reporting error messages |
2157 | * |
2158 | * This implements ethtool command for getting the DMA interrupt coalescing |
2159 | * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to |
2160 | * execute this function. |
2161 | * |
2162 | * Return: 0 always |
2163 | */ |
2164 | static int |
2165 | axienet_ethtools_get_coalesce(struct net_device *ndev, |
2166 | struct ethtool_coalesce *ecoalesce, |
2167 | struct kernel_ethtool_coalesce *kernel_coal, |
2168 | struct netlink_ext_ack *extack) |
2169 | { |
2170 | struct axienet_local *lp = netdev_priv(ndev); |
2171 | u32 cr; |
2172 | |
2173 | ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled; |
2174 | |
2175 | spin_lock_irq(&lp->rx_cr_lock); |
2176 | cr = lp->rx_dma_cr; |
2177 | spin_unlock_irq(&lp->rx_cr_lock); |
2178 | axienet_coalesce_params(lp, cr, |
2179 | &ecoalesce->rx_max_coalesced_frames, |
2180 | &ecoalesce->rx_coalesce_usecs); |
2181 | |
2182 | spin_lock_irq(&lp->tx_cr_lock); |
2183 | cr = lp->tx_dma_cr; |
2184 | spin_unlock_irq(&lp->tx_cr_lock); |
2185 | axienet_coalesce_params(lp, cr, |
2186 | &ecoalesce->tx_max_coalesced_frames, |
2187 | &ecoalesce->tx_coalesce_usecs); |
2188 | return 0; |
2189 | } |
2190 | |
2191 | /** |
2192 | * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. |
2193 | * @ndev: Pointer to net_device structure |
2194 | * @ecoalesce: Pointer to ethtool_coalesce structure |
2195 | * @kernel_coal: ethtool CQE mode setting structure |
2196 | * @extack: extack for reporting error messages |
2197 | * |
2198 | * This implements ethtool command for setting the DMA interrupt coalescing |
2199 | * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux |
2200 | * prompt to execute this function. |
2201 | * |
2202 | * Return: 0, on success, Non-zero error value on failure. |
2203 | */ |
2204 | static int |
2205 | axienet_ethtools_set_coalesce(struct net_device *ndev, |
2206 | struct ethtool_coalesce *ecoalesce, |
2207 | struct kernel_ethtool_coalesce *kernel_coal, |
2208 | struct netlink_ext_ack *extack) |
2209 | { |
2210 | struct axienet_local *lp = netdev_priv(ndev); |
2211 | bool new_dim = ecoalesce->use_adaptive_rx_coalesce; |
2212 | bool old_dim = lp->rx_dim_enabled; |
2213 | u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK; |
2214 | |
2215 | if (ecoalesce->rx_max_coalesced_frames > 255 || |
2216 | ecoalesce->tx_max_coalesced_frames > 255) { |
2217 | NL_SET_ERR_MSG(extack, "frames must be less than 256"); |
2218 | return -EINVAL; |
2219 | } |
2220 | |
2221 | if (!ecoalesce->rx_max_coalesced_frames || |
2222 | !ecoalesce->tx_max_coalesced_frames) { |
2223 | NL_SET_ERR_MSG(extack, "frames must be non-zero"); |
2224 | return -EINVAL; |
2225 | } |
2226 | |
2227 | if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) && |
2228 | !ecoalesce->rx_coalesce_usecs) || |
2229 | (ecoalesce->tx_max_coalesced_frames > 1 && |
2230 | !ecoalesce->tx_coalesce_usecs)) { |
2231 | NL_SET_ERR_MSG(extack, |
2232 | "usecs must be non-zero when frames is greater than one"); |
2233 | return -EINVAL; |
2234 | } |
2235 | |
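| /* Three cases follow: enabling DIM programs a full CR using DIM's current count; disabling DIM quiesces the DIM work and programs the user's exact count/usec pair; if DIM stays enabled, only the delay-timer bits are updated so DIM's chosen count is not overwritten. */ |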
2236 | if (new_dim && !old_dim) { |
2237 | cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), |
2238 | ecoalesce->rx_coalesce_usecs); |
2239 | } else if (!new_dim) { |
2240 | if (old_dim) { |
2241 | WRITE_ONCE(lp->rx_dim_enabled, false); |
2242 | napi_synchronize(&lp->napi_rx); |
2243 | flush_work(&lp->rx_dim.work); |
2244 | } |
2245 | |
2246 | cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames, |
2247 | ecoalesce->rx_coalesce_usecs); |
2248 | } else { |
2249 | /* Dummy value for count just to calculate timer */ |
2250 | cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs); |
2251 | mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK; |
2252 | } |
2253 | |
2254 | axienet_update_coalesce_rx(lp, cr, mask); |
2255 | if (new_dim && !old_dim) |
2256 | WRITE_ONCE(lp->rx_dim_enabled, true); |
2257 | |
2258 | cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames, |
2259 | ecoalesce->tx_coalesce_usecs); |
2260 | axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK); |
2261 | return 0; |
2262 | } |
2263 | |
2264 | static int |
2265 | axienet_ethtools_get_link_ksettings(struct net_device *ndev, |
2266 | struct ethtool_link_ksettings *cmd) |
2267 | { |
2268 | struct axienet_local *lp = netdev_priv(ndev); |
2269 | |
2270 | return phylink_ethtool_ksettings_get(lp->phylink, cmd); |
2271 | } |
2272 | |
2273 | static int |
2274 | axienet_ethtools_set_link_ksettings(struct net_device *ndev, |
2275 | const struct ethtool_link_ksettings *cmd) |
2276 | { |
2277 | struct axienet_local *lp = netdev_priv(ndev); |
2278 | |
2279 | return phylink_ethtool_ksettings_set(lp->phylink, cmd); |
2280 | } |
2281 | |
2282 | static int axienet_ethtools_nway_reset(struct net_device *dev) |
2283 | { |
2284 | struct axienet_local *lp = netdev_priv(dev); |
2285 | |
2286 | return phylink_ethtool_nway_reset(lp->phylink); |
2287 | } |
2288 | |
2289 | static void axienet_ethtools_get_ethtool_stats(struct net_device *dev, |
2290 | struct ethtool_stats *stats, |
2291 | u64 *data) |
2292 | { |
2293 | struct axienet_local *lp = netdev_priv(dev); |
2294 | unsigned int start; |
2295 | |
2296 | do { |
2297 | start = read_seqcount_begin(&lp->hw_stats_seqcount); |
2298 | data[0] = axienet_stat(lp, STAT_RX_BYTES); |
2299 | data[1] = axienet_stat(lp, STAT_TX_BYTES); |
2300 | data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES); |
2301 | data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES); |
2302 | data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES); |
2303 | data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES); |
2304 | data[8] = axienet_stat(lp, STAT_USER_DEFINED0); |
2305 | data[9] = axienet_stat(lp, STAT_USER_DEFINED1); |
2306 | data[10] = axienet_stat(lp, STAT_USER_DEFINED2); |
2307 | } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); |
2308 | } |
2309 | |
2310 | static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = { |
2311 | "Received bytes", |
2312 | "Transmitted bytes", |
2313 | "RX Good VLAN Tagged Frames", |
2314 | "TX Good VLAN Tagged Frames", |
2315 | "TX Good PFC Frames", |
2316 | "RX Good PFC Frames", |
2317 | "User Defined Counter 0", |
2318 | "User Defined Counter 1", |
2319 | "User Defined Counter 2", |
2320 | }; |
2321 | |
2322 | static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
2323 | { |
2324 | switch (stringset) { |
2325 | case ETH_SS_STATS: |
2326 | memcpy(data, axienet_ethtool_stats_strings, |
2327 | sizeof(axienet_ethtool_stats_strings)); |
2328 | break; |
2329 | } |
2330 | } |
2331 | |
2332 | static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset) |
2333 | { |
2334 | struct axienet_local *lp = netdev_priv(dev); |
2335 | |
2336 | switch (sset) { |
2337 | case ETH_SS_STATS: |
2338 | if (lp->features & XAE_FEATURE_STATS) |
2339 | return ARRAY_SIZE(axienet_ethtool_stats_strings); |
2340 | fallthrough; |
2341 | default: |
2342 | return -EOPNOTSUPP; |
2343 | } |
2344 | } |
2345 | |
2346 | static void |
2347 | axienet_ethtools_get_pause_stats(struct net_device *dev, |
2348 | struct ethtool_pause_stats *pause_stats) |
2349 | { |
2350 | struct axienet_local *lp = netdev_priv(dev); |
2351 | unsigned int start; |
2352 | |
2353 | if (!(lp->features & XAE_FEATURE_STATS)) |
2354 | return; |
2355 | |
2356 | do { |
2357 | start = read_seqcount_begin(&lp->hw_stats_seqcount); |
2358 | pause_stats->tx_pause_frames = |
2359 | axienet_stat(lp, STAT_TX_PAUSE_FRAMES); |
2360 | pause_stats->rx_pause_frames = |
2361 | axienet_stat(lp, STAT_RX_PAUSE_FRAMES); |
2362 | } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); |
2363 | } |
2364 | |
2365 | static void |
2366 | axienet_ethtool_get_eth_mac_stats(struct net_device *dev, |
2367 | struct ethtool_eth_mac_stats *mac_stats) |
2368 | { |
2369 | struct axienet_local *lp = netdev_priv(dev); |
2370 | unsigned int start; |
2371 | |
2372 | if (!(lp->features & XAE_FEATURE_STATS)) |
2373 | return; |
2374 | |
2375 | do { |
2376 | start = read_seqcount_begin(&lp->hw_stats_seqcount); |
2377 | mac_stats->FramesTransmittedOK = |
2378 | axienet_stat(lp, STAT_TX_GOOD_FRAMES); |
2379 | mac_stats->SingleCollisionFrames = |
2380 | axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES); |
2381 | mac_stats->MultipleCollisionFrames = |
2382 | axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES); |
2383 | mac_stats->FramesReceivedOK = |
2384 | axienet_stat(lp, STAT_RX_GOOD_FRAMES); |
2385 | mac_stats->FrameCheckSequenceErrors = |
2386 | axienet_stat(lp, STAT_RX_FCS_ERRORS); |
2387 | mac_stats->AlignmentErrors = |
2388 | axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); |
2389 | mac_stats->FramesWithDeferredXmissions = |
2390 | axienet_stat(lp, STAT_TX_DEFERRED_FRAMES); |
2391 | mac_stats->LateCollisions = |
2392 | axienet_stat(lp, STAT_TX_LATE_COLLISIONS); |
2393 | mac_stats->FramesAbortedDueToXSColls = |
2394 | axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); |
2395 | mac_stats->MulticastFramesXmittedOK = |
2396 | axienet_stat(lp, STAT_TX_MULTICAST_FRAMES); |
2397 | mac_stats->BroadcastFramesXmittedOK = |
2398 | axienet_stat(lp, STAT_TX_BROADCAST_FRAMES); |
2399 | mac_stats->FramesWithExcessiveDeferral = |
2400 | axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL); |
2401 | mac_stats->MulticastFramesReceivedOK = |
2402 | axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); |
2403 | mac_stats->BroadcastFramesReceivedOK = |
2404 | axienet_stat(lp, STAT_RX_BROADCAST_FRAMES); |
2405 | mac_stats->InRangeLengthErrors = |
2406 | axienet_stat(lp, STAT_RX_LENGTH_ERRORS); |
2407 | } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); |
2408 | } |
2409 | |
2410 | static void |
2411 | axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev, |
2412 | struct ethtool_eth_ctrl_stats *ctrl_stats) |
2413 | { |
2414 | struct axienet_local *lp = netdev_priv(dev); |
2415 | unsigned int start; |
2416 | |
2417 | if (!(lp->features & XAE_FEATURE_STATS)) |
2418 | return; |
2419 | |
2420 | do { |
2421 | start = read_seqcount_begin(&lp->hw_stats_seqcount); |
2422 | ctrl_stats->MACControlFramesTransmitted = |
2423 | axienet_stat(lp, STAT_TX_CONTROL_FRAMES); |
2424 | ctrl_stats->MACControlFramesReceived = |
2425 | axienet_stat(lp, STAT_RX_CONTROL_FRAMES); |
2426 | ctrl_stats->UnsupportedOpcodesReceived = |
2427 | axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS); |
2428 | } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); |
2429 | } |
2430 | |
2431 | static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = { |
2432 | { 64, 64 }, |
2433 | { 65, 127 }, |
2434 | { 128, 255 }, |
2435 | { 256, 511 }, |
2436 | { 512, 1023 }, |
2437 | { 1024, 1518 }, |
2438 | { 1519, 16384 }, |
2439 | { }, |
2440 | }; |
2441 | |
2442 | static void |
2443 | axienet_ethtool_get_rmon_stats(struct net_device *dev, |
2444 | struct ethtool_rmon_stats *rmon_stats, |
2445 | const struct ethtool_rmon_hist_range **ranges) |
2446 | { |
2447 | struct axienet_local *lp = netdev_priv(dev); |
2448 | unsigned int start; |
2449 | |
2450 | if (!(lp->features & XAE_FEATURE_STATS)) |
2451 | return; |
2452 | |
2453 | do { |
2454 | start = read_seqcount_begin(&lp->hw_stats_seqcount); |
2455 | rmon_stats->undersize_pkts = |
2456 | axienet_stat(lp, STAT_UNDERSIZE_FRAMES); |
2457 | rmon_stats->oversize_pkts = |
2458 | axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES); |
2459 | rmon_stats->fragments = |
2460 | axienet_stat(lp, STAT_FRAGMENT_FRAMES); |
2461 | |
2462 | rmon_stats->hist[0] = |
2463 | axienet_stat(lp, STAT_RX_64_BYTE_FRAMES); |
2464 | rmon_stats->hist[1] = |
2465 | axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES); |
2466 | rmon_stats->hist[2] = |
2467 | axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES); |
2468 | rmon_stats->hist[3] = |
2469 | axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES); |
2470 | rmon_stats->hist[4] = |
2471 | axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES); |
2472 | rmon_stats->hist[5] = |
2473 | axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES); |
2474 | rmon_stats->hist[6] = |
2475 | rmon_stats->oversize_pkts; |
2476 | |
2477 | rmon_stats->hist_tx[0] = |
2478 | axienet_stat(lp, STAT_TX_64_BYTE_FRAMES); |
2479 | rmon_stats->hist_tx[1] = |
2480 | axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES); |
2481 | rmon_stats->hist_tx[2] = |
2482 | axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES); |
2483 | rmon_stats->hist_tx[3] = |
2484 | axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES); |
2485 | rmon_stats->hist_tx[4] = |
2486 | axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES); |
2487 | rmon_stats->hist_tx[5] = |
2488 | axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES); |
2489 | rmon_stats->hist_tx[6] = |
2490 | axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES); |
2491 | } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); |
2492 | |
2493 | *ranges = axienet_rmon_ranges; |
2494 | } |
2495 | |
2496 | static const struct ethtool_ops axienet_ethtool_ops = { |
2497 | .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | |
2498 | ETHTOOL_COALESCE_USECS | |
2499 | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, |
2500 | .get_drvinfo = axienet_ethtools_get_drvinfo, |
2501 | .get_regs_len = axienet_ethtools_get_regs_len, |
2502 | .get_regs = axienet_ethtools_get_regs, |
2503 | .get_link = ethtool_op_get_link, |
2504 | .get_ringparam = axienet_ethtools_get_ringparam, |
2505 | .set_ringparam = axienet_ethtools_set_ringparam, |
2506 | .get_pauseparam = axienet_ethtools_get_pauseparam, |
2507 | .set_pauseparam = axienet_ethtools_set_pauseparam, |
2508 | .get_coalesce = axienet_ethtools_get_coalesce, |
2509 | .set_coalesce = axienet_ethtools_set_coalesce, |
2510 | .get_link_ksettings = axienet_ethtools_get_link_ksettings, |
2511 | .set_link_ksettings = axienet_ethtools_set_link_ksettings, |
2512 | .nway_reset = axienet_ethtools_nway_reset, |
2513 | .get_ethtool_stats = axienet_ethtools_get_ethtool_stats, |
2514 | .get_strings = axienet_ethtools_get_strings, |
2515 | .get_sset_count = axienet_ethtools_get_sset_count, |
2516 | .get_pause_stats = axienet_ethtools_get_pause_stats, |
2517 | .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats, |
2518 | .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats, |
2519 | .get_rmon_stats = axienet_ethtool_get_rmon_stats, |
2520 | }; |
2521 | |
2522 | static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) |
2523 | { |
2524 | return container_of(pcs, struct axienet_local, pcs); |
2525 | } |
2526 | |
2527 | static void axienet_pcs_get_state(struct phylink_pcs *pcs, |
2528 | unsigned int neg_mode, |
2529 | struct phylink_link_state *state) |
2530 | { |
2531 | struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; |
2532 | |
2533 | phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state); |
2534 | } |
2535 | |
2536 | static void axienet_pcs_an_restart(struct phylink_pcs *pcs) |
2537 | { |
2538 | struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; |
2539 | |
2540 | phylink_mii_c22_pcs_an_restart(pcs_phy); |
2541 | } |
2542 | |
2543 | static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, |
2544 | phy_interface_t interface, |
2545 | const unsigned long *advertising, |
2546 | bool permit_pause_to_mac) |
2547 | { |
2548 | struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; |
2549 | struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; |
2550 | struct axienet_local *lp = netdev_priv(ndev); |
2551 | int ret; |
2552 | |
2553 | if (lp->switch_x_sgmii) { |
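| /* With runtime SGMII/1000BASE-X switching enabled, program the Xilinx standard-select register in the PCS/PMA so it matches the requested interface before configuring the PCS. */ |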
2554 | ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, |
2555 | interface == PHY_INTERFACE_MODE_SGMII ? |
2556 | XLNX_MII_STD_SELECT_SGMII : 0); |
2557 | if (ret < 0) { |
2558 | netdev_warn(ndev, |
2559 | "Failed to switch PHY interface: %d\n", |
2560 | ret); |
2561 | return ret; |
2562 | } |
2563 | } |
2564 | |
2565 | ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, |
2566 | neg_mode); |
2567 | if (ret < 0) |
2568 | netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); |
2569 | |
2570 | return ret; |
2571 | } |
2572 | |
2573 | static const struct phylink_pcs_ops axienet_pcs_ops = { |
2574 | .pcs_get_state = axienet_pcs_get_state, |
2575 | .pcs_config = axienet_pcs_config, |
2576 | .pcs_an_restart = axienet_pcs_an_restart, |
2577 | }; |
2578 | |
2579 | static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, |
2580 | phy_interface_t interface) |
2581 | { |
2582 | struct net_device *ndev = to_net_dev(config->dev); |
2583 | struct axienet_local *lp = netdev_priv(ndev); |
2584 | |
2585 | if (interface == PHY_INTERFACE_MODE_1000BASEX || |
2586 | interface == PHY_INTERFACE_MODE_SGMII) |
2587 | return &lp->pcs; |
2588 | |
2589 | return NULL; |
2590 | } |
2591 | |
2592 | static void axienet_mac_config(struct phylink_config *config, unsigned int mode, |
2593 | const struct phylink_link_state *state) |
2594 | { |
2595 | /* nothing meaningful to do */ |
2596 | } |
2597 | |
2598 | static void axienet_mac_link_down(struct phylink_config *config, |
2599 | unsigned int mode, |
2600 | phy_interface_t interface) |
2601 | { |
2602 | /* nothing meaningful to do */ |
2603 | } |
2604 | |
2605 | static void axienet_mac_link_up(struct phylink_config *config, |
2606 | struct phy_device *phy, |
2607 | unsigned int mode, phy_interface_t interface, |
2608 | int speed, int duplex, |
2609 | bool tx_pause, bool rx_pause) |
2610 | { |
2611 | struct net_device *ndev = to_net_dev(config->dev); |
2612 | struct axienet_local *lp = netdev_priv(ndev); |
2613 | u32 emmc_reg, fcc_reg; |
2614 | |
2615 | emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); |
2616 | emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; |
2617 | |
2618 | switch (speed) { |
2619 | case SPEED_1000: |
2620 | emmc_reg |= XAE_EMMC_LINKSPD_1000; |
2621 | break; |
2622 | case SPEED_100: |
2623 | emmc_reg |= XAE_EMMC_LINKSPD_100; |
2624 | break; |
2625 | case SPEED_10: |
2626 | emmc_reg |= XAE_EMMC_LINKSPD_10; |
2627 | break; |
2628 | default: |
2629 | dev_err(&ndev->dev, |
2630 | "Speed other than 10, 100 or 1Gbps is not supported\n"); |
2631 | break; |
2632 | } |
2633 | |
2634 | axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); |
2635 | |
2636 | fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); |
2637 | if (tx_pause) |
2638 | fcc_reg |= XAE_FCC_FCTX_MASK; |
2639 | else |
2640 | fcc_reg &= ~XAE_FCC_FCTX_MASK; |
2641 | if (rx_pause) |
2642 | fcc_reg |= XAE_FCC_FCRX_MASK; |
2643 | else |
2644 | fcc_reg &= ~XAE_FCC_FCRX_MASK; |
2645 | axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); |
2646 | } |
2647 | |
2648 | static const struct phylink_mac_ops axienet_phylink_ops = { |
2649 | .mac_select_pcs = axienet_mac_select_pcs, |
2650 | .mac_config = axienet_mac_config, |
2651 | .mac_link_down = axienet_mac_link_down, |
2652 | .mac_link_up = axienet_mac_link_up, |
2653 | }; |
2654 | |
2655 | /** |
2656 | * axienet_dma_err_handler - Work queue task for Axi DMA Error |
2657 | * @work: pointer to work_struct |
2658 | * |
2659 | * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the |
2660 | * Tx/Rx BDs. |
2661 | */ |
2662 | static void axienet_dma_err_handler(struct work_struct *work) |
2663 | { |
2664 | u32 i; |
2665 | u32 axienet_status; |
2666 | struct axidma_bd *cur_p; |
2667 | struct axienet_local *lp = container_of(work, struct axienet_local, |
2668 | dma_err_task); |
2669 | struct net_device *ndev = lp->ndev; |
2670 | |
2671 | /* Don't bother if we are going to stop anyway */ |
2672 | if (READ_ONCE(lp->stopping)) |
2673 | return; |
2674 | |
2675 | napi_disable(&lp->napi_tx); |
2676 | napi_disable(&lp->napi_rx); |
2677 | |
2678 | axienet_setoptions(ndev, lp->options & |
2679 | ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); |
2680 | |
2681 | axienet_dma_stop(lp); |
2682 | netdev_reset_queue(ndev); |
2683 | |
2684 | for (i = 0; i < lp->tx_bd_num; i++) { |
2685 | cur_p = &lp->tx_bd_v[i]; |
2686 | if (cur_p->cntrl) { |
2687 | dma_addr_t addr = desc_get_phys_addr(lp, cur_p); |
2688 | |
2689 | dma_unmap_single(lp->dev, addr, |
2690 | (cur_p->cntrl & |
2691 | XAXIDMA_BD_CTRL_LENGTH_MASK), |
2692 | DMA_TO_DEVICE); |
2693 | } |
2694 | if (cur_p->skb) |
2695 | dev_kfree_skb_irq(cur_p->skb); |
2696 | cur_p->phys = 0; |
2697 | cur_p->phys_msb = 0; |
2698 | cur_p->cntrl = 0; |
2699 | cur_p->status = 0; |
2700 | cur_p->app0 = 0; |
2701 | cur_p->app1 = 0; |
2702 | cur_p->app2 = 0; |
2703 | cur_p->app3 = 0; |
2704 | cur_p->app4 = 0; |
2705 | cur_p->skb = NULL; |
2706 | } |
2707 | |
2708 | for (i = 0; i < lp->rx_bd_num; i++) { |
2709 | cur_p = &lp->rx_bd_v[i]; |
2710 | cur_p->status = 0; |
2711 | cur_p->app0 = 0; |
2712 | cur_p->app1 = 0; |
2713 | cur_p->app2 = 0; |
2714 | cur_p->app3 = 0; |
2715 | cur_p->app4 = 0; |
2716 | } |
2717 | |
2718 | lp->tx_bd_ci = 0; |
2719 | lp->tx_bd_tail = 0; |
2720 | lp->rx_bd_ci = 0; |
2721 | |
2722 | axienet_dma_start(lp); |
2723 | |
2724 | axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); |
2725 | axienet_status &= ~XAE_RCW1_RX_MASK; |
2726 | axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); |
2727 | |
2728 | axienet_status = axienet_ior(lp, XAE_IP_OFFSET); |
2729 | if (axienet_status & XAE_INT_RXRJECT_MASK) |
2730 | axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); |
2731 | axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? |
2732 | XAE_INT_RECV_ERROR_MASK : 0); |
2733 | axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); |
2734 | |
2735 | /* Sync default options with HW but leave receiver and |
2736 | * transmitter disabled. |
2737 | */ |
2738 | axienet_setoptions(ndev, lp->options & |
2739 | ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); |
2740 | axienet_set_mac_address(ndev, NULL); |
2741 | axienet_set_multicast_list(ndev); |
2742 | napi_enable(&lp->napi_rx); |
2743 | napi_enable(&lp->napi_tx); |
2744 | axienet_setoptions(ndev, lp->options); |
2745 | } |
2746 | |
2747 | /** |
2748 | * axienet_probe - Axi Ethernet probe function. |
2749 | * @pdev: Pointer to platform device structure. |
2750 | * |
2751 | * Return: 0, on success |
2752 | * Non-zero error value on failure. |
2753 | * |
2754 | * This is the probe routine for Axi Ethernet driver. This is called before |
2755 | * any other driver routines are invoked. It allocates and sets up the Ethernet |
2756 | * device. Parses through device tree and populates fields of |
2757 | * axienet_local. It registers the Ethernet device. |
2758 | */ |
2759 | static int axienet_probe(struct platform_device *pdev) |
2760 | { |
2761 | int ret; |
2762 | struct device_node *np; |
2763 | struct axienet_local *lp; |
2764 | struct net_device *ndev; |
2765 | struct resource *ethres; |
2766 | u8 mac_addr[ETH_ALEN]; |
2767 | int addr_width = 32; |
2768 | u32 value; |
2769 | |
2770 | ndev = alloc_etherdev(sizeof(*lp)); |
2771 | if (!ndev) |
2772 | return -ENOMEM; |
2773 | |
2774 | platform_set_drvdata(pdev, ndev); |
2775 | |
2776 | SET_NETDEV_DEV(ndev, &pdev->dev); |
2777 | ndev->features = NETIF_F_SG; |
2778 | ndev->ethtool_ops = &axienet_ethtool_ops; |
2779 | |
2780 | /* MTU range: 64 - 9000 */ |
2781 | ndev->min_mtu = 64; |
2782 | ndev->max_mtu = XAE_JUMBO_MTU; |
2783 | |
2784 | lp = netdev_priv(ndev); |
2785 | lp->ndev = ndev; |
2786 | lp->dev = &pdev->dev; |
2787 | lp->options = XAE_OPTION_DEFAULTS; |
2788 | lp->rx_bd_num = RX_BD_NUM_DEFAULT; |
2789 | lp->tx_bd_num = TX_BD_NUM_DEFAULT; |
2790 | |
2791 | u64_stats_init(&lp->rx_stat_sync); |
2792 | u64_stats_init(&lp->tx_stat_sync); |
2793 | |
2794 | mutex_init(&lp->stats_lock); |
2795 | seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock); |
2796 | INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats); |
2797 | |
2798 | lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); |
2799 | if (!lp->axi_clk) { |
2800 | /* For backward compatibility, if named AXI clock is not present, |
2801 | * treat the first clock specified as the AXI clock. |
2802 | */ |
2803 | lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); |
2804 | } |
2805 | if (IS_ERR(lp->axi_clk)) { |
2806 | ret = PTR_ERR(lp->axi_clk); |
2807 | goto free_netdev; |
2808 | } |
2809 | ret = clk_prepare_enable(lp->axi_clk); |
2810 | if (ret) { |
2811 | dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); |
2812 | goto free_netdev; |
2813 | } |
2814 | |
2815 | lp->misc_clks[0].id = "axis_clk"; |
2816 | lp->misc_clks[1].id = "ref_clk"; |
2817 | lp->misc_clks[2].id = "mgt_clk"; |
2818 | |
2819 | ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); |
2820 | if (ret) |
2821 | goto cleanup_clk; |
2822 | |
2823 | ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); |
2824 | if (ret) |
2825 | goto cleanup_clk; |
2826 | |
2827 | /* Map device registers */ |
2828 | lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); |
2829 | if (IS_ERR(lp->regs)) { |
2830 | ret = PTR_ERR(lp->regs); |
2831 | goto cleanup_clk; |
2832 | } |
2833 | lp->regs_start = ethres->start; |
2834 | |
2835 | /* Setup checksum offload, but default to off if not specified */ |
2836 | lp->features = 0; |
2837 | |
2838 | if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS) |
2839 | lp->features |= XAE_FEATURE_STATS; |
2840 | |
2841 | ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); |
2842 | if (!ret) { |
2843 | switch (value) { |
2844 | case 1: |
2845 | lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; |
2846 | /* Can checksum any contiguous range */ |
2847 | ndev->features |= NETIF_F_HW_CSUM; |
2848 | break; |
2849 | case 2: |
2850 | lp->features |= XAE_FEATURE_FULL_TX_CSUM; |
2851 | /* Can checksum TCP/UDP over IPv4. */ |
2852 | ndev->features |= NETIF_F_IP_CSUM; |
2853 | break; |
2854 | } |
2855 | } |
2856 | ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); |
2857 | if (!ret) { |
2858 | switch (value) { |
2859 | case 1: |
2860 | lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; |
2861 | ndev->features |= NETIF_F_RXCSUM; |
2862 | break; |
2863 | case 2: |
2864 | lp->features |= XAE_FEATURE_FULL_RX_CSUM; |
2865 | ndev->features |= NETIF_F_RXCSUM; |
2866 | break; |
2867 | } |
2868 | } |
2869 | /* For supporting jumbo frames, the Axi Ethernet hardware must have |
2870 | * a larger Rx/Tx Memory. Typically, the size must be large so that |
2871 | * we can enable jumbo option and start supporting jumbo frames. |
2872 | * Here we check for memory allocated for Rx/Tx in the hardware from |
2873 | * the device-tree and accordingly set flags. |
2874 | */ |
2875 | of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); |
2876 | |
2877 | lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, |
2878 | "xlnx,switch-x-sgmii"); |
2879 | |
2880 | /* Start with the proprietary, and broken phy_type */ |
2881 | ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); |
2882 | if (!ret) { |
2883 | netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); |
2884 | switch (value) { |
2885 | case XAE_PHY_TYPE_MII: |
2886 | lp->phy_mode = PHY_INTERFACE_MODE_MII; |
2887 | break; |
2888 | case XAE_PHY_TYPE_GMII: |
2889 | lp->phy_mode = PHY_INTERFACE_MODE_GMII; |
2890 | break; |
2891 | case XAE_PHY_TYPE_RGMII_2_0: |
2892 | lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; |
2893 | break; |
2894 | case XAE_PHY_TYPE_SGMII: |
2895 | lp->phy_mode = PHY_INTERFACE_MODE_SGMII; |
2896 | break; |
2897 | case XAE_PHY_TYPE_1000BASE_X: |
2898 | lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; |
2899 | break; |
2900 | default: |
2901 | ret = -EINVAL; |
2902 | goto cleanup_clk; |
2903 | } |
2904 | } else { |
2905 | ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); |
2906 | if (ret) |
2907 | goto cleanup_clk; |
2908 | } |
2909 | if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && |
2910 | lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { |
2911 | dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); |
2912 | ret = -EINVAL; |
2913 | goto cleanup_clk; |
2914 | } |
2915 | |
2916 | if (!of_property_present(pdev->dev.of_node, "dmas")) { |
2917 | /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ |
2918 | np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); |
2919 | |
2920 | if (np) { |
2921 | struct resource dmares; |
2922 | |
2923 | ret = of_address_to_resource(np, 0, &dmares); |
2924 | if (ret) { |
2925 | dev_err(&pdev->dev, |
2926 | "unable to get DMA resource\n"); |
2927 | of_node_put(np); |
2928 | goto cleanup_clk; |
2929 | } |
2930 | lp->dma_regs = devm_ioremap_resource(&pdev->dev, |
2931 | &dmares); |
2932 | lp->rx_irq = irq_of_parse_and_map(np, 1); |
2933 | lp->tx_irq = irq_of_parse_and_map(np, 0); |
2934 | of_node_put(np); |
2935 | lp->eth_irq = platform_get_irq_optional(pdev, 0); |
2936 | } else { |
2937 | /* Check for these resources directly on the Ethernet node. */ |
2938 | lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); |
2939 | lp->rx_irq = platform_get_irq(pdev, 1); |
2940 | lp->tx_irq = platform_get_irq(pdev, 0); |
2941 | lp->eth_irq = platform_get_irq_optional(pdev, 2); |
2942 | } |
2943 | if (IS_ERR(lp->dma_regs)) { |
2944 | dev_err(&pdev->dev, "could not map DMA regs\n"); |
2945 | ret = PTR_ERR(lp->dma_regs); |
2946 | goto cleanup_clk; |
2947 | } |
2948 | if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { |
2949 | dev_err(&pdev->dev, "could not determine irqs\n"); |
2950 | ret = -ENOMEM; |
2951 | goto cleanup_clk; |
2952 | } |
2953 | |
2954 | /* Reset core now that clocks are enabled, prior to accessing MDIO */ |
2955 | ret = __axienet_device_reset(lp); |
2956 | if (ret) |
2957 | goto cleanup_clk; |
2958 | |
2959 | /* Autodetect the need for 64-bit DMA pointers. |
2960 | * When the IP is configured for a bus width bigger than 32 bits, |
2961 | * writing the MSB registers is mandatory, even if they are all 0. |
2962 | * We can detect this case by writing all 1's to one such register |
2963 | * and see if that sticks: when the IP is configured for 32 bits |
2964 | * only, those registers are RES0. |
2965 | * Those MSB registers were introduced in IP v7.1, which we check first. |
2966 | */ |
2967 | if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { |
2968 | void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; |
2969 | |
2970 | iowrite32(0x0, desc); |
2971 | if (ioread32(desc) == 0) { /* sanity check */ |
2972 | iowrite32(0xffffffff, desc); |
2973 | if (ioread32(desc) > 0) { |
2974 | lp->features |= XAE_FEATURE_DMA_64BIT; |
2975 | addr_width = 64; |
2976 | dev_info(&pdev->dev, |
2977 | "autodetected 64-bit DMA range\n"); |
2978 | } |
2979 | iowrite32(0x0, desc); |
2980 | } |
2981 | } |
2982 | if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { |
2983 | dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); |
2984 | ret = -EINVAL; |
2985 | goto cleanup_clk; |
2986 | } |
2987 | |
2988 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); |
2989 | if (ret) { |
2990 | dev_err(&pdev->dev, "No suitable DMA available\n"); |
2991 | goto cleanup_clk; |
2992 | } |
2993 | netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); |
2994 | netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); |
2995 | } else { |
2996 | struct xilinx_vdma_config cfg; |
2997 | struct dma_chan *tx_chan; |
2998 | |
2999 | lp->eth_irq = platform_get_irq_optional(pdev, 0); |
3000 | if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { |
3001 | ret = lp->eth_irq; |
3002 | goto cleanup_clk; |
3003 | } |
3004 | tx_chan = dma_request_chan(lp->dev, "tx_chan0"); |
3005 | if (IS_ERR(tx_chan)) { |
3006 | ret = PTR_ERR(tx_chan); |
3007 | dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); |
3008 | goto cleanup_clk; |
3009 | } |
3010 | |
3011 | cfg.reset = 1; |
3012 | /* As name says VDMA but it has support for DMA channel reset */ |
3013 | ret = xilinx_vdma_channel_set_config(tx_chan, &cfg); |
3014 | if (ret < 0) { |
3015 | dev_err(&pdev->dev, "Reset channel failed\n"); |
3016 | dma_release_channel(tx_chan); |
3017 | goto cleanup_clk; |
3018 | } |
3019 | |
3020 | dma_release_channel(tx_chan); |
3021 | lp->use_dmaengine = 1; |
3022 | } |
3023 | |
3024 | if (lp->use_dmaengine) |
3025 | ndev->netdev_ops = &axienet_netdev_dmaengine_ops; |
3026 | else |
3027 | ndev->netdev_ops = &axienet_netdev_ops; |
3028 | /* Check for Ethernet core IRQ (optional) */ |
3029 | if (lp->eth_irq <= 0) |
3030 | dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); |
3031 | |
3032 | /* Retrieve the MAC address */ |
3033 | ret = of_get_mac_address(pdev->dev.of_node, mac_addr); |
3034 | if (!ret) { |
3035 | axienet_set_mac_address(ndev, address: mac_addr); |
3036 | } else { |
3037 | dev_warn(&pdev->dev, "could not find MAC address property: %d\n", |
3038 | ret); |
3039 | axienet_set_mac_address(ndev, NULL); |
3040 | } |
3041 | |
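	/* Default interrupt coalescing: RX uses adaptive coalescing (DIM)
	 * starting from a moderate profile, TX uses the fixed threshold and
	 * timer defaults; both can be changed later via ethtool coalesce
	 * settings.
	 */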
	spin_lock_init(&lp->rx_cr_lock);
	spin_lock_init(&lp->tx_cr_lock);
	INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
	lp->rx_dim_enabled = true;
	lp->rx_dim.profile_ix = 1;
	lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
					XAXIDMA_DFT_RX_USEC);
	lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
					XAXIDMA_DFT_TX_USEC);

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

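	/* 1000BASE-X and SGMII need a PCS, referenced via the "pcs-handle"
	 * device tree property (or "phy-handle" on older device trees), so
	 * that phylink can drive it through axienet_pcs_ops.
	 */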
	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.poll = true;
	}

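	/* Describe the MAC to phylink: netdev-backed, full duplex at
	 * 10/100/1000 Mbps with symmetric/asymmetric pause, limited to the
	 * configured interface (plus 1000BASE-X/SGMII when runtime switching
	 * is enabled in the core).
	 */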
	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_managed_pm = true;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					      MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static void axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

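/* Power management: suspend detaches the netdev and runs the normal stop
 * path under the RTNL lock; resume reopens the interface and reattaches it.
 */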
static int axienet_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	rtnl_lock();
	axienet_stop(ndev);
	rtnl_unlock();

	return 0;
}

static int axienet_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	rtnl_lock();
	axienet_open(ndev);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
				axienet_suspend, axienet_resume);

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		.name = "xilinx_axienet",
		.pm = &axienet_pm_ops,
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");