1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. |
4 | * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) |
5 | * |
6 | * Right now, I am very wasteful with the buffers. I allocate memory |
7 | * pages and then divide them into 2K frame buffers. This way I know I |
8 | * have buffers large enough to hold one frame within one buffer descriptor. |
9 | * Once I get this working, I will use 64 or 128 byte CPM buffers, which |
10 | * will be much more memory efficient and will easily handle lots of |
11 | * small packets. |
12 | * |
13 | * Much better multiple PHY support by Magnus Damm. |
14 | * Copyright (c) 2000 Ericsson Radio Systems AB. |
15 | * |
16 | * Support for FEC controller of ColdFire processors. |
17 | * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) |
18 | * |
19 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) |
20 | * Copyright (c) 2004-2006 Macq Electronique SA. |
21 | * |
22 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. |
23 | */ |
24 | |
25 | #include <linux/module.h> |
26 | #include <linux/kernel.h> |
27 | #include <linux/string.h> |
28 | #include <linux/pm_runtime.h> |
29 | #include <linux/ptrace.h> |
30 | #include <linux/errno.h> |
31 | #include <linux/ioport.h> |
32 | #include <linux/slab.h> |
33 | #include <linux/interrupt.h> |
34 | #include <linux/delay.h> |
35 | #include <linux/netdevice.h> |
36 | #include <linux/etherdevice.h> |
37 | #include <linux/skbuff.h> |
38 | #include <linux/in.h> |
39 | #include <linux/ip.h> |
40 | #include <net/ip.h> |
41 | #include <net/page_pool/helpers.h> |
42 | #include <net/selftests.h> |
43 | #include <net/tso.h> |
44 | #include <linux/tcp.h> |
45 | #include <linux/udp.h> |
46 | #include <linux/icmp.h> |
47 | #include <linux/spinlock.h> |
48 | #include <linux/workqueue.h> |
49 | #include <linux/bitops.h> |
50 | #include <linux/io.h> |
51 | #include <linux/irq.h> |
52 | #include <linux/clk.h> |
53 | #include <linux/crc32.h> |
54 | #include <linux/platform_device.h> |
55 | #include <linux/property.h> |
56 | #include <linux/mdio.h> |
57 | #include <linux/phy.h> |
58 | #include <linux/fec.h> |
59 | #include <linux/of.h> |
60 | #include <linux/of_mdio.h> |
61 | #include <linux/of_net.h> |
62 | #include <linux/regulator/consumer.h> |
63 | #include <linux/if_vlan.h> |
64 | #include <linux/pinctrl/consumer.h> |
65 | #include <linux/gpio/consumer.h> |
66 | #include <linux/prefetch.h> |
67 | #include <linux/mfd/syscon.h> |
68 | #include <linux/regmap.h> |
69 | #include <soc/imx/cpuidle.h> |
70 | #include <linux/filter.h> |
71 | #include <linux/bpf.h> |
72 | #include <linux/bpf_trace.h> |
73 | |
74 | #include <asm/cacheflush.h> |
75 | |
76 | #include "fec.h" |
77 | |
78 | static void set_multicast_list(struct net_device *ndev); |
79 | static void fec_enet_itr_coal_set(struct net_device *ndev); |
80 | static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, |
81 | int cpu, struct xdp_buff *xdp, |
82 | u32 dma_sync_len); |
83 | |
84 | #define DRIVER_NAME "fec" |
85 | |
86 | static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; |
87 | |
88 | #define FEC_ENET_RSEM_V 0x84 |
89 | #define FEC_ENET_RSFL_V 16 |
90 | #define FEC_ENET_RAEM_V 0x8 |
91 | #define FEC_ENET_RAFL_V 0x8 |
92 | #define FEC_ENET_OPD_V 0xFFF0 |
93 | #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ |
94 | |
95 | #define FEC_ENET_XDP_PASS 0 |
96 | #define FEC_ENET_XDP_CONSUMED BIT(0) |
97 | #define FEC_ENET_XDP_TX BIT(1) |
98 | #define FEC_ENET_XDP_REDIR BIT(2) |
99 | |
/* Per-compatible device description: a bitmask of FEC_QUIRK_* flags
 * selecting the workarounds and hardware features for that SoC variant.
 */
struct fec_devinfo {
	u32 quirks;	/* FEC_QUIRK_* bitmask for this variant */
};
103 | |
/* Quirk sets for the individual SoC integrations of the FEC block;
 * matched against the device tree via fec_dt_ids below.
 */
static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6q_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_mvf600_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6x_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6ul_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8mq_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8qm_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_s32v234_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_MDIO_C45,
};
181 | |
/* Legacy platform-bus ID table; DT-based systems match via fec_dt_ids. */
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		/* sentinel */
	}
};
191 | MODULE_DEVICE_TABLE(platform, fec_devtype); |
192 | |
/* Device-tree match table; .data points at the per-SoC quirk set above. */
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
	{ /* sentinel */ }
};
206 | MODULE_DEVICE_TABLE(of, fec_dt_ids); |
207 | |
/* Optional MAC address override, settable as a module parameter. */
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
211 | |
212 | #if defined(CONFIG_M5272) |
213 | /* |
214 | * Some hardware gets it MAC address out of local flash memory. |
215 | * if this is non-zero then assume it is the address to get MAC from. |
216 | */ |
217 | #if defined(CONFIG_NETtel) |
218 | #define FEC_FLASHMAC 0xf0006006 |
219 | #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) |
220 | #define FEC_FLASHMAC 0xf0006000 |
221 | #elif defined(CONFIG_CANCam) |
222 | #define FEC_FLASHMAC 0xf0020000 |
223 | #elif defined (CONFIG_M5272C3) |
224 | #define FEC_FLASHMAC (0xffe04000 + 4) |
225 | #elif defined(CONFIG_MOD5272) |
226 | #define FEC_FLASHMAC 0xffc0406b |
227 | #else |
228 | #define FEC_FLASHMAC 0 |
229 | #endif |
230 | #endif /* CONFIG_M5272 */ |
231 | |
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
237 | #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) |
238 | #define PKT_MINBUF_SIZE 64 |
239 | |
240 | /* FEC receive acceleration */ |
241 | #define FEC_RACC_IPDIS BIT(1) |
242 | #define FEC_RACC_PRODIS BIT(2) |
243 | #define FEC_RACC_SHIFT16 BIT(7) |
244 | #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) |
245 | |
246 | /* MIB Control Register */ |
247 | #define FEC_MIB_CTRLSTAT_DISABLE BIT(31) |
248 | |
249 | /* |
250 | * The 5270/5271/5280/5282/532x RX control register also contains maximum frame |
251 | * size bits. Other FEC hardware does not, so we need to take that into |
252 | * account when setting it. |
253 | */ |
254 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
255 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ |
256 | defined(CONFIG_ARM64) |
257 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) |
258 | #else |
259 | #define OPT_FRAME_SIZE 0 |
260 | #endif |
261 | |
262 | /* FEC MII MMFR bits definition */ |
263 | #define FEC_MMFR_ST (1 << 30) |
264 | #define FEC_MMFR_ST_C45 (0) |
265 | #define FEC_MMFR_OP_READ (2 << 28) |
266 | #define FEC_MMFR_OP_READ_C45 (3 << 28) |
267 | #define FEC_MMFR_OP_WRITE (1 << 28) |
268 | #define FEC_MMFR_OP_ADDR_WRITE (0) |
269 | #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) |
270 | #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) |
271 | #define FEC_MMFR_TA (2 << 16) |
272 | #define FEC_MMFR_DATA(v) (v & 0xffff) |
273 | /* FEC ECR bits definition */ |
274 | #define FEC_ECR_RESET BIT(0) |
275 | #define FEC_ECR_ETHEREN BIT(1) |
276 | #define FEC_ECR_MAGICEN BIT(2) |
277 | #define FEC_ECR_SLEEP BIT(3) |
278 | #define FEC_ECR_EN1588 BIT(4) |
279 | #define FEC_ECR_BYTESWP BIT(8) |
280 | /* FEC RCR bits definition */ |
281 | #define FEC_RCR_LOOP BIT(0) |
282 | #define FEC_RCR_HALFDPX BIT(1) |
283 | #define FEC_RCR_MII BIT(2) |
284 | #define FEC_RCR_PROMISC BIT(3) |
285 | #define FEC_RCR_BC_REJ BIT(4) |
286 | #define FEC_RCR_FLOWCTL BIT(5) |
287 | #define FEC_RCR_RMII BIT(8) |
288 | #define FEC_RCR_10BASET BIT(9) |
289 | /* TX WMARK bits */ |
290 | #define FEC_TXWMRK_STRFWD BIT(8) |
291 | |
292 | #define FEC_MII_TIMEOUT 30000 /* us */ |
293 | |
294 | /* Transmitter timeout */ |
295 | #define TX_TIMEOUT (2 * HZ) |
296 | |
297 | #define FEC_PAUSE_FLAG_AUTONEG 0x1 |
298 | #define FEC_PAUSE_FLAG_ENABLE 0x2 |
299 | #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) |
300 | #define FEC_WOL_FLAG_ENABLE (0x1 << 1) |
301 | #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) |
302 | |
303 | /* Max number of allowed TCP segments for software TSO */ |
304 | #define FEC_MAX_TSO_SEGS 100 |
305 | #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) |
306 | |
/* True if @addr falls inside @txq's pre-allocated TSO header region; such
 * buffers are mapped from a dedicated DMA area and must not be unmapped
 * like ordinary data buffers.
 */
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
310 | |
311 | static int mii_cnt; |
312 | |
313 | static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, |
314 | struct bufdesc_prop *bd) |
315 | { |
316 | return (bdp >= bd->last) ? bd->base |
317 | : (struct bufdesc *)(((void *)bdp) + bd->dsize); |
318 | } |
319 | |
320 | static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, |
321 | struct bufdesc_prop *bd) |
322 | { |
323 | return (bdp <= bd->base) ? bd->last |
324 | : (struct bufdesc *)(((void *)bdp) - bd->dsize); |
325 | } |
326 | |
327 | static int fec_enet_get_bd_index(struct bufdesc *bdp, |
328 | struct bufdesc_prop *bd) |
329 | { |
330 | return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; |
331 | } |
332 | |
333 | static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) |
334 | { |
335 | int entries; |
336 | |
337 | entries = (((const char *)txq->dirty_tx - |
338 | (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; |
339 | |
340 | return entries >= 0 ? entries : entries + txq->bd.ring_size; |
341 | } |
342 | |
/* Byte-swap each 32-bit word of @bufaddr in place; @len is in bytes.
 * Used on FEC_QUIRK_SWAP_FRAME hardware whose DMA engine expects the
 * opposite endianness from the CPU.
 */
static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}
351 | |
352 | static void fec_dump(struct net_device *ndev) |
353 | { |
354 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
355 | struct bufdesc *bdp; |
356 | struct fec_enet_priv_tx_q *txq; |
357 | int index = 0; |
358 | |
359 | netdev_info(dev: ndev, format: "TX ring dump\n" ); |
360 | pr_info("Nr SC addr len SKB\n" ); |
361 | |
362 | txq = fep->tx_queue[0]; |
363 | bdp = txq->bd.base; |
364 | |
365 | do { |
366 | pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n" , |
367 | index, |
368 | bdp == txq->bd.cur ? 'S' : ' ', |
369 | bdp == txq->dirty_tx ? 'H' : ' ', |
370 | fec16_to_cpu(bdp->cbd_sc), |
371 | fec32_to_cpu(bdp->cbd_bufaddr), |
372 | fec16_to_cpu(bdp->cbd_datlen), |
373 | txq->tx_buf[index].buf_p); |
374 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
375 | index++; |
376 | } while (bdp != txq->bd.base); |
377 | } |
378 | |
379 | /* |
380 | * Coldfire does not support DMA coherent allocations, and has historically used |
381 | * a band-aid with a manual flush in fec_enet_rx_queue. |
382 | */ |
383 | #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) |
384 | static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
385 | gfp_t gfp) |
386 | { |
387 | return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp); |
388 | } |
389 | |
390 | static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, |
391 | dma_addr_t handle) |
392 | { |
393 | dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL); |
394 | } |
395 | #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ |
396 | static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
397 | gfp_t gfp) |
398 | { |
399 | return dma_alloc_coherent(dev, size, dma_handle: handle, gfp); |
400 | } |
401 | |
402 | static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, |
403 | dma_addr_t handle) |
404 | { |
405 | dma_free_coherent(dev, size, cpu_addr, dma_handle: handle); |
406 | } |
407 | #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ |
408 | |
/* Managed (devres) record for a DMA allocation: everything fec_dmam_release
 * needs to free the buffer when the owning device is torn down.
 */
struct fec_dma_devres {
	size_t size;		/* allocation size in bytes */
	void *vaddr;		/* CPU virtual address of the buffer */
	dma_addr_t dma_handle;	/* device-visible DMA address */
};
414 | |
415 | static void fec_dmam_release(struct device *dev, void *res) |
416 | { |
417 | struct fec_dma_devres *this = res; |
418 | |
419 | fec_dma_free(dev, size: this->size, cpu_addr: this->vaddr, handle: this->dma_handle); |
420 | } |
421 | |
422 | static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
423 | gfp_t gfp) |
424 | { |
425 | struct fec_dma_devres *dr; |
426 | void *vaddr; |
427 | |
428 | dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp); |
429 | if (!dr) |
430 | return NULL; |
431 | vaddr = fec_dma_alloc(dev, size, handle, gfp); |
432 | if (!vaddr) { |
433 | devres_free(res: dr); |
434 | return NULL; |
435 | } |
436 | dr->vaddr = vaddr; |
437 | dr->dma_handle = *handle; |
438 | dr->size = size; |
439 | devres_add(dev, res: dr); |
440 | return vaddr; |
441 | } |
442 | |
443 | static inline bool is_ipv4_pkt(struct sk_buff *skb) |
444 | { |
445 | return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; |
446 | } |
447 | |
448 | static int |
449 | fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) |
450 | { |
451 | /* Only run for packets requiring a checksum. */ |
452 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
453 | return 0; |
454 | |
455 | if (unlikely(skb_cow_head(skb, 0))) |
456 | return -1; |
457 | |
458 | if (is_ipv4_pkt(skb)) |
459 | ip_hdr(skb)->check = 0; |
460 | *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; |
461 | |
462 | return 0; |
463 | } |
464 | |
465 | static int |
466 | fec_enet_create_page_pool(struct fec_enet_private *fep, |
467 | struct fec_enet_priv_rx_q *rxq, int size) |
468 | { |
469 | struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); |
470 | struct page_pool_params pp_params = { |
471 | .order = 0, |
472 | .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, |
473 | .pool_size = size, |
474 | .nid = dev_to_node(dev: &fep->pdev->dev), |
475 | .dev = &fep->pdev->dev, |
476 | .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, |
477 | .offset = FEC_ENET_XDP_HEADROOM, |
478 | .max_len = FEC_ENET_RX_FRSIZE, |
479 | }; |
480 | int err; |
481 | |
482 | rxq->page_pool = page_pool_create(params: &pp_params); |
483 | if (IS_ERR(ptr: rxq->page_pool)) { |
484 | err = PTR_ERR(ptr: rxq->page_pool); |
485 | rxq->page_pool = NULL; |
486 | return err; |
487 | } |
488 | |
489 | err = xdp_rxq_info_reg(xdp_rxq: &rxq->xdp_rxq, dev: fep->netdev, queue_index: rxq->id, napi_id: 0); |
490 | if (err < 0) |
491 | goto err_free_pp; |
492 | |
493 | err = xdp_rxq_info_reg_mem_model(xdp_rxq: &rxq->xdp_rxq, type: MEM_TYPE_PAGE_POOL, |
494 | allocator: rxq->page_pool); |
495 | if (err) |
496 | goto err_unregister_rxq; |
497 | |
498 | return 0; |
499 | |
500 | err_unregister_rxq: |
501 | xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq); |
502 | err_free_pp: |
503 | page_pool_destroy(pool: rxq->page_pool); |
504 | rxq->page_pool = NULL; |
505 | return err; |
506 | } |
507 | |
508 | static struct bufdesc * |
509 | fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, |
510 | struct sk_buff *skb, |
511 | struct net_device *ndev) |
512 | { |
513 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
514 | struct bufdesc *bdp = txq->bd.cur; |
515 | struct bufdesc_ex *ebdp; |
516 | int nr_frags = skb_shinfo(skb)->nr_frags; |
517 | int frag, frag_len; |
518 | unsigned short status; |
519 | unsigned int estatus = 0; |
520 | skb_frag_t *this_frag; |
521 | unsigned int index; |
522 | void *bufaddr; |
523 | dma_addr_t addr; |
524 | int i; |
525 | |
526 | for (frag = 0; frag < nr_frags; frag++) { |
527 | this_frag = &skb_shinfo(skb)->frags[frag]; |
528 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
529 | ebdp = (struct bufdesc_ex *)bdp; |
530 | |
531 | status = fec16_to_cpu(bdp->cbd_sc); |
532 | status &= ~BD_ENET_TX_STATS; |
533 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); |
534 | frag_len = skb_frag_size(frag: &skb_shinfo(skb)->frags[frag]); |
535 | |
536 | /* Handle the last BD specially */ |
537 | if (frag == nr_frags - 1) { |
538 | status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); |
539 | if (fep->bufdesc_ex) { |
540 | estatus |= BD_ENET_TX_INT; |
541 | if (unlikely(skb_shinfo(skb)->tx_flags & |
542 | SKBTX_HW_TSTAMP && fep->hwts_tx_en)) |
543 | estatus |= BD_ENET_TX_TS; |
544 | } |
545 | } |
546 | |
547 | if (fep->bufdesc_ex) { |
548 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
549 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
550 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
551 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
552 | |
553 | ebdp->cbd_bdu = 0; |
554 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
555 | } |
556 | |
557 | bufaddr = skb_frag_address(frag: this_frag); |
558 | |
559 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
560 | if (((unsigned long) bufaddr) & fep->tx_align || |
561 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
562 | memcpy(txq->tx_bounce[index], bufaddr, frag_len); |
563 | bufaddr = txq->tx_bounce[index]; |
564 | |
565 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
566 | swap_buffer(bufaddr, len: frag_len); |
567 | } |
568 | |
569 | addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, |
570 | DMA_TO_DEVICE); |
571 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: addr)) { |
572 | if (net_ratelimit()) |
573 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
574 | goto dma_mapping_error; |
575 | } |
576 | |
577 | bdp->cbd_bufaddr = cpu_to_fec32(addr); |
578 | bdp->cbd_datlen = cpu_to_fec16(frag_len); |
579 | /* Make sure the updates to rest of the descriptor are |
580 | * performed before transferring ownership. |
581 | */ |
582 | wmb(); |
583 | bdp->cbd_sc = cpu_to_fec16(status); |
584 | } |
585 | |
586 | return bdp; |
587 | dma_mapping_error: |
588 | bdp = txq->bd.cur; |
589 | for (i = 0; i < frag; i++) { |
590 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
591 | dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), |
592 | fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); |
593 | } |
594 | return ERR_PTR(error: -ENOMEM); |
595 | } |
596 | |
597 | static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, |
598 | struct sk_buff *skb, struct net_device *ndev) |
599 | { |
600 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
601 | int nr_frags = skb_shinfo(skb)->nr_frags; |
602 | struct bufdesc *bdp, *last_bdp; |
603 | void *bufaddr; |
604 | dma_addr_t addr; |
605 | unsigned short status; |
606 | unsigned short buflen; |
607 | unsigned int estatus = 0; |
608 | unsigned int index; |
609 | int entries_free; |
610 | |
611 | entries_free = fec_enet_get_free_txdesc_num(txq); |
612 | if (entries_free < MAX_SKB_FRAGS + 1) { |
613 | dev_kfree_skb_any(skb); |
614 | if (net_ratelimit()) |
615 | netdev_err(dev: ndev, format: "NOT enough BD for SG!\n" ); |
616 | return NETDEV_TX_OK; |
617 | } |
618 | |
619 | /* Protocol checksum off-load for TCP and UDP. */ |
620 | if (fec_enet_clear_csum(skb, ndev)) { |
621 | dev_kfree_skb_any(skb); |
622 | return NETDEV_TX_OK; |
623 | } |
624 | |
625 | /* Fill in a Tx ring entry */ |
626 | bdp = txq->bd.cur; |
627 | last_bdp = bdp; |
628 | status = fec16_to_cpu(bdp->cbd_sc); |
629 | status &= ~BD_ENET_TX_STATS; |
630 | |
631 | /* Set buffer length and buffer pointer */ |
632 | bufaddr = skb->data; |
633 | buflen = skb_headlen(skb); |
634 | |
635 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
636 | if (((unsigned long) bufaddr) & fep->tx_align || |
637 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
638 | memcpy(txq->tx_bounce[index], skb->data, buflen); |
639 | bufaddr = txq->tx_bounce[index]; |
640 | |
641 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
642 | swap_buffer(bufaddr, len: buflen); |
643 | } |
644 | |
645 | /* Push the data cache so the CPM does not get stale memory data. */ |
646 | addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); |
647 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: addr)) { |
648 | dev_kfree_skb_any(skb); |
649 | if (net_ratelimit()) |
650 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
651 | return NETDEV_TX_OK; |
652 | } |
653 | |
654 | if (nr_frags) { |
655 | last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); |
656 | if (IS_ERR(ptr: last_bdp)) { |
657 | dma_unmap_single(&fep->pdev->dev, addr, |
658 | buflen, DMA_TO_DEVICE); |
659 | dev_kfree_skb_any(skb); |
660 | return NETDEV_TX_OK; |
661 | } |
662 | } else { |
663 | status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); |
664 | if (fep->bufdesc_ex) { |
665 | estatus = BD_ENET_TX_INT; |
666 | if (unlikely(skb_shinfo(skb)->tx_flags & |
667 | SKBTX_HW_TSTAMP && fep->hwts_tx_en)) |
668 | estatus |= BD_ENET_TX_TS; |
669 | } |
670 | } |
671 | bdp->cbd_bufaddr = cpu_to_fec32(addr); |
672 | bdp->cbd_datlen = cpu_to_fec16(buflen); |
673 | |
674 | if (fep->bufdesc_ex) { |
675 | |
676 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
677 | |
678 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && |
679 | fep->hwts_tx_en)) |
680 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
681 | |
682 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
683 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
684 | |
685 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
686 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
687 | |
688 | ebdp->cbd_bdu = 0; |
689 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
690 | } |
691 | |
692 | index = fec_enet_get_bd_index(bdp: last_bdp, bd: &txq->bd); |
693 | /* Save skb pointer */ |
694 | txq->tx_buf[index].buf_p = skb; |
695 | |
696 | /* Make sure the updates to rest of the descriptor are performed before |
697 | * transferring ownership. |
698 | */ |
699 | wmb(); |
700 | |
701 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
702 | * it's the last BD of the frame, and to put the CRC on the end. |
703 | */ |
704 | status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); |
705 | bdp->cbd_sc = cpu_to_fec16(status); |
706 | |
707 | /* If this was the last BD in the ring, start at the beginning again. */ |
708 | bdp = fec_enet_get_nextdesc(bdp: last_bdp, bd: &txq->bd); |
709 | |
710 | skb_tx_timestamp(skb); |
711 | |
712 | /* Make sure the update to bdp is performed before txq->bd.cur. */ |
713 | wmb(); |
714 | txq->bd.cur = bdp; |
715 | |
716 | /* Trigger transmission start */ |
717 | writel(val: 0, addr: txq->bd.reg_desc_active); |
718 | |
719 | return 0; |
720 | } |
721 | |
722 | static int |
723 | fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, |
724 | struct net_device *ndev, |
725 | struct bufdesc *bdp, int index, char *data, |
726 | int size, bool last_tcp, bool is_last) |
727 | { |
728 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
729 | struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); |
730 | unsigned short status; |
731 | unsigned int estatus = 0; |
732 | dma_addr_t addr; |
733 | |
734 | status = fec16_to_cpu(bdp->cbd_sc); |
735 | status &= ~BD_ENET_TX_STATS; |
736 | |
737 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); |
738 | |
739 | if (((unsigned long) data) & fep->tx_align || |
740 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
741 | memcpy(txq->tx_bounce[index], data, size); |
742 | data = txq->tx_bounce[index]; |
743 | |
744 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
745 | swap_buffer(bufaddr: data, len: size); |
746 | } |
747 | |
748 | addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); |
749 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: addr)) { |
750 | dev_kfree_skb_any(skb); |
751 | if (net_ratelimit()) |
752 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
753 | return NETDEV_TX_OK; |
754 | } |
755 | |
756 | bdp->cbd_datlen = cpu_to_fec16(size); |
757 | bdp->cbd_bufaddr = cpu_to_fec32(addr); |
758 | |
759 | if (fep->bufdesc_ex) { |
760 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
761 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
762 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
763 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
764 | ebdp->cbd_bdu = 0; |
765 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
766 | } |
767 | |
768 | /* Handle the last BD specially */ |
769 | if (last_tcp) |
770 | status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); |
771 | if (is_last) { |
772 | status |= BD_ENET_TX_INTR; |
773 | if (fep->bufdesc_ex) |
774 | ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); |
775 | } |
776 | |
777 | bdp->cbd_sc = cpu_to_fec16(status); |
778 | |
779 | return 0; |
780 | } |
781 | |
782 | static int |
783 | fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, |
784 | struct sk_buff *skb, struct net_device *ndev, |
785 | struct bufdesc *bdp, int index) |
786 | { |
787 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
788 | int hdr_len = skb_tcp_all_headers(skb); |
789 | struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); |
790 | void *bufaddr; |
791 | unsigned long dmabuf; |
792 | unsigned short status; |
793 | unsigned int estatus = 0; |
794 | |
795 | status = fec16_to_cpu(bdp->cbd_sc); |
796 | status &= ~BD_ENET_TX_STATS; |
797 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); |
798 | |
799 | bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; |
800 | dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; |
801 | if (((unsigned long)bufaddr) & fep->tx_align || |
802 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
803 | memcpy(txq->tx_bounce[index], skb->data, hdr_len); |
804 | bufaddr = txq->tx_bounce[index]; |
805 | |
806 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
807 | swap_buffer(bufaddr, len: hdr_len); |
808 | |
809 | dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, |
810 | hdr_len, DMA_TO_DEVICE); |
811 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: dmabuf)) { |
812 | dev_kfree_skb_any(skb); |
813 | if (net_ratelimit()) |
814 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
815 | return NETDEV_TX_OK; |
816 | } |
817 | } |
818 | |
819 | bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); |
820 | bdp->cbd_datlen = cpu_to_fec16(hdr_len); |
821 | |
822 | if (fep->bufdesc_ex) { |
823 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
824 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
825 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
826 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
827 | ebdp->cbd_bdu = 0; |
828 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
829 | } |
830 | |
831 | bdp->cbd_sc = cpu_to_fec16(status); |
832 | |
833 | return 0; |
834 | } |
835 | |
836 | static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, |
837 | struct sk_buff *skb, |
838 | struct net_device *ndev) |
839 | { |
840 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
841 | int hdr_len, total_len, data_left; |
842 | struct bufdesc *bdp = txq->bd.cur; |
843 | struct tso_t tso; |
844 | unsigned int index = 0; |
845 | int ret; |
846 | |
847 | if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { |
848 | dev_kfree_skb_any(skb); |
849 | if (net_ratelimit()) |
850 | netdev_err(dev: ndev, format: "NOT enough BD for TSO!\n" ); |
851 | return NETDEV_TX_OK; |
852 | } |
853 | |
854 | /* Protocol checksum off-load for TCP and UDP. */ |
855 | if (fec_enet_clear_csum(skb, ndev)) { |
856 | dev_kfree_skb_any(skb); |
857 | return NETDEV_TX_OK; |
858 | } |
859 | |
860 | /* Initialize the TSO handler, and prepare the first payload */ |
861 | hdr_len = tso_start(skb, tso: &tso); |
862 | |
863 | total_len = skb->len - hdr_len; |
864 | while (total_len > 0) { |
865 | char *hdr; |
866 | |
867 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
868 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); |
869 | total_len -= data_left; |
870 | |
871 | /* prepare packet headers: MAC + IP + TCP */ |
872 | hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; |
873 | tso_build_hdr(skb, hdr, tso: &tso, size: data_left, is_last: total_len == 0); |
874 | ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); |
875 | if (ret) |
876 | goto err_release; |
877 | |
878 | while (data_left > 0) { |
879 | int size; |
880 | |
881 | size = min_t(int, tso.size, data_left); |
882 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
883 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
884 | ret = fec_enet_txq_put_data_tso(txq, skb, ndev, |
885 | bdp, index, |
886 | data: tso.data, size, |
887 | last_tcp: size == data_left, |
888 | is_last: total_len == 0); |
889 | if (ret) |
890 | goto err_release; |
891 | |
892 | data_left -= size; |
893 | tso_build_data(skb, tso: &tso, size); |
894 | } |
895 | |
896 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
897 | } |
898 | |
899 | /* Save skb pointer */ |
900 | txq->tx_buf[index].buf_p = skb; |
901 | |
902 | skb_tx_timestamp(skb); |
903 | txq->bd.cur = bdp; |
904 | |
905 | /* Trigger transmission start */ |
906 | if (!(fep->quirks & FEC_QUIRK_ERR007885) || |
907 | !readl(addr: txq->bd.reg_desc_active) || |
908 | !readl(addr: txq->bd.reg_desc_active) || |
909 | !readl(addr: txq->bd.reg_desc_active) || |
910 | !readl(addr: txq->bd.reg_desc_active)) |
911 | writel(val: 0, addr: txq->bd.reg_desc_active); |
912 | |
913 | return 0; |
914 | |
915 | err_release: |
916 | /* TODO: Release all used data descriptors for TSO */ |
917 | return ret; |
918 | } |
919 | |
920 | static netdev_tx_t |
921 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
922 | { |
923 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
924 | int entries_free; |
925 | unsigned short queue; |
926 | struct fec_enet_priv_tx_q *txq; |
927 | struct netdev_queue *nq; |
928 | int ret; |
929 | |
930 | queue = skb_get_queue_mapping(skb); |
931 | txq = fep->tx_queue[queue]; |
932 | nq = netdev_get_tx_queue(dev: ndev, index: queue); |
933 | |
934 | if (skb_is_gso(skb)) |
935 | ret = fec_enet_txq_submit_tso(txq, skb, ndev); |
936 | else |
937 | ret = fec_enet_txq_submit_skb(txq, skb, ndev); |
938 | if (ret) |
939 | return ret; |
940 | |
941 | entries_free = fec_enet_get_free_txdesc_num(txq); |
942 | if (entries_free <= txq->tx_stop_threshold) |
943 | netif_tx_stop_queue(dev_queue: nq); |
944 | |
945 | return NETDEV_TX_OK; |
946 | } |
947 | |
/* Init RX & TX buffer descriptors.
 *
 * Puts every ring back into its power-on layout: RX descriptors that
 * still have a buffer attached are marked EMPTY (owned by hardware),
 * and any TX buffers left in flight are unmapped and released before
 * the TX descriptors are cleared. Must be called with the MAC stopped
 * (see fec_restart()), since it rewrites live descriptor memory.
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page.
			 * A slot without a buffer (cbd_bufaddr == 0) must not
			 * be handed to hardware, so it gets status 0 instead
			 * of EMPTY.
			 */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, bd: &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, bd: &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			/* Release whatever buffer was still attached; the
			 * cleanup differs per buffer type (skb, XDP NDO
			 * frame, or page-pool page from XDP_TX).
			 */
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				/* TSO header buffers come from a dedicated
				 * coherent area and were never streaming-
				 * mapped, so they are excluded from unmap.
				 */
				if (bdp->cbd_bufaddr &&
				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
				if (txq->tx_buf[i].buf_p)
					dev_kfree_skb_any(skb: txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);

				if (txq->tx_buf[i].buf_p)
					xdp_return_frame(xdpf: txq->tx_buf[i].buf_p);
			} else {
				/* XDP_TX buffers are page-pool pages; hand
				 * them back to the pool (no DMA sync needed).
				 */
				struct page *page = txq->tx_buf[i].buf_p;

				if (page)
					page_pool_put_page(pool: page->pp, page, dma_sync_size: 0, allow_direct: false);
			}

			txq->tx_buf[i].buf_p = NULL;
			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, bd: &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}
1028 | |
1029 | static void fec_enet_active_rxring(struct net_device *ndev) |
1030 | { |
1031 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1032 | int i; |
1033 | |
1034 | for (i = 0; i < fep->num_rx_queues; i++) |
1035 | writel(val: 0, addr: fep->rx_queue[i]->bd.reg_desc_active); |
1036 | } |
1037 | |
1038 | static void fec_enet_enable_ring(struct net_device *ndev) |
1039 | { |
1040 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1041 | struct fec_enet_priv_tx_q *txq; |
1042 | struct fec_enet_priv_rx_q *rxq; |
1043 | int i; |
1044 | |
1045 | for (i = 0; i < fep->num_rx_queues; i++) { |
1046 | rxq = fep->rx_queue[i]; |
1047 | writel(val: rxq->bd.dma, addr: fep->hwp + FEC_R_DES_START(i)); |
1048 | writel(PKT_MAXBUF_SIZE, addr: fep->hwp + FEC_R_BUFF_SIZE(i)); |
1049 | |
1050 | /* enable DMA1/2 */ |
1051 | if (i) |
1052 | writel(RCMR_MATCHEN | RCMR_CMP(i), |
1053 | addr: fep->hwp + FEC_RCMR(i)); |
1054 | } |
1055 | |
1056 | for (i = 0; i < fep->num_tx_queues; i++) { |
1057 | txq = fep->tx_queue[i]; |
1058 | writel(val: txq->bd.dma, addr: fep->hwp + FEC_X_DES_START(i)); |
1059 | |
1060 | /* enable DMA1/2 */ |
1061 | if (i) |
1062 | writel(DMA_CLASS_EN | IDLE_SLOPE(i), |
1063 | addr: fep->hwp + FEC_DMA_CFG(i)); |
1064 | } |
1065 | } |
1066 | |
1067 | /* |
1068 | * This function is called to start or restart the FEC during a link |
1069 | * change, transmit timeout, or to reconfigure the FEC. The network |
1070 | * packet processing for this device must be stopped before this call. |
1071 | */ |
1072 | static void |
1073 | fec_restart(struct net_device *ndev) |
1074 | { |
1075 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1076 | u32 temp_mac[2]; |
1077 | u32 rcntl = OPT_FRAME_SIZE | 0x04; |
1078 | u32 ecntl = FEC_ECR_ETHEREN; |
1079 | |
1080 | /* Whack a reset. We should wait for this. |
1081 | * For i.MX6SX SOC, enet use AXI bus, we use disable MAC |
1082 | * instead of reset MAC itself. |
1083 | */ |
1084 | if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || |
1085 | ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { |
1086 | writel(val: 0, addr: fep->hwp + FEC_ECNTRL); |
1087 | } else { |
1088 | writel(val: 1, addr: fep->hwp + FEC_ECNTRL); |
1089 | udelay(10); |
1090 | } |
1091 | |
1092 | /* |
1093 | * enet-mac reset will reset mac address registers too, |
1094 | * so need to reconfigure it. |
1095 | */ |
1096 | memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); |
1097 | writel(val: (__force u32)cpu_to_be32(temp_mac[0]), |
1098 | addr: fep->hwp + FEC_ADDR_LOW); |
1099 | writel(val: (__force u32)cpu_to_be32(temp_mac[1]), |
1100 | addr: fep->hwp + FEC_ADDR_HIGH); |
1101 | |
1102 | /* Clear any outstanding interrupt, except MDIO. */ |
1103 | writel(val: (0xffffffff & ~FEC_ENET_MII), addr: fep->hwp + FEC_IEVENT); |
1104 | |
1105 | fec_enet_bd_init(dev: ndev); |
1106 | |
1107 | fec_enet_enable_ring(ndev); |
1108 | |
1109 | /* Enable MII mode */ |
1110 | if (fep->full_duplex == DUPLEX_FULL) { |
1111 | /* FD enable */ |
1112 | writel(val: 0x04, addr: fep->hwp + FEC_X_CNTRL); |
1113 | } else { |
1114 | /* No Rcv on Xmit */ |
1115 | rcntl |= 0x02; |
1116 | writel(val: 0x0, addr: fep->hwp + FEC_X_CNTRL); |
1117 | } |
1118 | |
1119 | /* Set MII speed */ |
1120 | writel(val: fep->phy_speed, addr: fep->hwp + FEC_MII_SPEED); |
1121 | |
1122 | #if !defined(CONFIG_M5272) |
1123 | if (fep->quirks & FEC_QUIRK_HAS_RACC) { |
1124 | u32 val = readl(addr: fep->hwp + FEC_RACC); |
1125 | |
1126 | /* align IP header */ |
1127 | val |= FEC_RACC_SHIFT16; |
1128 | if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) |
1129 | /* set RX checksum */ |
1130 | val |= FEC_RACC_OPTIONS; |
1131 | else |
1132 | val &= ~FEC_RACC_OPTIONS; |
1133 | writel(val, addr: fep->hwp + FEC_RACC); |
1134 | writel(PKT_MAXBUF_SIZE, addr: fep->hwp + FEC_FTRL); |
1135 | } |
1136 | #endif |
1137 | |
1138 | /* |
1139 | * The phy interface and speed need to get configured |
1140 | * differently on enet-mac. |
1141 | */ |
1142 | if (fep->quirks & FEC_QUIRK_ENET_MAC) { |
1143 | /* Enable flow control and length check */ |
1144 | rcntl |= 0x40000000 | 0x00000020; |
1145 | |
1146 | /* RGMII, RMII or MII */ |
1147 | if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || |
1148 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || |
1149 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || |
1150 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) |
1151 | rcntl |= (1 << 6); |
1152 | else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
1153 | rcntl |= FEC_RCR_RMII; |
1154 | else |
1155 | rcntl &= ~FEC_RCR_RMII; |
1156 | |
1157 | /* 1G, 100M or 10M */ |
1158 | if (ndev->phydev) { |
1159 | if (ndev->phydev->speed == SPEED_1000) |
1160 | ecntl |= (1 << 5); |
1161 | else if (ndev->phydev->speed == SPEED_100) |
1162 | rcntl &= ~FEC_RCR_10BASET; |
1163 | else |
1164 | rcntl |= FEC_RCR_10BASET; |
1165 | } |
1166 | } else { |
1167 | #ifdef FEC_MIIGSK_ENR |
1168 | if (fep->quirks & FEC_QUIRK_USE_GASKET) { |
1169 | u32 cfgr; |
1170 | /* disable the gasket and wait */ |
1171 | writel(val: 0, addr: fep->hwp + FEC_MIIGSK_ENR); |
1172 | while (readl(addr: fep->hwp + FEC_MIIGSK_ENR) & 4) |
1173 | udelay(1); |
1174 | |
1175 | /* |
1176 | * configure the gasket: |
1177 | * RMII, 50 MHz, no loopback, no echo |
1178 | * MII, 25 MHz, no loopback, no echo |
1179 | */ |
1180 | cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
1181 | ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; |
1182 | if (ndev->phydev && ndev->phydev->speed == SPEED_10) |
1183 | cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; |
1184 | writel(val: cfgr, addr: fep->hwp + FEC_MIIGSK_CFGR); |
1185 | |
1186 | /* re-enable the gasket */ |
1187 | writel(val: 2, addr: fep->hwp + FEC_MIIGSK_ENR); |
1188 | } |
1189 | #endif |
1190 | } |
1191 | |
1192 | #if !defined(CONFIG_M5272) |
1193 | /* enable pause frame*/ |
1194 | if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || |
1195 | ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && |
1196 | ndev->phydev && ndev->phydev->pause)) { |
1197 | rcntl |= FEC_RCR_FLOWCTL; |
1198 | |
1199 | /* set FIFO threshold parameter to reduce overrun */ |
1200 | writel(FEC_ENET_RSEM_V, addr: fep->hwp + FEC_R_FIFO_RSEM); |
1201 | writel(FEC_ENET_RSFL_V, addr: fep->hwp + FEC_R_FIFO_RSFL); |
1202 | writel(FEC_ENET_RAEM_V, addr: fep->hwp + FEC_R_FIFO_RAEM); |
1203 | writel(FEC_ENET_RAFL_V, addr: fep->hwp + FEC_R_FIFO_RAFL); |
1204 | |
1205 | /* OPD */ |
1206 | writel(FEC_ENET_OPD_V, addr: fep->hwp + FEC_OPD); |
1207 | } else { |
1208 | rcntl &= ~FEC_RCR_FLOWCTL; |
1209 | } |
1210 | #endif /* !defined(CONFIG_M5272) */ |
1211 | |
1212 | writel(val: rcntl, addr: fep->hwp + FEC_R_CNTRL); |
1213 | |
1214 | /* Setup multicast filter. */ |
1215 | set_multicast_list(ndev); |
1216 | #ifndef CONFIG_M5272 |
1217 | writel(val: 0, addr: fep->hwp + FEC_HASH_TABLE_HIGH); |
1218 | writel(val: 0, addr: fep->hwp + FEC_HASH_TABLE_LOW); |
1219 | #endif |
1220 | |
1221 | if (fep->quirks & FEC_QUIRK_ENET_MAC) { |
1222 | /* enable ENET endian swap */ |
1223 | ecntl |= FEC_ECR_BYTESWP; |
1224 | /* enable ENET store and forward mode */ |
1225 | writel(FEC_TXWMRK_STRFWD, addr: fep->hwp + FEC_X_WMRK); |
1226 | } |
1227 | |
1228 | if (fep->bufdesc_ex) |
1229 | ecntl |= FEC_ECR_EN1588; |
1230 | |
1231 | if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && |
1232 | fep->rgmii_txc_dly) |
1233 | ecntl |= FEC_ENET_TXC_DLY; |
1234 | if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && |
1235 | fep->rgmii_rxc_dly) |
1236 | ecntl |= FEC_ENET_RXC_DLY; |
1237 | |
1238 | #ifndef CONFIG_M5272 |
1239 | /* Enable the MIB statistic event counters */ |
1240 | writel(val: 0 << 31, addr: fep->hwp + FEC_MIB_CTRLSTAT); |
1241 | #endif |
1242 | |
1243 | /* And last, enable the transmit and receive processing */ |
1244 | writel(val: ecntl, addr: fep->hwp + FEC_ECNTRL); |
1245 | fec_enet_active_rxring(ndev); |
1246 | |
1247 | if (fep->bufdesc_ex) |
1248 | fec_ptp_start_cyclecounter(ndev); |
1249 | |
1250 | /* Enable interrupts we wish to service */ |
1251 | if (fep->link) |
1252 | writel(FEC_DEFAULT_IMASK, addr: fep->hwp + FEC_IMASK); |
1253 | else |
1254 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1255 | |
1256 | /* Init the interrupt coalescing */ |
1257 | if (fep->quirks & FEC_QUIRK_HAS_COALESCE) |
1258 | fec_enet_itr_coal_set(ndev); |
1259 | } |
1260 | |
1261 | static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) |
1262 | { |
1263 | if (!(of_machine_is_compatible(compat: "fsl,imx8qm" ) || |
1264 | of_machine_is_compatible(compat: "fsl,imx8qxp" ) || |
1265 | of_machine_is_compatible(compat: "fsl,imx8dxl" ))) |
1266 | return 0; |
1267 | |
1268 | return imx_scu_get_handle(ipc: &fep->ipc_handle); |
1269 | } |
1270 | |
1271 | static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) |
1272 | { |
1273 | struct device_node *np = fep->pdev->dev.of_node; |
1274 | u32 rsrc_id, val; |
1275 | int idx; |
1276 | |
1277 | if (!np || !fep->ipc_handle) |
1278 | return; |
1279 | |
1280 | idx = of_alias_get_id(np, stem: "ethernet" ); |
1281 | if (idx < 0) |
1282 | idx = 0; |
1283 | rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; |
1284 | |
1285 | val = enabled ? 1 : 0; |
1286 | imx_sc_misc_set_control(ipc: fep->ipc_handle, resource: rsrc_id, IMX_SC_C_IPG_STOP, val); |
1287 | } |
1288 | |
1289 | static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) |
1290 | { |
1291 | struct fec_platform_data *pdata = fep->pdev->dev.platform_data; |
1292 | struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; |
1293 | |
1294 | if (stop_gpr->gpr) { |
1295 | if (enabled) |
1296 | regmap_update_bits(map: stop_gpr->gpr, reg: stop_gpr->reg, |
1297 | BIT(stop_gpr->bit), |
1298 | BIT(stop_gpr->bit)); |
1299 | else |
1300 | regmap_update_bits(map: stop_gpr->gpr, reg: stop_gpr->reg, |
1301 | BIT(stop_gpr->bit), val: 0); |
1302 | } else if (pdata && pdata->sleep_mode_enable) { |
1303 | pdata->sleep_mode_enable(enabled); |
1304 | } else { |
1305 | fec_enet_ipg_stop_set(fep, enabled); |
1306 | } |
1307 | } |
1308 | |
1309 | static void fec_irqs_disable(struct net_device *ndev) |
1310 | { |
1311 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1312 | |
1313 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1314 | } |
1315 | |
1316 | static void fec_irqs_disable_except_wakeup(struct net_device *ndev) |
1317 | { |
1318 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1319 | |
1320 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1321 | writel(FEC_ENET_WAKEUP, addr: fep->hwp + FEC_IMASK); |
1322 | } |
1323 | |
/* Stop the controller: attempt a graceful TX stop while the link is
 * up, then either reset/disable the MAC or — when Wake-on-LAN sleep is
 * requested — leave it running with magic-packet detection armed.
 * The RMII mode bit is preserved across the reset so MDIO keeps
 * working afterwards.
 */
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	u32 rmii_mode = readl(addr: fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(val: 1, addr: fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(addr: fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(dev: ndev, format: "Graceful transmit stop did not complete!\n" );
	}

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
			writel(val: 0, addr: fep->hwp + FEC_ECNTRL);
		} else {
			writel(FEC_ECR_RESET, addr: fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
	} else {
		/* WoL sleep: keep the MAC alive, arm magic-packet wake */
		val = readl(addr: fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, addr: fep->hwp + FEC_ECNTRL);
	}
	writel(val: fep->phy_speed, addr: fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, addr: fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(FEC_ECR_ETHEREN, addr: fep->hwp + FEC_ECNTRL);
		writel(val: rmii_mode, addr: fep->hwp + FEC_R_CNTRL);
	}
}
1365 | |
1366 | static void |
1367 | fec_timeout(struct net_device *ndev, unsigned int txqueue) |
1368 | { |
1369 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1370 | |
1371 | fec_dump(ndev); |
1372 | |
1373 | ndev->stats.tx_errors++; |
1374 | |
1375 | schedule_work(work: &fep->tx_timeout_work); |
1376 | } |
1377 | |
/* Deferred TX-timeout handler (scheduled by fec_timeout()): restarts
 * the MAC under rtnl with NAPI disabled and the TX path locked so no
 * packets are submitted while descriptor rings are being rebuilt.
 */
static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	/* NOTE(review): the || lets the restart run when the device is
	 * detached but still marked running — presumably intentional for
	 * the suspend/resume race; confirm before tightening to &&.
	 */
	if (netif_device_present(dev: ndev) || netif_running(dev: ndev)) {
		napi_disable(n: &fep->napi);
		netif_tx_lock_bh(dev: ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(dev: ndev);
		netif_tx_unlock_bh(dev: ndev);
		napi_enable(n: &fep->napi);
	}
	rtnl_unlock();
}
1395 | |
1396 | static void |
1397 | fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, |
1398 | struct skb_shared_hwtstamps *hwtstamps) |
1399 | { |
1400 | unsigned long flags; |
1401 | u64 ns; |
1402 | |
1403 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
1404 | ns = timecounter_cyc2time(tc: &fep->tc, cycle_tstamp: ts); |
1405 | spin_unlock_irqrestore(lock: &fep->tmreg_lock, flags); |
1406 | |
1407 | memset(hwtstamps, 0, sizeof(*hwtstamps)); |
1408 | hwtstamps->hwtstamp = ns_to_ktime(ns); |
1409 | } |
1410 | |
/* Reclaim completed TX descriptors for one queue from NAPI context:
 * unmap DMA, release the attached buffer according to its type (skb,
 * XDP NDO frame, or page-pool page from XDP_TX), update statistics
 * and timestamps, and wake the queue if it was flow-controlled.
 * Walks from dirty_tx+1 up to (but not including) bd.cur.
 */
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
	struct fec_enet_private *fep;
	struct xdp_frame *xdpf;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;
	struct page *page;
	int frame_len;

	fep = netdev_priv(dev: ndev);

	txq = fep->tx_queue[queue_id];
	/* the netdev queue paired with this hardware ring */
	nq = netdev_get_tx_queue(dev: ndev, index: queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		/* Hardware still owns this descriptor: stop reclaiming */
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, bd: &txq->bd);

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			skb = txq->tx_buf[index].buf_p;
			/* TSO headers come from a coherent area and were
			 * never streaming-mapped, so they are not unmapped.
			 */
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			/* Intermediate descriptor of a multi-BD frame: only
			 * the last descriptor carries the skb pointer.
			 */
			if (!skb)
				goto tx_buf_done;
		} else {
			/* Tx processing cannot call any XDP (or page pool) APIs if
			 * the "budget" is 0. Because NAPI is called with budget of
			 * 0 (such as netpoll) indicates we may be in an IRQ context,
			 * however, we can't use the page pool from IRQ context.
			 */
			if (unlikely(!budget))
				break;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
				xdpf = txq->tx_buf[index].buf_p;
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
			} else {
				page = txq->tx_buf[index].buf_p;
			}

			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (unlikely(!txq->tx_buf[index].buf_p)) {
				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
				goto tx_buf_done;
			}

			frame_len = fec16_to_cpu(bdp->cbd_datlen);
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
				ndev->stats.tx_bytes += skb->len;
			else
				ndev->stats.tx_bytes += frame_len;
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
			 * are to time stamp the packet, so we still need to check time
			 * stamping enabled flag.
			 */
			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
				     fep->hwts_tx_en) && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), hwtstamps: &shhwtstamps);
				skb_tstamp_tx(orig_skb: skb, hwtstamps: &shhwtstamps);
			}

			/* Free the sk buffer associated with this last transmit */
			napi_consume_skb(skb, budget);
		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
			xdp_return_frame_rx_napi(xdpf);
		} else { /* recycle pages of XDP_TX frames */
			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
			page_pool_put_page(pool: page->pp, page, dma_sync_size: 0, allow_direct: true);
		}

		txq->tx_buf[index].buf_p = NULL;
		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;

tx_buf_done:
		/* Make sure the update to bdp and tx_buf are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_tx_queue_stopped(dev_queue: nq)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(dev_queue: nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(addr: txq->bd.reg_desc_active) == 0)
		writel(val: 0, addr: txq->bd.reg_desc_active);
}
1566 | |
1567 | static void fec_enet_tx(struct net_device *ndev, int budget) |
1568 | { |
1569 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1570 | int i; |
1571 | |
1572 | /* Make sure that AVB queues are processed first. */ |
1573 | for (i = fep->num_tx_queues - 1; i >= 0; i--) |
1574 | fec_enet_tx_queue(ndev, queue_id: i, budget); |
1575 | } |
1576 | |
/* Refill one RX ring slot: allocate a fresh page from the page pool,
 * record it (with the reserved XDP headroom offset) in the software
 * ring, and point the hardware descriptor at its DMA address.
 */
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
				struct bufdesc *bdp, int index)
{
	struct page *new_page;
	dma_addr_t phys_addr;

	new_page = page_pool_dev_alloc_pages(pool: rxq->page_pool);
	/* NOTE(review): an allocation failure only warns here, yet
	 * page_pool_get_dma_addr() below dereferences new_page
	 * unconditionally — confirm whether an error should propagate
	 * to the caller instead of risking a NULL dereference.
	 */
	WARN_ON(!new_page);
	rxq->rx_skb_info[index].page = new_page;

	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
	phys_addr = page_pool_get_dma_addr(page: new_page) + FEC_ENET_XDP_HEADROOM;
	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
}
1591 | |
/* Run the attached XDP program on one received buffer and act on its
 * verdict. Returns one of the FEC_ENET_XDP_* codes so the caller can
 * decide whether to build an skb (PASS) or skip the slot. On DROP,
 * ABORTED, or any TX/REDIRECT failure the page goes straight back to
 * the page pool.
 */
static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = FEC_ENET_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
	 * max len CPU touch. The post-run length is compared against the
	 * pre-run length and the larger one is used for the sync.
	 */
	sync = xdp->data_end - xdp->data;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		rxq->stats[RX_XDP_PASS]++;
		ret = FEC_ENET_XDP_PASS;
		break;

	case XDP_REDIRECT:
		rxq->stats[RX_XDP_REDIRECT]++;
		err = xdp_do_redirect(dev: fep->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_err;

		ret = FEC_ENET_XDP_REDIR;
		break;

	case XDP_TX:
		rxq->stats[RX_XDP_TX]++;
		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, dma_sync_len: sync);
		if (unlikely(err)) {
			rxq->stats[RX_XDP_TX_ERRORS]++;
			goto xdp_err;
		}

		ret = FEC_ENET_XDP_TX;
		break;

	default:
		/* Unknown verdict: warn once, then treat it like ABORTED */
		bpf_warn_invalid_xdp_action(dev: fep->netdev, prog, act);
		fallthrough;

	case XDP_ABORTED:
		fallthrough;	/* handle aborts by dropping packet */

	case XDP_DROP:
		rxq->stats[RX_XDP_DROP]++;
xdp_err:
		ret = FEC_ENET_XDP_CONSUMED;
		page = virt_to_head_page(x: xdp->data);
		page_pool_put_page(pool: rxq->page_pool, page, dma_sync_size: sync, allow_direct: true);
		/* Only non-DROP failures count as exceptions for tracing */
		if (act != XDP_DROP)
			trace_xdp_exception(dev: fep->netdev, xdp: prog, act);
		break;
	}

	return ret;
}
1656 | |
/* During a receive, rxq->bd.cur points to the current incoming buffer.
 * As we walk the ring, if the next incoming buffer has not yet been
 * given to the system, we simply set the empty indicator again,
 * effectively tossing the packet.
 */
1662 | static int |
1663 | fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) |
1664 | { |
1665 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1666 | struct fec_enet_priv_rx_q *rxq; |
1667 | struct bufdesc *bdp; |
1668 | unsigned short status; |
1669 | struct sk_buff *skb; |
1670 | ushort pkt_len; |
1671 | __u8 *data; |
1672 | int pkt_received = 0; |
1673 | struct bufdesc_ex *ebdp = NULL; |
1674 | bool vlan_packet_rcvd = false; |
1675 | u16 vlan_tag; |
1676 | int index = 0; |
1677 | bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; |
1678 | struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); |
1679 | u32 ret, xdp_result = FEC_ENET_XDP_PASS; |
1680 | u32 data_start = FEC_ENET_XDP_HEADROOM; |
1681 | int cpu = smp_processor_id(); |
1682 | struct xdp_buff xdp; |
1683 | struct page *page; |
1684 | u32 sub_len = 4; |
1685 | |
1686 | #if !defined(CONFIG_M5272) |
1687 | /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of |
1688 | * FEC_RACC_SHIFT16 is set by default in the probe function. |
1689 | */ |
1690 | if (fep->quirks & FEC_QUIRK_HAS_RACC) { |
1691 | data_start += 2; |
1692 | sub_len += 2; |
1693 | } |
1694 | #endif |
1695 | |
1696 | #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) |
1697 | /* |
1698 | * Hacky flush of all caches instead of using the DMA API for the TSO |
1699 | * headers. |
1700 | */ |
1701 | flush_cache_all(); |
1702 | #endif |
1703 | rxq = fep->rx_queue[queue_id]; |
1704 | |
1705 | /* First, grab all of the stats for the incoming packet. |
1706 | * These get messed up if we get called due to a busy condition. |
1707 | */ |
1708 | bdp = rxq->bd.cur; |
1709 | xdp_init_buff(xdp: &xdp, PAGE_SIZE, rxq: &rxq->xdp_rxq); |
1710 | |
1711 | while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { |
1712 | |
1713 | if (pkt_received >= budget) |
1714 | break; |
1715 | pkt_received++; |
1716 | |
1717 | writel(FEC_ENET_RXF_GET(queue_id), addr: fep->hwp + FEC_IEVENT); |
1718 | |
1719 | /* Check for errors. */ |
1720 | status ^= BD_ENET_RX_LAST; |
1721 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | |
1722 | BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | |
1723 | BD_ENET_RX_CL)) { |
1724 | ndev->stats.rx_errors++; |
1725 | if (status & BD_ENET_RX_OV) { |
1726 | /* FIFO overrun */ |
1727 | ndev->stats.rx_fifo_errors++; |
1728 | goto rx_processing_done; |
1729 | } |
1730 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH |
1731 | | BD_ENET_RX_LAST)) { |
1732 | /* Frame too long or too short. */ |
1733 | ndev->stats.rx_length_errors++; |
1734 | if (status & BD_ENET_RX_LAST) |
1735 | netdev_err(dev: ndev, format: "rcv is not +last\n" ); |
1736 | } |
1737 | if (status & BD_ENET_RX_CR) /* CRC Error */ |
1738 | ndev->stats.rx_crc_errors++; |
1739 | /* Report late collisions as a frame error. */ |
1740 | if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) |
1741 | ndev->stats.rx_frame_errors++; |
1742 | goto rx_processing_done; |
1743 | } |
1744 | |
1745 | /* Process the incoming frame. */ |
1746 | ndev->stats.rx_packets++; |
1747 | pkt_len = fec16_to_cpu(bdp->cbd_datlen); |
1748 | ndev->stats.rx_bytes += pkt_len; |
1749 | |
1750 | index = fec_enet_get_bd_index(bdp, bd: &rxq->bd); |
1751 | page = rxq->rx_skb_info[index].page; |
1752 | dma_sync_single_for_cpu(dev: &fep->pdev->dev, |
1753 | fec32_to_cpu(bdp->cbd_bufaddr), |
1754 | size: pkt_len, |
1755 | dir: DMA_FROM_DEVICE); |
1756 | prefetch(page_address(page)); |
1757 | fec_enet_update_cbd(rxq, bdp, index); |
1758 | |
1759 | if (xdp_prog) { |
1760 | xdp_buff_clear_frags_flag(xdp: &xdp); |
1761 | /* subtract 16bit shift and FCS */ |
1762 | xdp_prepare_buff(xdp: &xdp, page_address(page), |
1763 | headroom: data_start, data_len: pkt_len - sub_len, meta_valid: false); |
1764 | ret = fec_enet_run_xdp(fep, prog: xdp_prog, xdp: &xdp, rxq, cpu); |
1765 | xdp_result |= ret; |
1766 | if (ret != FEC_ENET_XDP_PASS) |
1767 | goto rx_processing_done; |
1768 | } |
1769 | |
1770 | /* The packet length includes FCS, but we don't want to |
1771 | * include that when passing upstream as it messes up |
1772 | * bridging applications. |
1773 | */ |
1774 | skb = build_skb(page_address(page), PAGE_SIZE); |
1775 | if (unlikely(!skb)) { |
1776 | page_pool_recycle_direct(pool: rxq->page_pool, page); |
1777 | ndev->stats.rx_dropped++; |
1778 | |
1779 | netdev_err_once(ndev, "build_skb failed!\n" ); |
1780 | goto rx_processing_done; |
1781 | } |
1782 | |
1783 | skb_reserve(skb, len: data_start); |
1784 | skb_put(skb, len: pkt_len - sub_len); |
1785 | skb_mark_for_recycle(skb); |
1786 | |
1787 | if (unlikely(need_swap)) { |
1788 | data = page_address(page) + FEC_ENET_XDP_HEADROOM; |
1789 | swap_buffer(bufaddr: data, len: pkt_len); |
1790 | } |
1791 | data = skb->data; |
1792 | |
1793 | /* Extract the enhanced buffer descriptor */ |
1794 | ebdp = NULL; |
1795 | if (fep->bufdesc_ex) |
1796 | ebdp = (struct bufdesc_ex *)bdp; |
1797 | |
1798 | /* If this is a VLAN packet remove the VLAN Tag */ |
1799 | vlan_packet_rcvd = false; |
1800 | if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
1801 | fep->bufdesc_ex && |
1802 | (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { |
1803 | /* Push and remove the vlan tag */ |
1804 | struct vlan_hdr * = |
1805 | (struct vlan_hdr *) (data + ETH_HLEN); |
1806 | vlan_tag = ntohs(vlan_header->h_vlan_TCI); |
1807 | |
1808 | vlan_packet_rcvd = true; |
1809 | |
1810 | memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); |
1811 | skb_pull(skb, VLAN_HLEN); |
1812 | } |
1813 | |
1814 | skb->protocol = eth_type_trans(skb, dev: ndev); |
1815 | |
1816 | /* Get receive timestamp from the skb */ |
1817 | if (fep->hwts_rx_en && fep->bufdesc_ex) |
1818 | fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), |
1819 | hwtstamps: skb_hwtstamps(skb)); |
1820 | |
1821 | if (fep->bufdesc_ex && |
1822 | (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { |
1823 | if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { |
1824 | /* don't check it */ |
1825 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1826 | } else { |
1827 | skb_checksum_none_assert(skb); |
1828 | } |
1829 | } |
1830 | |
1831 | /* Handle received VLAN packets */ |
1832 | if (vlan_packet_rcvd) |
1833 | __vlan_hwaccel_put_tag(skb, |
1834 | htons(ETH_P_8021Q), |
1835 | vlan_tci: vlan_tag); |
1836 | |
1837 | skb_record_rx_queue(skb, rx_queue: queue_id); |
1838 | napi_gro_receive(napi: &fep->napi, skb); |
1839 | |
1840 | rx_processing_done: |
1841 | /* Clear the status flags for this buffer */ |
1842 | status &= ~BD_ENET_RX_STATS; |
1843 | |
1844 | /* Mark the buffer empty */ |
1845 | status |= BD_ENET_RX_EMPTY; |
1846 | |
1847 | if (fep->bufdesc_ex) { |
1848 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
1849 | |
1850 | ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); |
1851 | ebdp->cbd_prot = 0; |
1852 | ebdp->cbd_bdu = 0; |
1853 | } |
1854 | /* Make sure the updates to rest of the descriptor are |
1855 | * performed before transferring ownership. |
1856 | */ |
1857 | wmb(); |
1858 | bdp->cbd_sc = cpu_to_fec16(status); |
1859 | |
1860 | /* Update BD pointer to next entry */ |
1861 | bdp = fec_enet_get_nextdesc(bdp, bd: &rxq->bd); |
1862 | |
1863 | /* Doing this here will keep the FEC running while we process |
1864 | * incoming frames. On a heavily loaded network, we should be |
1865 | * able to keep up at the expense of system resources. |
1866 | */ |
1867 | writel(val: 0, addr: rxq->bd.reg_desc_active); |
1868 | } |
1869 | rxq->bd.cur = bdp; |
1870 | |
1871 | if (xdp_result & FEC_ENET_XDP_REDIR) |
1872 | xdp_do_flush(); |
1873 | |
1874 | return pkt_received; |
1875 | } |
1876 | |
1877 | static int fec_enet_rx(struct net_device *ndev, int budget) |
1878 | { |
1879 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1880 | int i, done = 0; |
1881 | |
1882 | /* Make sure that AVB queues are processed first. */ |
1883 | for (i = fep->num_rx_queues - 1; i >= 0; i--) |
1884 | done += fec_enet_rx_queue(ndev, budget: budget - done, queue_id: i); |
1885 | |
1886 | return done; |
1887 | } |
1888 | |
1889 | static bool fec_enet_collect_events(struct fec_enet_private *fep) |
1890 | { |
1891 | uint int_events; |
1892 | |
1893 | int_events = readl(addr: fep->hwp + FEC_IEVENT); |
1894 | |
1895 | /* Don't clear MDIO events, we poll for those */ |
1896 | int_events &= ~FEC_ENET_MII; |
1897 | |
1898 | writel(val: int_events, addr: fep->hwp + FEC_IEVENT); |
1899 | |
1900 | return int_events != 0; |
1901 | } |
1902 | |
1903 | static irqreturn_t |
1904 | fec_enet_interrupt(int irq, void *dev_id) |
1905 | { |
1906 | struct net_device *ndev = dev_id; |
1907 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1908 | irqreturn_t ret = IRQ_NONE; |
1909 | |
1910 | if (fec_enet_collect_events(fep) && fep->link) { |
1911 | ret = IRQ_HANDLED; |
1912 | |
1913 | if (napi_schedule_prep(n: &fep->napi)) { |
1914 | /* Disable interrupts */ |
1915 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1916 | __napi_schedule(n: &fep->napi); |
1917 | } |
1918 | } |
1919 | |
1920 | return ret; |
1921 | } |
1922 | |
1923 | static int fec_enet_rx_napi(struct napi_struct *napi, int budget) |
1924 | { |
1925 | struct net_device *ndev = napi->dev; |
1926 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1927 | int done = 0; |
1928 | |
1929 | do { |
1930 | done += fec_enet_rx(ndev, budget: budget - done); |
1931 | fec_enet_tx(ndev, budget); |
1932 | } while ((done < budget) && fec_enet_collect_events(fep)); |
1933 | |
1934 | if (done < budget) { |
1935 | napi_complete_done(n: napi, work_done: done); |
1936 | writel(FEC_DEFAULT_IMASK, addr: fep->hwp + FEC_IMASK); |
1937 | } |
1938 | |
1939 | return done; |
1940 | } |
1941 | |
1942 | /* ------------------------------------------------------------------------- */ |
1943 | static int fec_get_mac(struct net_device *ndev) |
1944 | { |
1945 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1946 | unsigned char *iap, tmpaddr[ETH_ALEN]; |
1947 | int ret; |
1948 | |
1949 | /* |
1950 | * try to get mac address in following order: |
1951 | * |
1952 | * 1) module parameter via kernel command line in form |
1953 | * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 |
1954 | */ |
1955 | iap = macaddr; |
1956 | |
1957 | /* |
1958 | * 2) from device tree data |
1959 | */ |
1960 | if (!is_valid_ether_addr(addr: iap)) { |
1961 | struct device_node *np = fep->pdev->dev.of_node; |
1962 | if (np) { |
1963 | ret = of_get_mac_address(np, mac: tmpaddr); |
1964 | if (!ret) |
1965 | iap = tmpaddr; |
1966 | else if (ret == -EPROBE_DEFER) |
1967 | return ret; |
1968 | } |
1969 | } |
1970 | |
1971 | /* |
1972 | * 3) from flash or fuse (via platform data) |
1973 | */ |
1974 | if (!is_valid_ether_addr(addr: iap)) { |
1975 | #ifdef CONFIG_M5272 |
1976 | if (FEC_FLASHMAC) |
1977 | iap = (unsigned char *)FEC_FLASHMAC; |
1978 | #else |
1979 | struct fec_platform_data *pdata = dev_get_platdata(dev: &fep->pdev->dev); |
1980 | |
1981 | if (pdata) |
1982 | iap = (unsigned char *)&pdata->mac; |
1983 | #endif |
1984 | } |
1985 | |
1986 | /* |
1987 | * 4) FEC mac registers set by bootloader |
1988 | */ |
1989 | if (!is_valid_ether_addr(addr: iap)) { |
1990 | *((__be32 *) &tmpaddr[0]) = |
1991 | cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); |
1992 | *((__be16 *) &tmpaddr[4]) = |
1993 | cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); |
1994 | iap = &tmpaddr[0]; |
1995 | } |
1996 | |
1997 | /* |
1998 | * 5) random mac address |
1999 | */ |
2000 | if (!is_valid_ether_addr(addr: iap)) { |
2001 | /* Report it and use a random ethernet address instead */ |
2002 | dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n" , iap); |
2003 | eth_hw_addr_random(dev: ndev); |
2004 | dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n" , |
2005 | ndev->dev_addr); |
2006 | return 0; |
2007 | } |
2008 | |
2009 | /* Adjust MAC if using macaddr */ |
2010 | eth_hw_addr_gen(dev: ndev, base_addr: iap, id: iap == macaddr ? fep->dev_id : 0); |
2011 | |
2012 | return 0; |
2013 | } |
2014 | |
2015 | /* ------------------------------------------------------------------------- */ |
2016 | |
2017 | /* |
2018 | * Phy section |
2019 | */ |
2020 | |
2021 | /* LPI Sleep Ts count base on tx clk (clk_ref). |
2022 | * The lpi sleep cnt value = X us / (cycle_ns). |
2023 | */ |
2024 | static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) |
2025 | { |
2026 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2027 | |
2028 | return us * (fep->clk_ref_rate / 1000) / 1000; |
2029 | } |
2030 | |
2031 | static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) |
2032 | { |
2033 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2034 | struct ethtool_keee *p = &fep->eee; |
2035 | unsigned int sleep_cycle, wake_cycle; |
2036 | |
2037 | if (enable) { |
2038 | sleep_cycle = fec_enet_us_to_tx_cycle(ndev, us: p->tx_lpi_timer); |
2039 | wake_cycle = sleep_cycle; |
2040 | } else { |
2041 | sleep_cycle = 0; |
2042 | wake_cycle = 0; |
2043 | } |
2044 | |
2045 | writel(val: sleep_cycle, addr: fep->hwp + FEC_LPI_SLEEP); |
2046 | writel(val: wake_cycle, addr: fep->hwp + FEC_LPI_WAKE); |
2047 | |
2048 | return 0; |
2049 | } |
2050 | |
2051 | static void fec_enet_adjust_link(struct net_device *ndev) |
2052 | { |
2053 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2054 | struct phy_device *phy_dev = ndev->phydev; |
2055 | int status_change = 0; |
2056 | |
2057 | /* |
2058 | * If the netdev is down, or is going down, we're not interested |
2059 | * in link state events, so just mark our idea of the link as down |
2060 | * and ignore the event. |
2061 | */ |
2062 | if (!netif_running(dev: ndev) || !netif_device_present(dev: ndev)) { |
2063 | fep->link = 0; |
2064 | } else if (phy_dev->link) { |
2065 | if (!fep->link) { |
2066 | fep->link = phy_dev->link; |
2067 | status_change = 1; |
2068 | } |
2069 | |
2070 | if (fep->full_duplex != phy_dev->duplex) { |
2071 | fep->full_duplex = phy_dev->duplex; |
2072 | status_change = 1; |
2073 | } |
2074 | |
2075 | if (phy_dev->speed != fep->speed) { |
2076 | fep->speed = phy_dev->speed; |
2077 | status_change = 1; |
2078 | } |
2079 | |
2080 | /* if any of the above changed restart the FEC */ |
2081 | if (status_change) { |
2082 | netif_stop_queue(dev: ndev); |
2083 | napi_disable(n: &fep->napi); |
2084 | netif_tx_lock_bh(dev: ndev); |
2085 | fec_restart(ndev); |
2086 | netif_tx_wake_all_queues(dev: ndev); |
2087 | netif_tx_unlock_bh(dev: ndev); |
2088 | napi_enable(n: &fep->napi); |
2089 | } |
2090 | if (fep->quirks & FEC_QUIRK_HAS_EEE) |
2091 | fec_enet_eee_mode_set(ndev, enable: phy_dev->enable_tx_lpi); |
2092 | } else { |
2093 | if (fep->link) { |
2094 | netif_stop_queue(dev: ndev); |
2095 | napi_disable(n: &fep->napi); |
2096 | netif_tx_lock_bh(dev: ndev); |
2097 | fec_stop(ndev); |
2098 | netif_tx_unlock_bh(dev: ndev); |
2099 | napi_enable(n: &fep->napi); |
2100 | fep->link = phy_dev->link; |
2101 | status_change = 1; |
2102 | } |
2103 | } |
2104 | |
2105 | if (status_change) |
2106 | phy_print_status(phydev: phy_dev); |
2107 | } |
2108 | |
2109 | static int fec_enet_mdio_wait(struct fec_enet_private *fep) |
2110 | { |
2111 | uint ievent; |
2112 | int ret; |
2113 | |
2114 | ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, |
2115 | ievent & FEC_ENET_MII, 2, 30000); |
2116 | |
2117 | if (!ret) |
2118 | writel(FEC_ENET_MII, addr: fep->hwp + FEC_IEVENT); |
2119 | |
2120 | return ret; |
2121 | } |
2122 | |
2123 | static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) |
2124 | { |
2125 | struct fec_enet_private *fep = bus->priv; |
2126 | struct device *dev = &fep->pdev->dev; |
2127 | int ret = 0, frame_start, frame_addr, frame_op; |
2128 | |
2129 | ret = pm_runtime_resume_and_get(dev); |
2130 | if (ret < 0) |
2131 | return ret; |
2132 | |
2133 | /* C22 read */ |
2134 | frame_op = FEC_MMFR_OP_READ; |
2135 | frame_start = FEC_MMFR_ST; |
2136 | frame_addr = regnum; |
2137 | |
2138 | /* start a read op */ |
2139 | writel(val: frame_start | frame_op | |
2140 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | |
2141 | FEC_MMFR_TA, addr: fep->hwp + FEC_MII_DATA); |
2142 | |
2143 | /* wait for end of transfer */ |
2144 | ret = fec_enet_mdio_wait(fep); |
2145 | if (ret) { |
2146 | netdev_err(dev: fep->netdev, format: "MDIO read timeout\n" ); |
2147 | goto out; |
2148 | } |
2149 | |
2150 | ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); |
2151 | |
2152 | out: |
2153 | pm_runtime_mark_last_busy(dev); |
2154 | pm_runtime_put_autosuspend(dev); |
2155 | |
2156 | return ret; |
2157 | } |
2158 | |
2159 | static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, |
2160 | int devad, int regnum) |
2161 | { |
2162 | struct fec_enet_private *fep = bus->priv; |
2163 | struct device *dev = &fep->pdev->dev; |
2164 | int ret = 0, frame_start, frame_op; |
2165 | |
2166 | ret = pm_runtime_resume_and_get(dev); |
2167 | if (ret < 0) |
2168 | return ret; |
2169 | |
2170 | frame_start = FEC_MMFR_ST_C45; |
2171 | |
2172 | /* write address */ |
2173 | writel(val: frame_start | FEC_MMFR_OP_ADDR_WRITE | |
2174 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2175 | FEC_MMFR_TA | (regnum & 0xFFFF), |
2176 | addr: fep->hwp + FEC_MII_DATA); |
2177 | |
2178 | /* wait for end of transfer */ |
2179 | ret = fec_enet_mdio_wait(fep); |
2180 | if (ret) { |
2181 | netdev_err(dev: fep->netdev, format: "MDIO address write timeout\n" ); |
2182 | goto out; |
2183 | } |
2184 | |
2185 | frame_op = FEC_MMFR_OP_READ_C45; |
2186 | |
2187 | /* start a read op */ |
2188 | writel(val: frame_start | frame_op | |
2189 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2190 | FEC_MMFR_TA, addr: fep->hwp + FEC_MII_DATA); |
2191 | |
2192 | /* wait for end of transfer */ |
2193 | ret = fec_enet_mdio_wait(fep); |
2194 | if (ret) { |
2195 | netdev_err(dev: fep->netdev, format: "MDIO read timeout\n" ); |
2196 | goto out; |
2197 | } |
2198 | |
2199 | ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); |
2200 | |
2201 | out: |
2202 | pm_runtime_mark_last_busy(dev); |
2203 | pm_runtime_put_autosuspend(dev); |
2204 | |
2205 | return ret; |
2206 | } |
2207 | |
2208 | static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, |
2209 | u16 value) |
2210 | { |
2211 | struct fec_enet_private *fep = bus->priv; |
2212 | struct device *dev = &fep->pdev->dev; |
2213 | int ret, frame_start, frame_addr; |
2214 | |
2215 | ret = pm_runtime_resume_and_get(dev); |
2216 | if (ret < 0) |
2217 | return ret; |
2218 | |
2219 | /* C22 write */ |
2220 | frame_start = FEC_MMFR_ST; |
2221 | frame_addr = regnum; |
2222 | |
2223 | /* start a write op */ |
2224 | writel(val: frame_start | FEC_MMFR_OP_WRITE | |
2225 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | |
2226 | FEC_MMFR_TA | FEC_MMFR_DATA(value), |
2227 | addr: fep->hwp + FEC_MII_DATA); |
2228 | |
2229 | /* wait for end of transfer */ |
2230 | ret = fec_enet_mdio_wait(fep); |
2231 | if (ret) |
2232 | netdev_err(dev: fep->netdev, format: "MDIO write timeout\n" ); |
2233 | |
2234 | pm_runtime_mark_last_busy(dev); |
2235 | pm_runtime_put_autosuspend(dev); |
2236 | |
2237 | return ret; |
2238 | } |
2239 | |
2240 | static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, |
2241 | int devad, int regnum, u16 value) |
2242 | { |
2243 | struct fec_enet_private *fep = bus->priv; |
2244 | struct device *dev = &fep->pdev->dev; |
2245 | int ret, frame_start; |
2246 | |
2247 | ret = pm_runtime_resume_and_get(dev); |
2248 | if (ret < 0) |
2249 | return ret; |
2250 | |
2251 | frame_start = FEC_MMFR_ST_C45; |
2252 | |
2253 | /* write address */ |
2254 | writel(val: frame_start | FEC_MMFR_OP_ADDR_WRITE | |
2255 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2256 | FEC_MMFR_TA | (regnum & 0xFFFF), |
2257 | addr: fep->hwp + FEC_MII_DATA); |
2258 | |
2259 | /* wait for end of transfer */ |
2260 | ret = fec_enet_mdio_wait(fep); |
2261 | if (ret) { |
2262 | netdev_err(dev: fep->netdev, format: "MDIO address write timeout\n" ); |
2263 | goto out; |
2264 | } |
2265 | |
2266 | /* start a write op */ |
2267 | writel(val: frame_start | FEC_MMFR_OP_WRITE | |
2268 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2269 | FEC_MMFR_TA | FEC_MMFR_DATA(value), |
2270 | addr: fep->hwp + FEC_MII_DATA); |
2271 | |
2272 | /* wait for end of transfer */ |
2273 | ret = fec_enet_mdio_wait(fep); |
2274 | if (ret) |
2275 | netdev_err(dev: fep->netdev, format: "MDIO write timeout\n" ); |
2276 | |
2277 | out: |
2278 | pm_runtime_mark_last_busy(dev); |
2279 | pm_runtime_put_autosuspend(dev); |
2280 | |
2281 | return ret; |
2282 | } |
2283 | |
2284 | static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) |
2285 | { |
2286 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2287 | struct phy_device *phy_dev = ndev->phydev; |
2288 | |
2289 | if (phy_dev) { |
2290 | phy_reset_after_clk_enable(phydev: phy_dev); |
2291 | } else if (fep->phy_node) { |
2292 | /* |
2293 | * If the PHY still is not bound to the MAC, but there is |
2294 | * OF PHY node and a matching PHY device instance already, |
2295 | * use the OF PHY node to obtain the PHY device instance, |
2296 | * and then use that PHY device instance when triggering |
2297 | * the PHY reset. |
2298 | */ |
2299 | phy_dev = of_phy_find_device(phy_np: fep->phy_node); |
2300 | phy_reset_after_clk_enable(phydev: phy_dev); |
2301 | put_device(dev: &phy_dev->mdio.dev); |
2302 | } |
2303 | } |
2304 | |
2305 | static int fec_enet_clk_enable(struct net_device *ndev, bool enable) |
2306 | { |
2307 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2308 | int ret; |
2309 | |
2310 | if (enable) { |
2311 | ret = clk_prepare_enable(clk: fep->clk_enet_out); |
2312 | if (ret) |
2313 | return ret; |
2314 | |
2315 | if (fep->clk_ptp) { |
2316 | mutex_lock(&fep->ptp_clk_mutex); |
2317 | ret = clk_prepare_enable(clk: fep->clk_ptp); |
2318 | if (ret) { |
2319 | mutex_unlock(lock: &fep->ptp_clk_mutex); |
2320 | goto failed_clk_ptp; |
2321 | } else { |
2322 | fep->ptp_clk_on = true; |
2323 | } |
2324 | mutex_unlock(lock: &fep->ptp_clk_mutex); |
2325 | } |
2326 | |
2327 | ret = clk_prepare_enable(clk: fep->clk_ref); |
2328 | if (ret) |
2329 | goto failed_clk_ref; |
2330 | |
2331 | ret = clk_prepare_enable(clk: fep->clk_2x_txclk); |
2332 | if (ret) |
2333 | goto failed_clk_2x_txclk; |
2334 | |
2335 | fec_enet_phy_reset_after_clk_enable(ndev); |
2336 | } else { |
2337 | clk_disable_unprepare(clk: fep->clk_enet_out); |
2338 | if (fep->clk_ptp) { |
2339 | mutex_lock(&fep->ptp_clk_mutex); |
2340 | clk_disable_unprepare(clk: fep->clk_ptp); |
2341 | fep->ptp_clk_on = false; |
2342 | mutex_unlock(lock: &fep->ptp_clk_mutex); |
2343 | } |
2344 | clk_disable_unprepare(clk: fep->clk_ref); |
2345 | clk_disable_unprepare(clk: fep->clk_2x_txclk); |
2346 | } |
2347 | |
2348 | return 0; |
2349 | |
2350 | failed_clk_2x_txclk: |
2351 | if (fep->clk_ref) |
2352 | clk_disable_unprepare(clk: fep->clk_ref); |
2353 | failed_clk_ref: |
2354 | if (fep->clk_ptp) { |
2355 | mutex_lock(&fep->ptp_clk_mutex); |
2356 | clk_disable_unprepare(clk: fep->clk_ptp); |
2357 | fep->ptp_clk_on = false; |
2358 | mutex_unlock(lock: &fep->ptp_clk_mutex); |
2359 | } |
2360 | failed_clk_ptp: |
2361 | clk_disable_unprepare(clk: fep->clk_enet_out); |
2362 | |
2363 | return ret; |
2364 | } |
2365 | |
2366 | static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, |
2367 | struct device_node *np) |
2368 | { |
2369 | u32 rgmii_tx_delay, rgmii_rx_delay; |
2370 | |
2371 | /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ |
2372 | if (!of_property_read_u32(np, propname: "tx-internal-delay-ps" , out_value: &rgmii_tx_delay)) { |
2373 | if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { |
2374 | dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps" ); |
2375 | return -EINVAL; |
2376 | } else if (rgmii_tx_delay == 2000) { |
2377 | fep->rgmii_txc_dly = true; |
2378 | } |
2379 | } |
2380 | |
2381 | /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ |
2382 | if (!of_property_read_u32(np, propname: "rx-internal-delay-ps" , out_value: &rgmii_rx_delay)) { |
2383 | if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { |
2384 | dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps" ); |
2385 | return -EINVAL; |
2386 | } else if (rgmii_rx_delay == 2000) { |
2387 | fep->rgmii_rxc_dly = true; |
2388 | } |
2389 | } |
2390 | |
2391 | return 0; |
2392 | } |
2393 | |
2394 | static int fec_enet_mii_probe(struct net_device *ndev) |
2395 | { |
2396 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2397 | struct phy_device *phy_dev = NULL; |
2398 | char mdio_bus_id[MII_BUS_ID_SIZE]; |
2399 | char phy_name[MII_BUS_ID_SIZE + 3]; |
2400 | int phy_id; |
2401 | int dev_id = fep->dev_id; |
2402 | |
2403 | if (fep->phy_node) { |
2404 | phy_dev = of_phy_connect(dev: ndev, phy_np: fep->phy_node, |
2405 | hndlr: &fec_enet_adjust_link, flags: 0, |
2406 | iface: fep->phy_interface); |
2407 | if (!phy_dev) { |
2408 | netdev_err(dev: ndev, format: "Unable to connect to phy\n" ); |
2409 | return -ENODEV; |
2410 | } |
2411 | } else { |
2412 | /* check for attached phy */ |
2413 | for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { |
2414 | if (!mdiobus_is_registered_device(bus: fep->mii_bus, addr: phy_id)) |
2415 | continue; |
2416 | if (dev_id--) |
2417 | continue; |
2418 | strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); |
2419 | break; |
2420 | } |
2421 | |
2422 | if (phy_id >= PHY_MAX_ADDR) { |
2423 | netdev_info(dev: ndev, format: "no PHY, assuming direct connection to switch\n" ); |
2424 | strscpy(mdio_bus_id, "fixed-0" , MII_BUS_ID_SIZE); |
2425 | phy_id = 0; |
2426 | } |
2427 | |
2428 | snprintf(buf: phy_name, size: sizeof(phy_name), |
2429 | PHY_ID_FMT, mdio_bus_id, phy_id); |
2430 | phy_dev = phy_connect(dev: ndev, bus_id: phy_name, handler: &fec_enet_adjust_link, |
2431 | interface: fep->phy_interface); |
2432 | } |
2433 | |
2434 | if (IS_ERR(ptr: phy_dev)) { |
2435 | netdev_err(dev: ndev, format: "could not attach to PHY\n" ); |
2436 | return PTR_ERR(ptr: phy_dev); |
2437 | } |
2438 | |
2439 | /* mask with MAC supported features */ |
2440 | if (fep->quirks & FEC_QUIRK_HAS_GBIT) { |
2441 | phy_set_max_speed(phydev: phy_dev, max_speed: 1000); |
2442 | phy_remove_link_mode(phydev: phy_dev, |
2443 | link_mode: ETHTOOL_LINK_MODE_1000baseT_Half_BIT); |
2444 | #if !defined(CONFIG_M5272) |
2445 | phy_support_sym_pause(phydev: phy_dev); |
2446 | #endif |
2447 | } |
2448 | else |
2449 | phy_set_max_speed(phydev: phy_dev, max_speed: 100); |
2450 | |
2451 | if (fep->quirks & FEC_QUIRK_HAS_EEE) |
2452 | phy_support_eee(phydev: phy_dev); |
2453 | |
2454 | fep->link = 0; |
2455 | fep->full_duplex = 0; |
2456 | |
2457 | phy_attached_info(phydev: phy_dev); |
2458 | |
2459 | return 0; |
2460 | } |
2461 | |
2462 | static int fec_enet_mii_init(struct platform_device *pdev) |
2463 | { |
2464 | static struct mii_bus *fec0_mii_bus; |
2465 | struct net_device *ndev = platform_get_drvdata(pdev); |
2466 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2467 | bool suppress_preamble = false; |
2468 | struct phy_device *phydev; |
2469 | struct device_node *node; |
2470 | int err = -ENXIO; |
2471 | u32 mii_speed, holdtime; |
2472 | u32 bus_freq; |
2473 | int addr; |
2474 | |
2475 | /* |
2476 | * The i.MX28 dual fec interfaces are not equal. |
2477 | * Here are the differences: |
2478 | * |
2479 | * - fec0 supports MII & RMII modes while fec1 only supports RMII |
2480 | * - fec0 acts as the 1588 time master while fec1 is slave |
2481 | * - external phys can only be configured by fec0 |
2482 | * |
2483 | * That is to say fec1 can not work independently. It only works |
2484 | * when fec0 is working. The reason behind this design is that the |
2485 | * second interface is added primarily for Switch mode. |
2486 | * |
2487 | * Because of the last point above, both phys are attached on fec0 |
2488 | * mdio interface in board design, and need to be configured by |
2489 | * fec0 mii_bus. |
2490 | */ |
2491 | if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { |
2492 | /* fec1 uses fec0 mii_bus */ |
2493 | if (mii_cnt && fec0_mii_bus) { |
2494 | fep->mii_bus = fec0_mii_bus; |
2495 | mii_cnt++; |
2496 | return 0; |
2497 | } |
2498 | return -ENOENT; |
2499 | } |
2500 | |
2501 | bus_freq = 2500000; /* 2.5MHz by default */ |
2502 | node = of_get_child_by_name(node: pdev->dev.of_node, name: "mdio" ); |
2503 | if (node) { |
2504 | of_property_read_u32(np: node, propname: "clock-frequency" , out_value: &bus_freq); |
2505 | suppress_preamble = of_property_read_bool(np: node, |
2506 | propname: "suppress-preamble" ); |
2507 | } |
2508 | |
2509 | /* |
2510 | * Set MII speed (= clk_get_rate() / 2 * phy_speed) |
2511 | * |
2512 | * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while |
2513 | * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 |
2514 | * Reference Manual has an error on this, and gets fixed on i.MX6Q |
2515 | * document. |
2516 | */ |
2517 | mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); |
2518 | if (fep->quirks & FEC_QUIRK_ENET_MAC) |
2519 | mii_speed--; |
2520 | if (mii_speed > 63) { |
2521 | dev_err(&pdev->dev, |
2522 | "fec clock (%lu) too fast to get right mii speed\n" , |
2523 | clk_get_rate(fep->clk_ipg)); |
2524 | err = -EINVAL; |
2525 | goto err_out; |
2526 | } |
2527 | |
2528 | /* |
2529 | * The i.MX28 and i.MX6 types have another filed in the MSCR (aka |
2530 | * MII_SPEED) register that defines the MDIO output hold time. Earlier |
2531 | * versions are RAZ there, so just ignore the difference and write the |
2532 | * register always. |
2533 | * The minimal hold time according to IEE802.3 (clause 22) is 10 ns. |
2534 | * HOLDTIME + 1 is the number of clk cycles the fec is holding the |
2535 | * output. |
2536 | * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). |
2537 | * Given that ceil(clkrate / 5000000) <= 64, the calculation for |
2538 | * holdtime cannot result in a value greater than 3. |
2539 | */ |
2540 | holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; |
2541 | |
2542 | fep->phy_speed = mii_speed << 1 | holdtime << 8; |
2543 | |
2544 | if (suppress_preamble) |
2545 | fep->phy_speed |= BIT(7); |
2546 | |
2547 | if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { |
2548 | /* Clear MMFR to avoid to generate MII event by writing MSCR. |
2549 | * MII event generation condition: |
2550 | * - writing MSCR: |
2551 | * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & |
2552 | * mscr_reg_data_in[7:0] != 0 |
2553 | * - writing MMFR: |
2554 | * - mscr[7:0]_not_zero |
2555 | */ |
2556 | writel(val: 0, addr: fep->hwp + FEC_MII_DATA); |
2557 | } |
2558 | |
2559 | writel(val: fep->phy_speed, addr: fep->hwp + FEC_MII_SPEED); |
2560 | |
2561 | /* Clear any pending transaction complete indication */ |
2562 | writel(FEC_ENET_MII, addr: fep->hwp + FEC_IEVENT); |
2563 | |
2564 | fep->mii_bus = mdiobus_alloc(); |
2565 | if (fep->mii_bus == NULL) { |
2566 | err = -ENOMEM; |
2567 | goto err_out; |
2568 | } |
2569 | |
2570 | fep->mii_bus->name = "fec_enet_mii_bus" ; |
2571 | fep->mii_bus->read = fec_enet_mdio_read_c22; |
2572 | fep->mii_bus->write = fec_enet_mdio_write_c22; |
2573 | if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { |
2574 | fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; |
2575 | fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; |
2576 | } |
2577 | snprintf(buf: fep->mii_bus->id, MII_BUS_ID_SIZE, fmt: "%s-%x" , |
2578 | pdev->name, fep->dev_id + 1); |
2579 | fep->mii_bus->priv = fep; |
2580 | fep->mii_bus->parent = &pdev->dev; |
2581 | |
2582 | err = of_mdiobus_register(mdio: fep->mii_bus, np: node); |
2583 | if (err) |
2584 | goto err_out_free_mdiobus; |
2585 | of_node_put(node); |
2586 | |
2587 | /* find all the PHY devices on the bus and set mac_managed_pm to true */ |
2588 | for (addr = 0; addr < PHY_MAX_ADDR; addr++) { |
2589 | phydev = mdiobus_get_phy(bus: fep->mii_bus, addr); |
2590 | if (phydev) |
2591 | phydev->mac_managed_pm = true; |
2592 | } |
2593 | |
2594 | mii_cnt++; |
2595 | |
2596 | /* save fec0 mii_bus */ |
2597 | if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) |
2598 | fec0_mii_bus = fep->mii_bus; |
2599 | |
2600 | return 0; |
2601 | |
2602 | err_out_free_mdiobus: |
2603 | mdiobus_free(bus: fep->mii_bus); |
2604 | err_out: |
2605 | of_node_put(node); |
2606 | return err; |
2607 | } |
2608 | |
2609 | static void fec_enet_mii_remove(struct fec_enet_private *fep) |
2610 | { |
2611 | if (--mii_cnt == 0) { |
2612 | mdiobus_unregister(bus: fep->mii_bus); |
2613 | mdiobus_free(bus: fep->mii_bus); |
2614 | } |
2615 | } |
2616 | |
2617 | static void fec_enet_get_drvinfo(struct net_device *ndev, |
2618 | struct ethtool_drvinfo *info) |
2619 | { |
2620 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2621 | |
2622 | strscpy(info->driver, fep->pdev->dev.driver->name, |
2623 | sizeof(info->driver)); |
2624 | strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); |
2625 | } |
2626 | |
2627 | static int fec_enet_get_regs_len(struct net_device *ndev) |
2628 | { |
2629 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2630 | struct resource *r; |
2631 | int s = 0; |
2632 | |
2633 | r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); |
2634 | if (r) |
2635 | s = resource_size(res: r); |
2636 | |
2637 | return s; |
2638 | } |
2639 | |
/* List of registers that can safely be read to dump them with ethtool */
2641 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
2642 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ |
2643 | defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) |
2644 | static __u32 fec_enet_register_version = 2; |
2645 | static u32 fec_enet_register_offset[] = { |
2646 | FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, |
2647 | FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, |
2648 | FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, |
2649 | FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, |
2650 | FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, |
2651 | FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, |
2652 | FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, |
2653 | FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, |
2654 | FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, |
2655 | FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, |
2656 | FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, |
2657 | FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, |
2658 | RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, |
2659 | RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, |
2660 | RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, |
2661 | RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, |
2662 | RMON_T_P_GTE2048, RMON_T_OCTETS, |
2663 | IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, |
2664 | IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, |
2665 | IEEE_T_FDXFC, IEEE_T_OCTETS_OK, |
2666 | RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, |
2667 | RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, |
2668 | RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, |
2669 | RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, |
2670 | RMON_R_P_GTE2048, RMON_R_OCTETS, |
2671 | IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, |
2672 | IEEE_R_FDXFC, IEEE_R_OCTETS_OK |
2673 | }; |
/* Register dump list for i.MX6UL: subset of the generic ARM list, used by
 * fec_enet_get_regs() so ethtool -d only reads registers this SoC
 * implements.  Order defines where each value lands in the dump buffer.
 */
static u32 fec_enet_register_offset_6ul[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
	FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
	FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
/* Non-ARM/ColdFire-era FEC (e.g. MPC8xx): smaller register file, reported
 * to ethtool as dump version 1.
 */
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif
2713 | |
/* ethtool -d handler: snapshot the FEC register file into @regbuf.
 *
 * The list of readable registers is selected at run time on ARM-class
 * parts (i.MX6UL uses a reduced list — presumably it does not implement
 * the registers missing from that list; NOTE(review): confirm against the
 * reference manual) and is fixed at compile time on ColdFire.  Each value
 * is stored at its register offset within the dump buffer, so unread
 * slots remain zero from the memset() below.
 */
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	struct device *dev = &fep->pdev->dev;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;
	int ret;
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
	u32 *reg_list;
	u32 reg_cnt;

	if (!of_machine_is_compatible(compat: "fsl,imx6ul" )) {
		reg_list = fec_enet_register_offset;
		reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
	} else {
		reg_list = fec_enet_register_offset_6ul;
		reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
	}
#else
	/* coldfire */
	static u32 *reg_list = fec_enet_register_offset;
	static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
#endif
	/* Clocks must be running before touching the register file */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return;

	regs->version = fec_enet_register_version;

	memset(buf, 0, regs->len);

	for (i = 0; i < reg_cnt; i++) {
		off = reg_list[i];

		/* R_BOUND/R_FSTART only exist on cores with the FRREG quirk */
		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
			continue;

		/* byte offset -> u32 index; value lands at its own offset */
		off >>= 2;
		buf[off] = readl(addr: &theregs[off]);
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
2763 | |
2764 | static int fec_enet_get_ts_info(struct net_device *ndev, |
2765 | struct ethtool_ts_info *info) |
2766 | { |
2767 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2768 | |
2769 | if (fep->bufdesc_ex) { |
2770 | |
2771 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
2772 | SOF_TIMESTAMPING_RX_SOFTWARE | |
2773 | SOF_TIMESTAMPING_SOFTWARE | |
2774 | SOF_TIMESTAMPING_TX_HARDWARE | |
2775 | SOF_TIMESTAMPING_RX_HARDWARE | |
2776 | SOF_TIMESTAMPING_RAW_HARDWARE; |
2777 | if (fep->ptp_clock) |
2778 | info->phc_index = ptp_clock_index(ptp: fep->ptp_clock); |
2779 | else |
2780 | info->phc_index = -1; |
2781 | |
2782 | info->tx_types = (1 << HWTSTAMP_TX_OFF) | |
2783 | (1 << HWTSTAMP_TX_ON); |
2784 | |
2785 | info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | |
2786 | (1 << HWTSTAMP_FILTER_ALL); |
2787 | return 0; |
2788 | } else { |
2789 | return ethtool_op_get_ts_info(dev: ndev, eti: info); |
2790 | } |
2791 | } |
2792 | |
2793 | #if !defined(CONFIG_M5272) |
2794 | |
2795 | static void fec_enet_get_pauseparam(struct net_device *ndev, |
2796 | struct ethtool_pauseparam *pause) |
2797 | { |
2798 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2799 | |
2800 | pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; |
2801 | pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; |
2802 | pause->rx_pause = pause->tx_pause; |
2803 | } |
2804 | |
/* Configure pause frames.  Hardware supports only symmetric pause, so tx
 * and rx must be requested together.  When enabled with autoneg, the MAC
 * is stopped and a new autonegotiation started; the MAC is then restarted
 * with tx quiesced so the new flow-control setting takes effect.
 */
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);

	if (!ndev->phydev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(dev: ndev,
			format: "hardware only support enable/disable both tx and rx" );
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	phy_set_sym_pause(phydev: ndev->phydev, rx: pause->rx_pause, tx: pause->tx_pause,
		autoneg: pause->autoneg);

	if (pause->autoneg) {
		/* Stop the MAC before renegotiating so the link comes back
		 * up with the new pause advertisement.
		 */
		if (netif_running(dev: ndev))
			fec_stop(ndev);
		phy_start_aneg(phydev: ndev->phydev);
	}
	if (netif_running(dev: ndev)) {
		/* Quiesce NAPI and tx around the MAC restart */
		napi_disable(n: &fep->napi);
		netif_tx_lock_bh(dev: ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(dev: ndev);
		netif_tx_unlock_bh(dev: ndev);
		napi_enable(n: &fep->napi);
	}

	return 0;
}
2844 | |
/* Hardware MIB counters exported via ethtool -S: ethtool string name and
 * the register offset the counter is read from.
 */
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped" , RMON_T_DROP },
	{ "tx_packets" , RMON_T_PACKETS },
	{ "tx_broadcast" , RMON_T_BC_PKT },
	{ "tx_multicast" , RMON_T_MC_PKT },
	{ "tx_crc_errors" , RMON_T_CRC_ALIGN },
	{ "tx_undersize" , RMON_T_UNDERSIZE },
	{ "tx_oversize" , RMON_T_OVERSIZE },
	{ "tx_fragment" , RMON_T_FRAG },
	{ "tx_jabber" , RMON_T_JAB },
	{ "tx_collision" , RMON_T_COL },
	{ "tx_64byte" , RMON_T_P64 },
	{ "tx_65to127byte" , RMON_T_P65TO127 },
	{ "tx_128to255byte" , RMON_T_P128TO255 },
	{ "tx_256to511byte" , RMON_T_P256TO511 },
	{ "tx_512to1023byte" , RMON_T_P512TO1023 },
	{ "tx_1024to2047byte" , RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte" , RMON_T_P_GTE2048 },
	{ "tx_octets" , RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop" , IEEE_T_DROP },
	{ "IEEE_tx_frame_ok" , IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col" , IEEE_T_1COL },
	{ "IEEE_tx_mcol" , IEEE_T_MCOL },
	{ "IEEE_tx_def" , IEEE_T_DEF },
	{ "IEEE_tx_lcol" , IEEE_T_LCOL },
	{ "IEEE_tx_excol" , IEEE_T_EXCOL },
	{ "IEEE_tx_macerr" , IEEE_T_MACERR },
	{ "IEEE_tx_cserr" , IEEE_T_CSERR },
	{ "IEEE_tx_sqe" , IEEE_T_SQE },
	{ "IEEE_tx_fdxfc" , IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok" , IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets" , RMON_R_PACKETS },
	{ "rx_broadcast" , RMON_R_BC_PKT },
	{ "rx_multicast" , RMON_R_MC_PKT },
	{ "rx_crc_errors" , RMON_R_CRC_ALIGN },
	{ "rx_undersize" , RMON_R_UNDERSIZE },
	{ "rx_oversize" , RMON_R_OVERSIZE },
	{ "rx_fragment" , RMON_R_FRAG },
	{ "rx_jabber" , RMON_R_JAB },
	{ "rx_64byte" , RMON_R_P64 },
	{ "rx_65to127byte" , RMON_R_P65TO127 },
	{ "rx_128to255byte" , RMON_R_P128TO255 },
	{ "rx_256to511byte" , RMON_R_P256TO511 },
	{ "rx_512to1023byte" , RMON_R_P512TO1023 },
	{ "rx_1024to2047byte" , RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte" , RMON_R_P_GTE2048 },
	{ "rx_octets" , RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop" , IEEE_R_DROP },
	{ "IEEE_rx_frame_ok" , IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc" , IEEE_R_CRC },
	{ "IEEE_rx_align" , IEEE_R_ALIGN },
	{ "IEEE_rx_macerr" , IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc" , IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok" , IEEE_R_OCTETS_OK },
};
2910 | |
2911 | #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) |
2912 | |
/* ethtool names for the per-rxq software XDP counters.  Array order must
 * match the enum values noted in the trailing comments, since the stats
 * are copied out by index in fec_enet_get_xdp_stats().
 */
static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
	"rx_xdp_redirect" ,           /* RX_XDP_REDIRECT = 0, */
	"rx_xdp_pass" ,               /* RX_XDP_PASS, */
	"rx_xdp_drop" ,               /* RX_XDP_DROP, */
	"rx_xdp_tx" ,                 /* RX_XDP_TX, */
	"rx_xdp_tx_errors" ,          /* RX_XDP_TX_ERRORS, */
	"tx_xdp_xmit" ,               /* TX_XDP_XMIT, */
	"tx_xdp_xmit_errors" ,        /* TX_XDP_XMIT_ERRORS, */
};
2922 | |
/* Snapshot the hardware MIB counters into fep->ethtool_stats[] so the
 * last values remain reportable after the interface (and its clocks) go
 * down.  Caller must ensure the device is clocked (see netif_running()
 * check in fec_enet_get_ethtool_stats()).
 */
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		fep->ethtool_stats[i] = readl(addr: fep->hwp + fec_stats[i].offset);
}
2931 | |
2932 | static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) |
2933 | { |
2934 | u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; |
2935 | struct fec_enet_priv_rx_q *rxq; |
2936 | int i, j; |
2937 | |
2938 | for (i = fep->num_rx_queues - 1; i >= 0; i--) { |
2939 | rxq = fep->rx_queue[i]; |
2940 | |
2941 | for (j = 0; j < XDP_STATS_TOTAL; j++) |
2942 | xdp_stats[j] += rxq->stats[j]; |
2943 | } |
2944 | |
2945 | memcpy(data, xdp_stats, sizeof(xdp_stats)); |
2946 | } |
2947 | |
/* Accumulate page-pool statistics from every rx queue into @data via the
 * page_pool ethtool helpers.  Compiles to a no-op when the kernel is
 * built without CONFIG_PAGE_POOL_STATS.
 */
static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
		rxq = fep->rx_queue[i];

		/* Queue may not have a pool yet (buffers not allocated) */
		if (!rxq->page_pool)
			continue;

		/* Accumulates into 'stats' across calls */
		page_pool_get_stats(pool: rxq->page_pool, stats: &stats);
	}

	page_pool_ethtool_stats_get(data, stats: &stats);
#endif
}
2967 | |
/* ethtool -S handler.  Output layout (must match fec_enet_get_strings()):
 * hardware MIB counters, then XDP counters, then page-pool counters.
 */
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Only touch the registers while the interface is up (clocked);
	 * otherwise report the values cached at the last update.
	 */
	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
	data += FEC_STATS_SIZE / sizeof(u64);

	fec_enet_get_xdp_stats(fep, data);
	data += XDP_STATS_TOTAL;

	fec_enet_page_pool_stats(fep, data);
}
2984 | |
2985 | static void fec_enet_get_strings(struct net_device *netdev, |
2986 | u32 stringset, u8 *data) |
2987 | { |
2988 | int i; |
2989 | switch (stringset) { |
2990 | case ETH_SS_STATS: |
2991 | for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { |
2992 | ethtool_puts(data: &data, str: fec_stats[i].name); |
2993 | } |
2994 | for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { |
2995 | ethtool_puts(data: &data, str: fec_xdp_stat_strs[i]); |
2996 | } |
2997 | page_pool_ethtool_stats_get_strings(data); |
2998 | |
2999 | break; |
3000 | case ETH_SS_TEST: |
3001 | net_selftest_get_strings(data); |
3002 | break; |
3003 | } |
3004 | } |
3005 | |
3006 | static int fec_enet_get_sset_count(struct net_device *dev, int sset) |
3007 | { |
3008 | int count; |
3009 | |
3010 | switch (sset) { |
3011 | case ETH_SS_STATS: |
3012 | count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; |
3013 | count += page_pool_ethtool_stats_get_count(); |
3014 | return count; |
3015 | |
3016 | case ETH_SS_TEST: |
3017 | return net_selftest_get_count(); |
3018 | default: |
3019 | return -EOPNOTSUPP; |
3020 | } |
3021 | } |
3022 | |
/* Zero all hardware MIB counters and the software XDP counters.  The MIB
 * block is frozen while its counter registers are written, then released.
 */
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_rx_q *rxq;
	int i, j;

	/* Disable MIB statistics counters */
	writel(FEC_MIB_CTRLSTAT_DISABLE, addr: fep->hwp + FEC_MIB_CTRLSTAT);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		writel(val: 0, addr: fep->hwp + fec_stats[i].offset);

	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
		rxq = fep->rx_queue[i];
		for (j = 0; j < XDP_STATS_TOTAL; j++)
			rxq->stats[j] = 0;
	}

	/* Re-enable MIB statistics counters (clear the disable bit) */
	writel(val: 0, addr: fep->hwp + FEC_MIB_CTRLSTAT);
}
3044 | |
#else /* !defined(CONFIG_M5272) */
/* M5272 builds: no statistics support, so provide empty stubs and a
 * zero-sized stats buffer for the shared call sites.
 */
#define	FEC_STATS_SIZE	0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}

static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */
3055 | |
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 *
 * i.e. convert a microsecond interval into interrupt-coalescing timer
 * ticks, where one tick is 64 AHB clock cycles.
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);

	/* itr_clk_rate/64000 = ticks per millisecond; scale by us/1000 */
	return us * (fep->itr_clk_rate / 64000) / 1000;
}
3066 | |
3067 | /* Set threshold for interrupt coalescing */ |
3068 | static void fec_enet_itr_coal_set(struct net_device *ndev) |
3069 | { |
3070 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3071 | int rx_itr, tx_itr; |
3072 | |
3073 | /* Must be greater than zero to avoid unpredictable behavior */ |
3074 | if (!fep->rx_time_itr || !fep->rx_pkts_itr || |
3075 | !fep->tx_time_itr || !fep->tx_pkts_itr) |
3076 | return; |
3077 | |
3078 | /* Select enet system clock as Interrupt Coalescing |
3079 | * timer Clock Source |
3080 | */ |
3081 | rx_itr = FEC_ITR_CLK_SEL; |
3082 | tx_itr = FEC_ITR_CLK_SEL; |
3083 | |
3084 | /* set ICFT and ICTT */ |
3085 | rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); |
3086 | rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); |
3087 | tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); |
3088 | tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); |
3089 | |
3090 | rx_itr |= FEC_ITR_EN; |
3091 | tx_itr |= FEC_ITR_EN; |
3092 | |
3093 | writel(val: tx_itr, addr: fep->hwp + FEC_TXIC0); |
3094 | writel(val: rx_itr, addr: fep->hwp + FEC_RXIC0); |
3095 | if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { |
3096 | writel(val: tx_itr, addr: fep->hwp + FEC_TXIC1); |
3097 | writel(val: rx_itr, addr: fep->hwp + FEC_RXIC1); |
3098 | writel(val: tx_itr, addr: fep->hwp + FEC_TXIC2); |
3099 | writel(val: rx_itr, addr: fep->hwp + FEC_RXIC2); |
3100 | } |
3101 | } |
3102 | |
3103 | static int fec_enet_get_coalesce(struct net_device *ndev, |
3104 | struct ethtool_coalesce *ec, |
3105 | struct kernel_ethtool_coalesce *kernel_coal, |
3106 | struct netlink_ext_ack *extack) |
3107 | { |
3108 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3109 | |
3110 | if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) |
3111 | return -EOPNOTSUPP; |
3112 | |
3113 | ec->rx_coalesce_usecs = fep->rx_time_itr; |
3114 | ec->rx_max_coalesced_frames = fep->rx_pkts_itr; |
3115 | |
3116 | ec->tx_coalesce_usecs = fep->tx_time_itr; |
3117 | ec->tx_max_coalesced_frames = fep->tx_pkts_itr; |
3118 | |
3119 | return 0; |
3120 | } |
3121 | |
3122 | static int fec_enet_set_coalesce(struct net_device *ndev, |
3123 | struct ethtool_coalesce *ec, |
3124 | struct kernel_ethtool_coalesce *kernel_coal, |
3125 | struct netlink_ext_ack *extack) |
3126 | { |
3127 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3128 | struct device *dev = &fep->pdev->dev; |
3129 | unsigned int cycle; |
3130 | |
3131 | if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) |
3132 | return -EOPNOTSUPP; |
3133 | |
3134 | if (ec->rx_max_coalesced_frames > 255) { |
3135 | dev_err(dev, "Rx coalesced frames exceed hardware limitation\n" ); |
3136 | return -EINVAL; |
3137 | } |
3138 | |
3139 | if (ec->tx_max_coalesced_frames > 255) { |
3140 | dev_err(dev, "Tx coalesced frame exceed hardware limitation\n" ); |
3141 | return -EINVAL; |
3142 | } |
3143 | |
3144 | cycle = fec_enet_us_to_itr_clock(ndev, us: ec->rx_coalesce_usecs); |
3145 | if (cycle > 0xFFFF) { |
3146 | dev_err(dev, "Rx coalesced usec exceed hardware limitation\n" ); |
3147 | return -EINVAL; |
3148 | } |
3149 | |
3150 | cycle = fec_enet_us_to_itr_clock(ndev, us: ec->tx_coalesce_usecs); |
3151 | if (cycle > 0xFFFF) { |
3152 | dev_err(dev, "Tx coalesced usec exceed hardware limitation\n" ); |
3153 | return -EINVAL; |
3154 | } |
3155 | |
3156 | fep->rx_time_itr = ec->rx_coalesce_usecs; |
3157 | fep->rx_pkts_itr = ec->rx_max_coalesced_frames; |
3158 | |
3159 | fep->tx_time_itr = ec->tx_coalesce_usecs; |
3160 | fep->tx_pkts_itr = ec->tx_max_coalesced_frames; |
3161 | |
3162 | fec_enet_itr_coal_set(ndev); |
3163 | |
3164 | return 0; |
3165 | } |
3166 | |
3167 | static int |
3168 | fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) |
3169 | { |
3170 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3171 | struct ethtool_keee *p = &fep->eee; |
3172 | |
3173 | if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) |
3174 | return -EOPNOTSUPP; |
3175 | |
3176 | if (!netif_running(dev: ndev)) |
3177 | return -ENETDOWN; |
3178 | |
3179 | edata->tx_lpi_timer = p->tx_lpi_timer; |
3180 | |
3181 | return phy_ethtool_get_eee(phydev: ndev->phydev, data: edata); |
3182 | } |
3183 | |
3184 | static int |
3185 | fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) |
3186 | { |
3187 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3188 | struct ethtool_keee *p = &fep->eee; |
3189 | |
3190 | if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) |
3191 | return -EOPNOTSUPP; |
3192 | |
3193 | if (!netif_running(dev: ndev)) |
3194 | return -ENETDOWN; |
3195 | |
3196 | p->tx_lpi_timer = edata->tx_lpi_timer; |
3197 | |
3198 | return phy_ethtool_set_eee(phydev: ndev->phydev, data: edata); |
3199 | } |
3200 | |
3201 | static void |
3202 | fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) |
3203 | { |
3204 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3205 | |
3206 | if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { |
3207 | wol->supported = WAKE_MAGIC; |
3208 | wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; |
3209 | } else { |
3210 | wol->supported = wol->wolopts = 0; |
3211 | } |
3212 | } |
3213 | |
3214 | static int |
3215 | fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) |
3216 | { |
3217 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3218 | |
3219 | if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) |
3220 | return -EINVAL; |
3221 | |
3222 | if (wol->wolopts & ~WAKE_MAGIC) |
3223 | return -EINVAL; |
3224 | |
3225 | device_set_wakeup_enable(dev: &ndev->dev, enable: wol->wolopts & WAKE_MAGIC); |
3226 | if (device_may_wakeup(dev: &ndev->dev)) |
3227 | fep->wol_flag |= FEC_WOL_FLAG_ENABLE; |
3228 | else |
3229 | fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); |
3230 | |
3231 | return 0; |
3232 | } |
3233 | |
/* ethtool operations table.  Statistics/pause callbacks are compiled out
 * on M5272, matching the stubbed implementations above.
 */
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_eee		= fec_enet_get_eee,
	.set_eee		= fec_enet_set_eee,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.self_test		= net_selftest,
};
3260 | |
/* Release all rx and tx buffers.  Rx pages go back to (and then tear
 * down) the per-queue page pool; tx entries are freed according to their
 * recorded type: skb, XDP frame, or page-pool page (XDP_TX).
 */
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	unsigned int i;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		for (i = 0; i < rxq->bd.ring_size; i++)
			page_pool_put_full_page(pool: rxq->page_pool, page: rxq->rx_skb_info[i].page, allow_direct: false);

		/* Reset the software XDP counters along with the buffers */
		for (i = 0; i < XDP_STATS_TOTAL; i++)
			rxq->stats[i] = 0;

		/* Unregister the XDP rxq info before destroying the pool */
		if (xdp_rxq_info_is_reg(xdp_rxq: &rxq->xdp_rxq))
			xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq);
		page_pool_destroy(pool: rxq->page_pool);
		rxq->page_pool = NULL;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(objp: txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;

			/* Empty slot: just normalize the type tag */
			if (!txq->tx_buf[i].buf_p) {
				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
				continue;
			}

			/* Free according to what buf_p actually points at */
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				dev_kfree_skb(txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				xdp_return_frame(xdpf: txq->tx_buf[i].buf_p);
			} else {
				struct page *page = txq->tx_buf[i].buf_p;

				page_pool_put_page(pool: page->pp, page, dma_sync_size: 0, allow_direct: false);
			}

			txq->tx_buf[i].buf_p = NULL;
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
		}
	}
}
3309 | |
/* Free all queue structures and the tx TSO header DMA areas.  Safe to
 * call on a partially-allocated set (used as the error path of
 * fec_enet_alloc_queue()): NULL queues and queues without tso_hdrs are
 * skipped.
 */
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			fec_dma_free(dev: &fep->pdev->dev,
				size: txq->bd.ring_size * TSO_HEADER_SIZE,
				cpu_addr: txq->tso_hdrs, handle: txq->tso_hdrs_dma);
		}

	/* kfree(NULL) is a no-op, so unallocated slots are fine */
	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(objp: fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(objp: fep->tx_queue[i]);
}
3329 | |
/* Allocate the per-queue bookkeeping structures: one priv_tx_q (with a
 * DMA-coherent TSO header area) per tx queue and one priv_rx_q per rx
 * queue.  On any failure, everything allocated so far is released via
 * fec_enet_free_queue().
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(size: sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		/* Flow-control thresholds for stopping/waking the tx queue */
		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;

		/* One TSO header slot per descriptor, DMA-coherent */
		txq->tso_hdrs = fec_dma_alloc(dev: &fep->pdev->dev,
					size: txq->bd.ring_size * TSO_HEADER_SIZE,
					handle: &txq->tso_hdrs_dma, GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(size: sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}
3377 | |
/* Populate one rx queue: create its page pool, then hand one page to
 * each buffer descriptor (DMA address offset by the XDP headroom) and
 * mark the descriptor empty for the hardware.  The last descriptor gets
 * the WRAP bit so the ring closes on itself.
 *
 * Returns 0 on success; on failure all buffers (all queues) are freed.
 */
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct fec_enet_priv_rx_q *rxq;
	dma_addr_t phys_addr;
	struct bufdesc *bdp;
	struct page *page;
	int i, err;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;

	err = fec_enet_create_page_pool(fep, rxq, size: rxq->bd.ring_size);
	if (err < 0) {
		netdev_err(dev: ndev, format: "%s failed queue %d (%d)\n" , __func__, queue, err);
		return err;
	}

	for (i = 0; i < rxq->bd.ring_size; i++) {
		page = page_pool_dev_alloc_pages(pool: rxq->page_pool);
		if (!page)
			goto err_alloc;

		/* Hardware writes after the reserved XDP headroom */
		phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
		bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);

		rxq->rx_skb_info[i].page = page;
		rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		/* Extended descriptors: enable per-frame rx interrupt */
		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, bd: &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, bd: &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
3426 | |
/* Populate one tx queue: allocate a bounce buffer per descriptor and
 * initialise each descriptor to an empty state.  The last descriptor
 * gets the WRAP bit so the ring closes on itself.
 *
 * Returns 0 on success; on failure all buffers (all queues) are freed.
 */
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	unsigned int i;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		/* Bounce buffer for frames that can't be DMA'd in place */
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		/* Extended descriptors: enable per-frame tx interrupt */
		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, bd: &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
3463 | |
3464 | static int fec_enet_alloc_buffers(struct net_device *ndev) |
3465 | { |
3466 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3467 | unsigned int i; |
3468 | |
3469 | for (i = 0; i < fep->num_rx_queues; i++) |
3470 | if (fec_enet_alloc_rxq_buffers(ndev, queue: i)) |
3471 | return -ENOMEM; |
3472 | |
3473 | for (i = 0; i < fep->num_tx_queues; i++) |
3474 | if (fec_enet_alloc_txq_buffers(ndev, queue: i)) |
3475 | return -ENOMEM; |
3476 | return 0; |
3477 | } |
3478 | |
/* ndo_open handler: power up (runtime PM + pinctrl + clocks), allocate
 * the ring buffers, start the MAC, connect the PHY, and enable NAPI/tx.
 * Error paths unwind in reverse order via the labels at the bottom.
 */
static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int ret;
	bool reset_again;

	ret = pm_runtime_resume_and_get(dev: &fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(dev: &fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, enable: true);
	if (ret)
		goto clk_enable;

	/* During the first fec_enet_open call the PHY isn't probed at this
	 * point. Therefore the phy_reset_after_clk_enable() call within
	 * fec_enet_clk_enable() fails. As we need this reset in order to be
	 * sure the PHY is working correctly we check if we need to reset again
	 * later when the PHY is probed
	 */
	if (ndev->phydev && ndev->phydev->drv)
		reset_again = false;
	else
		reset_again = true;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Call phy_reset_after_clk_enable() again if it failed during
	 * phy_reset_after_clk_enable() before because the PHY wasn't probed.
	 */
	if (reset_again)
		fec_enet_phy_reset_after_clk_enable(ndev);

	/* Probe and connect to PHY when open the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	/* Keep CPU latency low while the interface is up (quirky parts) */
	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
		cpu_latency_qos_add_request(req: &fep->pm_qos_req, value: 0);

	napi_enable(n: &fep->napi);
	phy_start(phydev: ndev->phydev);
	netif_tx_start_all_queues(dev: ndev);

	device_set_wakeup_enable(dev: &ndev->dev, enable: fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, enable: false);
clk_enable:
	pm_runtime_mark_last_busy(dev: &fep->pdev->dev);
	pm_runtime_put_autosuspend(dev: &fep->pdev->dev);
	pinctrl_pm_select_sleep_state(dev: &fep->pdev->dev);
	return ret;
}
3553 | |
/* ndo_stop handler: stop the PHY, NAPI, tx and MAC, snapshot the MIB
 * counters while the clocks are still on, then power down (clocks,
 * pinctrl, runtime PM) and release the ring buffers.
 */
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);

	phy_stop(phydev: ndev->phydev);

	/* Device may already be detached (e.g. during suspend/error) */
	if (netif_device_present(dev: ndev)) {
		napi_disable(n: &fep->napi);
		netif_tx_disable(dev: ndev);
		fec_stop(ndev);
	}

	phy_disconnect(phydev: ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	/* Cache the counters before the clocks are gated */
	fec_enet_update_ethtool_stats(dev: ndev);

	fec_enet_clk_enable(ndev, enable: false);
	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
		cpu_latency_qos_remove_request(req: &fep->pm_qos_req);

	pinctrl_pm_select_sleep_state(dev: &fep->pdev->dev);
	pm_runtime_mark_last_busy(dev: &fep->pdev->dev);
	pm_runtime_put_autosuspend(dev: &fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}
3586 | |
3587 | /* Set or clear the multicast filter for this adaptor. |
3588 | * Skeleton taken from sunlance driver. |
3589 | * The CPM Ethernet implementation allows Multicast as well as individual |
3590 | * MAC address filtering. Some of the drivers check to make sure it is |
3591 | * a group multicast address, and discard those that are not. I guess I |
3592 | * will do the same for now, but just remove the test if you want |
3593 | * individual filtering as well (do the upper net layers want or support |
3594 | * this kind of feature?). |
3595 | */ |
3596 | |
3597 | #define FEC_HASH_BITS 6 /* #bits in hash */ |
3598 | |
/* ndo_set_rx_mode handler: program promiscuous mode, all-multicast, or
 * the 64-bit multicast hash filter from the device's mc address list.
 */
static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct netdev_hw_addr *ha;
	unsigned int crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		/* Set the PROM bit (0x8) in R_CNTRL: accept everything */
		tmp = readl(addr: fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(val: tmp, addr: fep->hwp + FEC_R_CNTRL);
		return;
	}

	/* Leaving promiscuous mode: clear the PROM bit */
	tmp = readl(addr: fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(val: tmp, addr: fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(val: 0xffffffff, addr: fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(val: 0xffffffff, addr: fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses in hash register */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = ether_crc_le(ndev->addr_len, ha->addr);

		/* only upper 6 bits (FEC_HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

		/* Bits 32-63 live in the HIGH register, 0-31 in LOW */
		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(val: hash_high, addr: fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(val: hash_low, addr: fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
3647 | |
3648 | /* Set a MAC change in hardware. */ |
3649 | static int |
3650 | fec_set_mac_address(struct net_device *ndev, void *p) |
3651 | { |
3652 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3653 | struct sockaddr *addr = p; |
3654 | |
3655 | if (addr) { |
3656 | if (!is_valid_ether_addr(addr: addr->sa_data)) |
3657 | return -EADDRNOTAVAIL; |
3658 | eth_hw_addr_set(dev: ndev, addr: addr->sa_data); |
3659 | } |
3660 | |
3661 | /* Add netif status check here to avoid system hang in below case: |
3662 | * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; |
3663 | * After ethx down, fec all clocks are gated off and then register |
3664 | * access causes system hang. |
3665 | */ |
3666 | if (!netif_running(dev: ndev)) |
3667 | return 0; |
3668 | |
3669 | writel(val: ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | |
3670 | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), |
3671 | addr: fep->hwp + FEC_ADDR_LOW); |
3672 | writel(val: (ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), |
3673 | addr: fep->hwp + FEC_ADDR_HIGH); |
3674 | return 0; |
3675 | } |
3676 | |
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int irq;
	int i;

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = fep->irq[i];
		if (irq <= 0)
			continue;
		/* Mask the line while its handler runs so it cannot
		 * re-enter underneath us.
		 */
		disable_irq(irq);
		fec_enet_interrupt(irq, dev);
		enable_irq(irq);
	}
}
#endif
3699 | |
3700 | static inline void fec_enet_set_netdev_features(struct net_device *netdev, |
3701 | netdev_features_t features) |
3702 | { |
3703 | struct fec_enet_private *fep = netdev_priv(dev: netdev); |
3704 | netdev_features_t changed = features ^ netdev->features; |
3705 | |
3706 | netdev->features = features; |
3707 | |
3708 | /* Receive checksum has been changed */ |
3709 | if (changed & NETIF_F_RXCSUM) { |
3710 | if (features & NETIF_F_RXCSUM) |
3711 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; |
3712 | else |
3713 | fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; |
3714 | } |
3715 | } |
3716 | |
/* ndo_set_features: toggling NETIF_F_RXCSUM requires reprogramming the
 * MAC, so when the interface is running we quiesce NAPI and the TX path,
 * stop the controller, apply the new feature set and restart it.  When
 * the interface is down the flags are simply recorded for the next open.
 */
static int fec_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(dev: netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(dev: netdev) && changed & NETIF_F_RXCSUM) {
		/* Order matters: NAPI off and TX locked before fec_stop(),
		 * restart before the queues are woken again.
		 */
		napi_disable(n: &fep->napi);
		netif_tx_lock_bh(dev: netdev);
		fec_stop(ndev: netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(ndev: netdev);
		netif_tx_wake_all_queues(dev: netdev);
		netif_tx_unlock_bh(dev: netdev);
		napi_enable(n: &fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}
3738 | |
3739 | static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, |
3740 | struct net_device *sb_dev) |
3741 | { |
3742 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3743 | u16 vlan_tag = 0; |
3744 | |
3745 | if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) |
3746 | return netdev_pick_tx(dev: ndev, skb, NULL); |
3747 | |
3748 | /* VLAN is present in the payload.*/ |
3749 | if (eth_type_vlan(ethertype: skb->protocol)) { |
3750 | struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); |
3751 | |
3752 | vlan_tag = ntohs(vhdr->h_vlan_TCI); |
3753 | /* VLAN is present in the skb but not yet pushed in the payload.*/ |
3754 | } else if (skb_vlan_tag_present(skb)) { |
3755 | vlan_tag = skb->vlan_tci; |
3756 | } else { |
3757 | return vlan_tag; |
3758 | } |
3759 | |
3760 | return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; |
3761 | } |
3762 | |
/* ndo_bpf: attach/detach an XDP program.
 *
 * The queues are quiesced only when the interface is running; the program
 * pointer is swapped with xchg() so concurrent readers always see either
 * the old or the new program.  The redirect-target feature bit is cleared
 * before detaching and set after attaching, so it is never advertised
 * while no program is installed.
 */
static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	bool is_run = netif_running(dev);
	struct bpf_prog *old_prog;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* No need to support the SoCs that require to
		 * do the frame swap because the performance wouldn't be
		 * better than the skb mode.
		 */
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			return -EOPNOTSUPP;

		if (!bpf->prog)
			xdp_features_clear_redirect_target(dev);

		if (is_run) {
			napi_disable(n: &fep->napi);
			netif_tx_disable(dev);
		}

		old_prog = xchg(&fep->xdp_prog, bpf->prog);
		if (old_prog)
			bpf_prog_put(prog: old_prog);

		/* Reprogram the MAC so the RX path picks up the new setup. */
		fec_restart(ndev: dev);

		if (is_run) {
			napi_enable(n: &fep->napi);
			netif_tx_start_all_queues(dev);
		}

		if (bpf->prog)
			xdp_features_set_redirect_target(dev, support_sg: false);

		return 0;

	case XDP_SETUP_XSK_POOL:
		/* AF_XDP zero-copy is not implemented. */
		return -EOPNOTSUPP;

	default:
		return -EOPNOTSUPP;
	}
}
3809 | |
3810 | static int |
3811 | fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) |
3812 | { |
3813 | if (unlikely(index < 0)) |
3814 | return 0; |
3815 | |
3816 | return (index % fep->num_tx_queues); |
3817 | } |
3818 | |
/* Queue one XDP frame (@ndo_xmit true: an xdp_frame from ndo_xdp_xmit,
 * needing a fresh DMA mapping) or one XDP_TX buffer (@ndo_xmit false: a
 * page-pool page that is already DMA-mapped and only needs a sync of
 * @dma_sync_len bytes) onto @txq.
 *
 * Caller must hold the netdev TX queue lock.  Returns 0 on success,
 * -EBUSY when the ring is too full, -ENOMEM on DMA mapping failure.
 */
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
				   struct fec_enet_priv_tx_q *txq,
				   void *frame, u32 dma_sync_len,
				   bool ndo_xmit)
{
	unsigned int index, status, estatus;
	struct bufdesc *bdp;
	dma_addr_t dma_addr;
	int entries_free;
	u16 frame_len;

	/* Keep headroom for a full scatter-gather skb from the stack. */
	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n" );
		return -EBUSY;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	index = fec_enet_get_bd_index(bdp, bd: &txq->bd);

	if (ndo_xmit) {
		struct xdp_frame *xdpf = frame;

		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr))
			return -ENOMEM;

		frame_len = xdpf->len;
		txq->tx_buf[index].buf_p = xdpf;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
	} else {
		struct xdp_buff *xdpb = frame;
		struct page *page;

		/* Page-pool page: reuse its mapping, sync only the
		 * region the CPU may have written.
		 */
		page = virt_to_page(xdpb->data);
		dma_addr = page_pool_get_dma_addr(page) +
			   (xdpb->data - xdpb->data_hard_start);
		dma_sync_single_for_device(dev: &fep->pdev->dev, addr: dma_addr,
					   size: dma_sync_len, dir: DMA_BIDIRECTIONAL);
		frame_len = xdpb->data_end - xdpb->data;
		txq->tx_buf[index].buf_p = page;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
	}

	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
	if (fep->bufdesc_ex)
		estatus = BD_ENET_TX_INT;

	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
	bdp->cbd_datlen = cpu_to_fec16(frame_len);

	if (fep->bufdesc_ex) {
		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	dma_wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);

	/* Make sure the update to bdp are performed before txq->bd.cur. */
	dma_wmb();

	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(val: 0, addr: txq->bd.reg_desc_active);

	return 0;
}
3909 | |
3910 | static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, |
3911 | int cpu, struct xdp_buff *xdp, |
3912 | u32 dma_sync_len) |
3913 | { |
3914 | struct fec_enet_priv_tx_q *txq; |
3915 | struct netdev_queue *nq; |
3916 | int queue, ret; |
3917 | |
3918 | queue = fec_enet_xdp_get_tx_queue(fep, index: cpu); |
3919 | txq = fep->tx_queue[queue]; |
3920 | nq = netdev_get_tx_queue(dev: fep->netdev, index: queue); |
3921 | |
3922 | __netif_tx_lock(txq: nq, cpu); |
3923 | |
3924 | /* Avoid tx timeout as XDP shares the queue with kernel stack */ |
3925 | txq_trans_cond_update(txq: nq); |
3926 | ret = fec_enet_txq_xmit_frame(fep, txq, frame: xdp, dma_sync_len, ndo_xmit: false); |
3927 | |
3928 | __netif_tx_unlock(txq: nq); |
3929 | |
3930 | return ret; |
3931 | } |
3932 | |
3933 | static int fec_enet_xdp_xmit(struct net_device *dev, |
3934 | int num_frames, |
3935 | struct xdp_frame **frames, |
3936 | u32 flags) |
3937 | { |
3938 | struct fec_enet_private *fep = netdev_priv(dev); |
3939 | struct fec_enet_priv_tx_q *txq; |
3940 | int cpu = smp_processor_id(); |
3941 | unsigned int sent_frames = 0; |
3942 | struct netdev_queue *nq; |
3943 | unsigned int queue; |
3944 | int i; |
3945 | |
3946 | queue = fec_enet_xdp_get_tx_queue(fep, index: cpu); |
3947 | txq = fep->tx_queue[queue]; |
3948 | nq = netdev_get_tx_queue(dev: fep->netdev, index: queue); |
3949 | |
3950 | __netif_tx_lock(txq: nq, cpu); |
3951 | |
3952 | /* Avoid tx timeout as XDP shares the queue with kernel stack */ |
3953 | txq_trans_cond_update(txq: nq); |
3954 | for (i = 0; i < num_frames; i++) { |
3955 | if (fec_enet_txq_xmit_frame(fep, txq, frame: frames[i], dma_sync_len: 0, ndo_xmit: true) < 0) |
3956 | break; |
3957 | sent_frames++; |
3958 | } |
3959 | |
3960 | __netif_tx_unlock(txq: nq); |
3961 | |
3962 | return sent_frames; |
3963 | } |
3964 | |
3965 | static int fec_hwtstamp_get(struct net_device *ndev, |
3966 | struct kernel_hwtstamp_config *config) |
3967 | { |
3968 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3969 | |
3970 | if (!netif_running(dev: ndev)) |
3971 | return -EINVAL; |
3972 | |
3973 | if (!fep->bufdesc_ex) |
3974 | return -EOPNOTSUPP; |
3975 | |
3976 | fec_ptp_get(ndev, config); |
3977 | |
3978 | return 0; |
3979 | } |
3980 | |
3981 | static int fec_hwtstamp_set(struct net_device *ndev, |
3982 | struct kernel_hwtstamp_config *config, |
3983 | struct netlink_ext_ack *extack) |
3984 | { |
3985 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3986 | |
3987 | if (!netif_running(dev: ndev)) |
3988 | return -EINVAL; |
3989 | |
3990 | if (!fep->bufdesc_ex) |
3991 | return -EOPNOTSUPP; |
3992 | |
3993 | return fec_ptp_set(ndev, config, extack); |
3994 | } |
3995 | |
/* net_device_ops vtable wiring the FEC callbacks into the netdev core. */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_select_queue       = fec_enet_select_queue,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
	.ndo_bpf		= fec_enet_bpf,
	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
	.ndo_hwtstamp_get	= fec_hwtstamp_get,
	.ndo_hwtstamp_set	= fec_hwtstamp_set,
};
4015 | |
/* Per-queue "descriptor active" register offsets, indexed by queue id;
 * used by fec_enet_init() to fill in bd.reg_desc_active.
 */
static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};
4023 | |
4024 | /* |
4025 | * XXX: We need to clean up on failure exits here. |
4026 | * |
4027 | */ |
4028 | static int fec_enet_init(struct net_device *ndev) |
4029 | { |
4030 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4031 | struct bufdesc *cbd_base; |
4032 | dma_addr_t bd_dma; |
4033 | int bd_size; |
4034 | unsigned int i; |
4035 | unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : |
4036 | sizeof(struct bufdesc); |
4037 | unsigned dsize_log2 = __fls(word: dsize); |
4038 | int ret; |
4039 | |
4040 | WARN_ON(dsize != (1 << dsize_log2)); |
4041 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) |
4042 | fep->rx_align = 0xf; |
4043 | fep->tx_align = 0xf; |
4044 | #else |
4045 | fep->rx_align = 0x3; |
4046 | fep->tx_align = 0x3; |
4047 | #endif |
4048 | fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; |
4049 | fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; |
4050 | fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; |
4051 | fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT; |
4052 | |
4053 | /* Check mask of the streaming and coherent API */ |
4054 | ret = dma_set_mask_and_coherent(dev: &fep->pdev->dev, DMA_BIT_MASK(32)); |
4055 | if (ret < 0) { |
4056 | dev_warn(&fep->pdev->dev, "No suitable DMA available\n" ); |
4057 | return ret; |
4058 | } |
4059 | |
4060 | ret = fec_enet_alloc_queue(ndev); |
4061 | if (ret) |
4062 | return ret; |
4063 | |
4064 | bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; |
4065 | |
4066 | /* Allocate memory for buffer descriptors. */ |
4067 | cbd_base = fec_dmam_alloc(dev: &fep->pdev->dev, size: bd_size, handle: &bd_dma, |
4068 | GFP_KERNEL); |
4069 | if (!cbd_base) { |
4070 | ret = -ENOMEM; |
4071 | goto free_queue_mem; |
4072 | } |
4073 | |
4074 | /* Get the Ethernet address */ |
4075 | ret = fec_get_mac(ndev); |
4076 | if (ret) |
4077 | goto free_queue_mem; |
4078 | |
4079 | /* Set receive and transmit descriptor base. */ |
4080 | for (i = 0; i < fep->num_rx_queues; i++) { |
4081 | struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; |
4082 | unsigned size = dsize * rxq->bd.ring_size; |
4083 | |
4084 | rxq->bd.qid = i; |
4085 | rxq->bd.base = cbd_base; |
4086 | rxq->bd.cur = cbd_base; |
4087 | rxq->bd.dma = bd_dma; |
4088 | rxq->bd.dsize = dsize; |
4089 | rxq->bd.dsize_log2 = dsize_log2; |
4090 | rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; |
4091 | bd_dma += size; |
4092 | cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); |
4093 | rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); |
4094 | } |
4095 | |
4096 | for (i = 0; i < fep->num_tx_queues; i++) { |
4097 | struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; |
4098 | unsigned size = dsize * txq->bd.ring_size; |
4099 | |
4100 | txq->bd.qid = i; |
4101 | txq->bd.base = cbd_base; |
4102 | txq->bd.cur = cbd_base; |
4103 | txq->bd.dma = bd_dma; |
4104 | txq->bd.dsize = dsize; |
4105 | txq->bd.dsize_log2 = dsize_log2; |
4106 | txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; |
4107 | bd_dma += size; |
4108 | cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); |
4109 | txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); |
4110 | } |
4111 | |
4112 | |
4113 | /* The FEC Ethernet specific entries in the device structure */ |
4114 | ndev->watchdog_timeo = TX_TIMEOUT; |
4115 | ndev->netdev_ops = &fec_netdev_ops; |
4116 | ndev->ethtool_ops = &fec_enet_ethtool_ops; |
4117 | |
4118 | writel(FEC_RX_DISABLED_IMASK, addr: fep->hwp + FEC_IMASK); |
4119 | netif_napi_add(dev: ndev, napi: &fep->napi, poll: fec_enet_rx_napi); |
4120 | |
4121 | if (fep->quirks & FEC_QUIRK_HAS_VLAN) |
4122 | /* enable hw VLAN support */ |
4123 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
4124 | |
4125 | if (fep->quirks & FEC_QUIRK_HAS_CSUM) { |
4126 | netif_set_tso_max_segs(dev: ndev, FEC_MAX_TSO_SEGS); |
4127 | |
4128 | /* enable hw accelerator */ |
4129 | ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4130 | | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); |
4131 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; |
4132 | } |
4133 | |
4134 | if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { |
4135 | fep->tx_align = 0; |
4136 | fep->rx_align = 0x3f; |
4137 | } |
4138 | |
4139 | ndev->hw_features = ndev->features; |
4140 | |
4141 | if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) |
4142 | ndev->xdp_features = NETDEV_XDP_ACT_BASIC | |
4143 | NETDEV_XDP_ACT_REDIRECT; |
4144 | |
4145 | fec_restart(ndev); |
4146 | |
4147 | if (fep->quirks & FEC_QUIRK_MIB_CLEAR) |
4148 | fec_enet_clear_ethtool_stats(dev: ndev); |
4149 | else |
4150 | fec_enet_update_ethtool_stats(dev: ndev); |
4151 | |
4152 | return 0; |
4153 | |
4154 | free_queue_mem: |
4155 | fec_enet_free_queue(ndev); |
4156 | return ret; |
4157 | } |
4158 | |
#ifdef CONFIG_OF
/* Pulse the optional "phy-reset" GPIO described in the device tree:
 * assert for "phy-reset-duration" ms (default 1 ms, capped at 1 s),
 * deassert, then wait the optional "phy-reset-post-delay".
 * Returns 0 when no DT node or no reset GPIO is present.
 */
static int fec_reset_phy(struct platform_device *pdev)
{
	struct gpio_desc *phy_reset;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;
	int err;

	if (!np)
		return 0;

	err = of_property_read_u32(np, propname: "phy-reset-duration" , out_value: &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	err = of_property_read_u32(np, propname: "phy-reset-post-delay" , out_value: &phy_post_delay);
	/* valid reset duration should be less than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	/* GPIOD_OUT_HIGH asserts the (active-low per DT flags) reset line. */
	phy_reset = devm_gpiod_get_optional(dev: &pdev->dev, con_id: "phy-reset" ,
					    flags: GPIOD_OUT_HIGH);
	if (IS_ERR(ptr: phy_reset))
		return dev_err_probe(dev: &pdev->dev, err: PTR_ERR(ptr: phy_reset),
				     fmt: "failed to get phy-reset-gpios\n" );

	if (!phy_reset)
		return 0;

	/* msleep() for long waits, usleep_range() for short precise ones. */
	if (msec > 20)
		msleep(msecs: msec);
	else
		usleep_range(min: msec * 1000, max: msec * 1000 + 1000);

	gpiod_set_value_cansleep(desc: phy_reset, value: 0);

	if (!phy_post_delay)
		return 0;

	if (phy_post_delay > 20)
		msleep(msecs: phy_post_delay);
	else
		usleep_range(min: phy_post_delay * 1000,
			     max: phy_post_delay * 1000 + 1000);

	return 0;
}
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
	return 0;
}
#endif /* CONFIG_OF */
4217 | |
4218 | static void |
4219 | fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) |
4220 | { |
4221 | struct device_node *np = pdev->dev.of_node; |
4222 | |
4223 | *num_tx = *num_rx = 1; |
4224 | |
4225 | if (!np || !of_device_is_available(device: np)) |
4226 | return; |
4227 | |
4228 | /* parse the num of tx and rx queues */ |
4229 | of_property_read_u32(np, propname: "fsl,num-tx-queues" , out_value: num_tx); |
4230 | |
4231 | of_property_read_u32(np, propname: "fsl,num-rx-queues" , out_value: num_rx); |
4232 | |
4233 | if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { |
4234 | dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n" , |
4235 | *num_tx); |
4236 | *num_tx = 1; |
4237 | return; |
4238 | } |
4239 | |
4240 | if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { |
4241 | dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n" , |
4242 | *num_rx); |
4243 | *num_rx = 1; |
4244 | return; |
4245 | } |
4246 | |
4247 | } |
4248 | |
4249 | static int fec_enet_get_irq_cnt(struct platform_device *pdev) |
4250 | { |
4251 | int irq_cnt = platform_irq_count(pdev); |
4252 | |
4253 | if (irq_cnt > FEC_IRQ_NUM) |
4254 | irq_cnt = FEC_IRQ_NUM; /* last for pps */ |
4255 | else if (irq_cnt == 2) |
4256 | irq_cnt = 1; /* last for pps */ |
4257 | else if (irq_cnt <= 0) |
4258 | irq_cnt = 1; /* At least 1 irq is needed */ |
4259 | return irq_cnt; |
4260 | } |
4261 | |
4262 | static void fec_enet_get_wakeup_irq(struct platform_device *pdev) |
4263 | { |
4264 | struct net_device *ndev = platform_get_drvdata(pdev); |
4265 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4266 | |
4267 | if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) |
4268 | fep->wake_irq = fep->irq[2]; |
4269 | else |
4270 | fep->wake_irq = fep->irq[0]; |
4271 | } |
4272 | |
/* Parse the optional "fsl,stop-mode" DT property (<gpr-phandle reg bit>)
 * and resolve the GPR syscon regmap used to enter/exit stop mode.
 * Returns 0 when the property is absent; on regmap failure the stored
 * pointer is cleared so later users see a consistent "no GPR" state.
 */
static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
				   struct device_node *np)
{
	struct device_node *gpr_np;
	u32 out_val[3];
	int ret = 0;

	gpr_np = of_parse_phandle(np, phandle_name: "fsl,stop-mode" , index: 0);
	if (!gpr_np)
		return 0;

	ret = of_property_read_u32_array(np, propname: "fsl,stop-mode" , out_values: out_val,
					 ARRAY_SIZE(out_val));
	if (ret) {
		dev_dbg(&fep->pdev->dev, "no stop mode property\n" );
		goto out;
	}

	fep->stop_gpr.gpr = syscon_node_to_regmap(np: gpr_np);
	if (IS_ERR(ptr: fep->stop_gpr.gpr)) {
		dev_err(&fep->pdev->dev, "could not find gpr regmap\n" );
		ret = PTR_ERR(ptr: fep->stop_gpr.gpr);
		fep->stop_gpr.gpr = NULL;
		goto out;
	}

	/* out_val[0] is the phandle itself; [1]/[2] are reg offset and bit. */
	fep->stop_gpr.reg = out_val[1];
	fep->stop_gpr.bit = out_val[2];

out:
	/* Drop the phandle reference on all paths. */
	of_node_put(node: gpr_np);

	return ret;
}
4307 | |
/* Platform probe: allocate the netdev, map registers, resolve DT
 * resources (PHY node/mode, clocks, regulator, stop-mode GPR), enable
 * runtime PM, reset the PHY, initialise rings, request IRQs, set up the
 * MDIO bus and finally register the netdev.  Failures unwind through the
 * goto ladder at the bottom in strict reverse order of acquisition.
 */
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	phy_interface_t interface;
	struct net_device *ndev;
	int i, irq, ret = 0;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;
	char irq_name[8];
	int irq_cnt;
	const struct fec_devinfo *dev_info;

	fec_enet_get_queue_num(pdev, num_tx: &num_tx_qs, num_rx: &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof_priv: sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, txqs: num_tx_qs, rxqs: num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(dev: ndev);

	/* SoC-specific quirks: OF match data first, platform id as fallback. */
	dev_info = device_get_match_data(dev: &pdev->dev);
	if (!dev_info)
		dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
	if (dev_info)
		fep->quirks = dev_info->quirks;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev: &pdev->dev);

	fep->hwp = devm_platform_ioremap_resource(pdev, index: 0);
	if (IS_ERR(ptr: fep->hwp)) {
		ret = PTR_ERR(ptr: fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, data: ndev);

	/* Erratum ERR006687 workaround applies unless the board declares
	 * it already handles it.
	 */
	if ((of_machine_is_compatible(compat: "fsl,imx6q" ) ||
	     of_machine_is_compatible(compat: "fsl,imx6dl" )) &&
	    !of_property_read_bool(np, propname: "fsl,err006687-workaround-present" ))
		fep->quirks |= FEC_QUIRK_ERR006687;

	ret = fec_enet_ipc_handle_init(fep);
	if (ret)
		goto failed_ipc_init;

	if (of_property_read_bool(np, propname: "fsl,magic-packet" ))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	ret = fec_enet_init_stop_mode(fep, np);
	if (ret)
		goto failed_stop_mode;

	/* Resolve the PHY: explicit phy-handle or a fixed-link node. */
	phy_node = of_parse_phandle(np, phandle_name: "phy-handle" , index: 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n" );
			goto failed_phy;
		}
		phy_node = of_node_get(node: np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(np: pdev->dev.of_node, interface: &interface);
	if (ret) {
		pdata = dev_get_platdata(dev: &pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = interface;
	}

	ret = fec_enet_parse_rgmii_delay(fep, np);
	if (ret)
		goto failed_rgmii_delay;

	fep->clk_ipg = devm_clk_get(dev: &pdev->dev, id: "ipg" );
	if (IS_ERR(ptr: fep->clk_ipg)) {
		ret = PTR_ERR(ptr: fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(dev: &pdev->dev, id: "ahb" );
	if (IS_ERR(ptr: fep->clk_ahb)) {
		ret = PTR_ERR(ptr: fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(clk: fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get_optional(dev: &pdev->dev, id: "enet_out" );
	if (IS_ERR(ptr: fep->clk_enet_out)) {
		ret = PTR_ERR(ptr: fep->clk_enet_out);
		goto failed_clk;
	}

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get_optional(dev: &pdev->dev, id: "enet_clk_ref" );
	if (IS_ERR(ptr: fep->clk_ref)) {
		ret = PTR_ERR(ptr: fep->clk_ref);
		goto failed_clk;
	}
	fep->clk_ref_rate = clk_get_rate(clk: fep->clk_ref);

	/* clk_2x_txclk is optional, depends on board */
	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
		fep->clk_2x_txclk = devm_clk_get(dev: &pdev->dev, id: "enet_2x_txclk" );
		if (IS_ERR(ptr: fep->clk_2x_txclk))
			fep->clk_2x_txclk = NULL;
	}

	/* Extended descriptors (and thus PTP) need the ptp clock. */
	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(dev: &pdev->dev, id: "ptp" );
	if (IS_ERR(ptr: fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, enable: true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(clk: fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;
	ret = clk_prepare_enable(clk: fep->clk_ahb);
	if (ret)
		goto failed_clk_ahb;

	fep->reg_phy = devm_regulator_get_optional(dev: &pdev->dev, id: "phy" );
	if (!IS_ERR(ptr: fep->reg_phy)) {
		ret = regulator_enable(regulator: fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n" , ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(ptr: fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(dev: &pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(dev: &pdev->dev);
	pm_runtime_get_noresume(dev: &pdev->dev);
	pm_runtime_set_active(dev: &pdev->dev);
	pm_runtime_enable(dev: &pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_idx: irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	/* Request IRQs by name ("int0".."intN") with positional fallback. */
	for (i = 0; i < irq_cnt; i++) {
		snprintf(buf: irq_name, size: sizeof(irq_name), fmt: "int%d" , i);
		irq = platform_get_irq_byname_optional(dev: pdev, name: irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(dev: &pdev->dev, irq, handler: fec_enet_interrupt,
				       irqflags: 0, devname: pdev->name, dev_id: ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	/* Decide which interrupt line is wakeup capable */
	fec_enet_get_wakeup_irq(pdev);

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev: ndev);
	fec_enet_clk_enable(ndev, enable: false);
	pinctrl_pm_select_sleep_state(dev: &pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;

	ret = register_netdev(dev: ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(dev: &ndev->dev, enable: fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(dev: ndev, format: "registered PHC device %d\n" , fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(dev: &pdev->dev);
	pm_runtime_put_autosuspend(dev: &pdev->dev);

	return 0;

	/* Error unwinding: labels in strict reverse order of acquisition. */
failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(dev: &pdev->dev);
	pm_runtime_disable(dev: &pdev->dev);
	if (fep->reg_phy)
		regulator_disable(regulator: fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(clk: fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(clk: fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, enable: false);
failed_clk:
failed_rgmii_delay:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(node: phy_node);
failed_stop_mode:
failed_ipc_init:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(dev: ndev);

	return ret;
}
4580 | |
4581 | static void |
4582 | fec_drv_remove(struct platform_device *pdev) |
4583 | { |
4584 | struct net_device *ndev = platform_get_drvdata(pdev); |
4585 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4586 | struct device_node *np = pdev->dev.of_node; |
4587 | int ret; |
4588 | |
4589 | ret = pm_runtime_get_sync(dev: &pdev->dev); |
4590 | if (ret < 0) |
4591 | dev_err(&pdev->dev, |
4592 | "Failed to resume device in remove callback (%pe)\n" , |
4593 | ERR_PTR(ret)); |
4594 | |
4595 | cancel_work_sync(work: &fep->tx_timeout_work); |
4596 | fec_ptp_stop(pdev); |
4597 | unregister_netdev(dev: ndev); |
4598 | fec_enet_mii_remove(fep); |
4599 | if (fep->reg_phy) |
4600 | regulator_disable(regulator: fep->reg_phy); |
4601 | |
4602 | if (of_phy_is_fixed_link(np)) |
4603 | of_phy_deregister_fixed_link(np); |
4604 | of_node_put(node: fep->phy_node); |
4605 | |
4606 | /* After pm_runtime_get_sync() failed, the clks are still off, so skip |
4607 | * disabling them again. |
4608 | */ |
4609 | if (ret >= 0) { |
4610 | clk_disable_unprepare(clk: fep->clk_ahb); |
4611 | clk_disable_unprepare(clk: fep->clk_ipg); |
4612 | } |
4613 | pm_runtime_put_noidle(dev: &pdev->dev); |
4614 | pm_runtime_disable(dev: &pdev->dev); |
4615 | |
4616 | free_netdev(dev: ndev); |
4617 | } |
4618 | |
4619 | static int __maybe_unused fec_suspend(struct device *dev) |
4620 | { |
4621 | struct net_device *ndev = dev_get_drvdata(dev); |
4622 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4623 | int ret; |
4624 | |
4625 | rtnl_lock(); |
4626 | if (netif_running(dev: ndev)) { |
4627 | if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) |
4628 | fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; |
4629 | phy_stop(phydev: ndev->phydev); |
4630 | napi_disable(n: &fep->napi); |
4631 | netif_tx_lock_bh(dev: ndev); |
4632 | netif_device_detach(dev: ndev); |
4633 | netif_tx_unlock_bh(dev: ndev); |
4634 | fec_stop(ndev); |
4635 | if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { |
4636 | fec_irqs_disable(ndev); |
4637 | pinctrl_pm_select_sleep_state(dev: &fep->pdev->dev); |
4638 | } else { |
4639 | fec_irqs_disable_except_wakeup(ndev); |
4640 | if (fep->wake_irq > 0) { |
4641 | disable_irq(irq: fep->wake_irq); |
4642 | enable_irq_wake(irq: fep->wake_irq); |
4643 | } |
4644 | fec_enet_stop_mode(fep, enabled: true); |
4645 | } |
4646 | /* It's safe to disable clocks since interrupts are masked */ |
4647 | fec_enet_clk_enable(ndev, enable: false); |
4648 | |
4649 | fep->rpm_active = !pm_runtime_status_suspended(dev); |
4650 | if (fep->rpm_active) { |
4651 | ret = pm_runtime_force_suspend(dev); |
4652 | if (ret < 0) { |
4653 | rtnl_unlock(); |
4654 | return ret; |
4655 | } |
4656 | } |
4657 | } |
4658 | rtnl_unlock(); |
4659 | |
4660 | if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) |
4661 | regulator_disable(regulator: fep->reg_phy); |
4662 | |
4663 | /* SOC supply clock to phy, when clock is disabled, phy link down |
4664 | * SOC control phy regulator, when regulator is disabled, phy link down |
4665 | */ |
4666 | if (fep->clk_enet_out || fep->reg_phy) |
4667 | fep->link = 0; |
4668 | |
4669 | return 0; |
4670 | } |
4671 | |
4672 | static int __maybe_unused fec_resume(struct device *dev) |
4673 | { |
4674 | struct net_device *ndev = dev_get_drvdata(dev); |
4675 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4676 | int ret; |
4677 | int val; |
4678 | |
4679 | if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { |
4680 | ret = regulator_enable(regulator: fep->reg_phy); |
4681 | if (ret) |
4682 | return ret; |
4683 | } |
4684 | |
4685 | rtnl_lock(); |
4686 | if (netif_running(dev: ndev)) { |
4687 | if (fep->rpm_active) |
4688 | pm_runtime_force_resume(dev); |
4689 | |
4690 | ret = fec_enet_clk_enable(ndev, enable: true); |
4691 | if (ret) { |
4692 | rtnl_unlock(); |
4693 | goto failed_clk; |
4694 | } |
4695 | if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { |
4696 | fec_enet_stop_mode(fep, enabled: false); |
4697 | if (fep->wake_irq) { |
4698 | disable_irq_wake(irq: fep->wake_irq); |
4699 | enable_irq(irq: fep->wake_irq); |
4700 | } |
4701 | |
4702 | val = readl(addr: fep->hwp + FEC_ECNTRL); |
4703 | val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); |
4704 | writel(val, addr: fep->hwp + FEC_ECNTRL); |
4705 | fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; |
4706 | } else { |
4707 | pinctrl_pm_select_default_state(dev: &fep->pdev->dev); |
4708 | } |
4709 | fec_restart(ndev); |
4710 | netif_tx_lock_bh(dev: ndev); |
4711 | netif_device_attach(dev: ndev); |
4712 | netif_tx_unlock_bh(dev: ndev); |
4713 | napi_enable(n: &fep->napi); |
4714 | phy_init_hw(phydev: ndev->phydev); |
4715 | phy_start(phydev: ndev->phydev); |
4716 | } |
4717 | rtnl_unlock(); |
4718 | |
4719 | return 0; |
4720 | |
4721 | failed_clk: |
4722 | if (fep->reg_phy) |
4723 | regulator_disable(regulator: fep->reg_phy); |
4724 | return ret; |
4725 | } |
4726 | |
4727 | static int __maybe_unused fec_runtime_suspend(struct device *dev) |
4728 | { |
4729 | struct net_device *ndev = dev_get_drvdata(dev); |
4730 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4731 | |
4732 | clk_disable_unprepare(clk: fep->clk_ahb); |
4733 | clk_disable_unprepare(clk: fep->clk_ipg); |
4734 | |
4735 | return 0; |
4736 | } |
4737 | |
4738 | static int __maybe_unused fec_runtime_resume(struct device *dev) |
4739 | { |
4740 | struct net_device *ndev = dev_get_drvdata(dev); |
4741 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4742 | int ret; |
4743 | |
4744 | ret = clk_prepare_enable(clk: fep->clk_ahb); |
4745 | if (ret) |
4746 | return ret; |
4747 | ret = clk_prepare_enable(clk: fep->clk_ipg); |
4748 | if (ret) |
4749 | goto failed_clk_ipg; |
4750 | |
4751 | return 0; |
4752 | |
4753 | failed_clk_ipg: |
4754 | clk_disable_unprepare(clk: fep->clk_ahb); |
4755 | return ret; |
4756 | } |
4757 | |
/* PM callbacks: system sleep (suspend/resume) plus runtime clock gating. */
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
4762 | |
/* Platform driver glue; matched via DT (fec_dt_ids) or the legacy
 * platform id table (fec_devtype).
 */
static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove_new = fec_drv_remove,
};
4774 | |
4775 | module_platform_driver(fec_driver); |
4776 | |
4777 | MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver" ); |
4778 | MODULE_LICENSE("GPL" ); |
4779 | |