1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
2 | /* Copyright 2014-2016 Freescale Semiconductor Inc. |
3 | * Copyright 2016-2022 NXP |
4 | */ |
5 | #include <linux/init.h> |
6 | #include <linux/module.h> |
7 | #include <linux/platform_device.h> |
8 | #include <linux/etherdevice.h> |
9 | #include <linux/of_net.h> |
10 | #include <linux/interrupt.h> |
11 | #include <linux/kthread.h> |
12 | #include <linux/iommu.h> |
13 | #include <linux/fsl/mc.h> |
14 | #include <linux/bpf.h> |
15 | #include <linux/bpf_trace.h> |
16 | #include <linux/fsl/ptp_qoriq.h> |
17 | #include <linux/ptp_classify.h> |
18 | #include <net/pkt_cls.h> |
19 | #include <net/sock.h> |
20 | #include <net/tso.h> |
21 | #include <net/xdp_sock_drv.h> |
22 | |
23 | #include "dpaa2-eth.h" |
24 | |
25 | /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files |
26 | * using trace events only need to #include <trace/events/sched.h> |
27 | */ |
28 | #define CREATE_TRACE_POINTS |
29 | #include "dpaa2-eth-trace.h" |
30 | |
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
34 | |
35 | struct ptp_qoriq *dpaa2_ptp; |
36 | EXPORT_SYMBOL(dpaa2_ptp); |
37 | |
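/* Detect which optional features are supported by the DPNI firmware version */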
38 | static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv) |
39 | { |
40 | priv->features = 0; |
41 | |
42 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR, |
43 | DPNI_PTP_ONESTEP_VER_MINOR) >= 0) |
44 | priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT; |
45 | } |
46 | |
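/* Update the one-step PTP Sync correction settings through an MC firmware command */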
47 | static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv, |
48 | u32 offset, u8 udp) |
49 | { |
50 | struct dpni_single_step_cfg cfg; |
51 | |
52 | cfg.en = 1; |
53 | cfg.ch_update = udp; |
54 | cfg.offset = offset; |
55 | cfg.peer_delay = 0; |
56 | |
	if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
		WARN_ONCE(1, "Failed to set single step register");
59 | } |
60 | |
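/* Update the one-step PTP Sync correction settings by writing the dedicated
 * hardware register directly
 */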
61 | static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv, |
62 | u32 offset, u8 udp) |
63 | { |
64 | u32 val = 0; |
65 | |
66 | val = DPAA2_PTP_SINGLE_STEP_ENABLE | |
67 | DPAA2_PTP_SINGLE_CORRECTION_OFF(offset); |
68 | |
69 | if (udp) |
70 | val |= DPAA2_PTP_SINGLE_STEP_CH; |
71 | |
72 | if (priv->onestep_reg_base) |
		writel(val, priv->onestep_reg_base);
74 | } |
75 | |
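/* Select the update method for the one-step PTP configuration: direct register
 * write if the firmware exposes the register, MC command otherwise
 */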
76 | static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv) |
77 | { |
78 | struct device *dev = priv->net_dev->dev.parent; |
79 | struct dpni_single_step_cfg ptp_cfg; |
80 | |
81 | priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect; |
82 | |
83 | if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT)) |
84 | return; |
85 | |
	if (dpni_get_single_step_cfg(priv->mc_io, 0,
				     priv->mc_token, &ptp_cfg)) {
		dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
89 | return; |
90 | } |
91 | |
	if (!ptp_cfg.ptp_onestep_reg_base) {
		dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
		return;
	}

	priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
					 sizeof(u32));
	if (!priv->onestep_reg_base) {
		dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
		return;
	}
103 | |
104 | priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct; |
105 | } |
106 | |
107 | void *dpaa2_iova_to_virt(struct iommu_domain *domain, |
108 | dma_addr_t iova_addr) |
109 | { |
110 | phys_addr_t phys_addr; |
111 | |
	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
115 | } |
116 | |
117 | static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv, |
118 | u32 fd_status, |
119 | struct sk_buff *skb) |
120 | { |
121 | skb_checksum_none_assert(skb); |
122 | |
123 | /* HW checksum validation is disabled, nothing to do here */ |
124 | if (!(priv->net_dev->features & NETIF_F_RXCSUM)) |
125 | return; |
126 | |
127 | /* Read checksum validation bits */ |
128 | if (!((fd_status & DPAA2_FAS_L3CV) && |
129 | (fd_status & DPAA2_FAS_L4CV))) |
130 | return; |
131 | |
132 | /* Inform the stack there's no need to compute L3/L4 csum anymore */ |
133 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
134 | } |
135 | |
136 | /* Free a received FD. |
137 | * Not to be used for Tx conf FDs or on any other paths. |
138 | */ |
139 | static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv, |
140 | const struct dpaa2_fd *fd, |
141 | void *vaddr) |
142 | { |
143 | struct device *dev = priv->net_dev->dev.parent; |
144 | dma_addr_t addr = dpaa2_fd_get_addr(fd); |
145 | u8 fd_format = dpaa2_fd_get_format(fd); |
146 | struct dpaa2_sg_entry *sgt; |
147 | void *sg_vaddr; |
148 | int i; |
149 | |
150 | /* If single buffer frame, just free the data buffer */ |
151 | if (fd_format == dpaa2_fd_single) |
152 | goto free_buf; |
153 | else if (fd_format != dpaa2_fd_sg) |
154 | /* We don't support any other format */ |
155 | return; |
156 | |
157 | /* For S/G frames, we first need to free all SG entries |
158 | * except the first one, which was taken care of already |
159 | */ |
160 | sgt = vaddr + dpaa2_fd_get_offset(fd); |
161 | for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { |
162 | addr = dpaa2_sg_get_addr(sg: &sgt[i]); |
163 | sg_vaddr = dpaa2_iova_to_virt(domain: priv->iommu_domain, iova_addr: addr); |
164 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
165 | DMA_BIDIRECTIONAL); |
166 | |
167 | free_pages(addr: (unsigned long)sg_vaddr, order: 0); |
168 | if (dpaa2_sg_is_final(sg: &sgt[i])) |
169 | break; |
170 | } |
171 | |
172 | free_buf: |
173 | free_pages(addr: (unsigned long)vaddr, order: 0); |
174 | } |
175 | |
176 | /* Build a linear skb based on a single-buffer frame descriptor */ |
177 | static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch, |
178 | const struct dpaa2_fd *fd, |
179 | void *fd_vaddr) |
180 | { |
181 | struct sk_buff *skb = NULL; |
182 | u16 fd_offset = dpaa2_fd_get_offset(fd); |
183 | u32 fd_length = dpaa2_fd_get_len(fd); |
184 | |
185 | ch->buf_count--; |
186 | |
187 | skb = build_skb(data: fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); |
188 | if (unlikely(!skb)) |
189 | return NULL; |
190 | |
191 | skb_reserve(skb, len: fd_offset); |
192 | skb_put(skb, len: fd_length); |
193 | |
194 | return skb; |
195 | } |
196 | |
197 | /* Build a non linear (fragmented) skb based on a S/G table */ |
198 | static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, |
199 | struct dpaa2_eth_channel *ch, |
200 | struct dpaa2_sg_entry *sgt) |
201 | { |
202 | struct sk_buff *skb = NULL; |
203 | struct device *dev = priv->net_dev->dev.parent; |
204 | void *sg_vaddr; |
205 | dma_addr_t sg_addr; |
206 | u16 sg_offset; |
207 | u32 sg_length; |
208 | struct page *page, *head_page; |
209 | int page_offset; |
210 | int i; |
211 | |
212 | for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { |
213 | struct dpaa2_sg_entry *sge = &sgt[i]; |
214 | |
215 | /* NOTE: We only support SG entries in dpaa2_sg_single format, |
216 | * but this is the only format we may receive from HW anyway |
217 | */ |
218 | |
219 | /* Get the address and length from the S/G entry */ |
220 | sg_addr = dpaa2_sg_get_addr(sg: sge); |
221 | sg_vaddr = dpaa2_iova_to_virt(domain: priv->iommu_domain, iova_addr: sg_addr); |
222 | dma_unmap_page(dev, sg_addr, priv->rx_buf_size, |
223 | DMA_BIDIRECTIONAL); |
224 | |
225 | sg_length = dpaa2_sg_get_len(sg: sge); |
226 | |
227 | if (i == 0) { |
228 | /* We build the skb around the first data buffer */ |
229 | skb = build_skb(data: sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); |
230 | if (unlikely(!skb)) { |
231 | /* Free the first SG entry now, since we already |
232 | * unmapped it and obtained the virtual address |
233 | */ |
234 | free_pages(addr: (unsigned long)sg_vaddr, order: 0); |
235 | |
236 | /* We still need to subtract the buffers used |
237 | * by this FD from our software counter |
238 | */ |
239 | while (!dpaa2_sg_is_final(sg: &sgt[i]) && |
240 | i < DPAA2_ETH_MAX_SG_ENTRIES) |
241 | i++; |
242 | break; |
243 | } |
244 | |
245 | sg_offset = dpaa2_sg_get_offset(sg: sge); |
246 | skb_reserve(skb, len: sg_offset); |
247 | skb_put(skb, len: sg_length); |
248 | } else { |
249 | /* Rest of the data buffers are stored as skb frags */ |
250 | page = virt_to_page(sg_vaddr); |
251 | head_page = virt_to_head_page(x: sg_vaddr); |
252 | |
253 | /* Offset in page (which may be compound). |
254 | * Data in subsequent SG entries is stored from the |
255 | * beginning of the buffer, so we don't need to add the |
256 | * sg_offset. |
257 | */ |
258 | page_offset = ((unsigned long)sg_vaddr & |
259 | (PAGE_SIZE - 1)) + |
260 | (page_address(page) - page_address(head_page)); |
261 | |
262 | skb_add_rx_frag(skb, i: i - 1, page: head_page, off: page_offset, |
263 | size: sg_length, truesize: priv->rx_buf_size); |
264 | } |
265 | |
266 | if (dpaa2_sg_is_final(sg: sge)) |
267 | break; |
268 | } |
269 | |
	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
271 | |
272 | /* Count all data buffers + SG table buffer */ |
273 | ch->buf_count -= i + 2; |
274 | |
275 | return skb; |
276 | } |
277 | |
278 | /* Free buffers acquired from the buffer pool or which were meant to |
279 | * be released in the pool |
280 | */ |
281 | static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, |
282 | int count, bool xsk_zc) |
283 | { |
284 | struct device *dev = priv->net_dev->dev.parent; |
285 | struct dpaa2_eth_swa *swa; |
286 | struct xdp_buff *xdp_buff; |
287 | void *vaddr; |
288 | int i; |
289 | |
290 | for (i = 0; i < count; i++) { |
291 | vaddr = dpaa2_iova_to_virt(domain: priv->iommu_domain, iova_addr: buf_array[i]); |
292 | |
293 | if (!xsk_zc) { |
294 | dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, |
295 | DMA_BIDIRECTIONAL); |
296 | free_pages(addr: (unsigned long)vaddr, order: 0); |
297 | } else { |
298 | swa = (struct dpaa2_eth_swa *) |
299 | (vaddr + DPAA2_ETH_RX_HWA_SIZE); |
300 | xdp_buff = swa->xsk.xdp_buff; |
301 | xsk_buff_free(xdp: xdp_buff); |
302 | } |
303 | } |
304 | } |
305 | |
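/* Stash a buffer address for recycling; once a full command's worth of buffers
 * has accumulated, release them back to the hardware buffer pool
 */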
306 | void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, |
307 | struct dpaa2_eth_channel *ch, |
308 | dma_addr_t addr) |
309 | { |
310 | int retries = 0; |
311 | int err; |
312 | |
313 | ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr; |
314 | if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD) |
315 | return; |
316 | |
317 | while ((err = dpaa2_io_service_release(d: ch->dpio, bpid: ch->bp->bpid, |
318 | buffers: ch->recycled_bufs, |
319 | num_buffers: ch->recycled_bufs_cnt)) == -EBUSY) { |
320 | if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) |
321 | break; |
322 | cpu_relax(); |
323 | } |
324 | |
325 | if (err) { |
326 | dpaa2_eth_free_bufs(priv, buf_array: ch->recycled_bufs, |
327 | count: ch->recycled_bufs_cnt, xsk_zc: ch->xsk_zc); |
328 | ch->buf_count -= ch->recycled_bufs_cnt; |
329 | } |
330 | |
331 | ch->recycled_bufs_cnt = 0; |
332 | } |
333 | |
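/* Enqueue an array of XDP frame descriptors to a Tx queue, retrying on portal
 * busy; returns the number of frames actually enqueued
 */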
334 | static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv, |
335 | struct dpaa2_eth_fq *fq, |
336 | struct dpaa2_eth_xdp_fds *xdp_fds) |
337 | { |
338 | int total_enqueued = 0, retries = 0, enqueued; |
	struct dpaa2_eth_drv_stats *percpu_extras;
340 | int num_fds, err, max_retries; |
341 | struct dpaa2_fd *fds; |
342 | |
343 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
344 | |
345 | /* try to enqueue all the FDs until the max number of retries is hit */ |
346 | fds = xdp_fds->fds; |
347 | num_fds = xdp_fds->num; |
348 | max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES; |
349 | while (total_enqueued < num_fds && retries < max_retries) { |
350 | err = priv->enqueue(priv, fq, &fds[total_enqueued], |
351 | 0, num_fds - total_enqueued, &enqueued); |
352 | if (err == -EBUSY) { |
353 | percpu_extras->tx_portal_busy += ++retries; |
354 | continue; |
355 | } |
356 | total_enqueued += enqueued; |
357 | } |
358 | xdp_fds->num = 0; |
359 | |
360 | return total_enqueued; |
361 | } |
362 | |
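/* Flush the pending XDP_TX frames of a queue and update the statistics;
 * buffers that could not be enqueued are recycled back to the pool
 */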
363 | static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, |
364 | struct dpaa2_eth_channel *ch, |
365 | struct dpaa2_eth_fq *fq) |
366 | { |
367 | struct rtnl_link_stats64 *percpu_stats; |
368 | struct dpaa2_fd *fds; |
369 | int enqueued, i; |
370 | |
371 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
372 | |
373 | // enqueue the array of XDP_TX frames |
374 | enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_fds: &fq->xdp_tx_fds); |
375 | |
376 | /* update statistics */ |
377 | percpu_stats->tx_packets += enqueued; |
378 | fds = fq->xdp_tx_fds.fds; |
379 | for (i = 0; i < enqueued; i++) { |
380 | percpu_stats->tx_bytes += dpaa2_fd_get_len(fd: &fds[i]); |
381 | ch->stats.xdp_tx++; |
382 | } |
383 | for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { |
384 | dpaa2_eth_recycle_buf(priv, ch, addr: dpaa2_fd_get_addr(fd: &fds[i])); |
385 | percpu_stats->tx_errors++; |
386 | ch->stats.xdp_tx_err++; |
387 | } |
388 | fq->xdp_tx_fds.num = 0; |
389 | } |
390 | |
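/* Prepare an XDP_TX frame for transmission and add it to the per-queue bulk;
 * the bulk is flushed once it reaches DEV_MAP_BULK_SIZE entries
 */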
391 | void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, |
392 | struct dpaa2_eth_channel *ch, |
393 | struct dpaa2_fd *fd, |
394 | void *buf_start, u16 queue_id) |
395 | { |
396 | struct dpaa2_faead *faead; |
397 | struct dpaa2_fd *dest_fd; |
398 | struct dpaa2_eth_fq *fq; |
399 | u32 ctrl, frc; |
400 | |
401 | /* Mark the egress frame hardware annotation area as valid */ |
402 | frc = dpaa2_fd_get_frc(fd); |
403 | dpaa2_fd_set_frc(fd, frc: frc | DPAA2_FD_FRC_FAEADV); |
404 | dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL); |
405 | |
406 | /* Instruct hardware to release the FD buffer directly into |
407 | * the buffer pool once transmission is completed, instead of |
408 | * sending a Tx confirmation frame to us |
409 | */ |
410 | ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV; |
411 | faead = dpaa2_get_faead(buf_addr: buf_start, swa: false); |
412 | faead->ctrl = cpu_to_le32(ctrl); |
413 | faead->conf_fqid = 0; |
414 | |
415 | fq = &priv->fq[queue_id]; |
416 | dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++]; |
417 | memcpy(dest_fd, fd, sizeof(*dest_fd)); |
418 | |
419 | if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE) |
420 | return; |
421 | |
422 | dpaa2_eth_xdp_tx_flush(priv, ch, fq); |
423 | } |
424 | |
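/* Run the attached XDP program on a received frame and act on its verdict */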
425 | static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, |
426 | struct dpaa2_eth_channel *ch, |
427 | struct dpaa2_eth_fq *rx_fq, |
428 | struct dpaa2_fd *fd, void *vaddr) |
429 | { |
430 | dma_addr_t addr = dpaa2_fd_get_addr(fd); |
431 | struct bpf_prog *xdp_prog; |
432 | struct xdp_buff xdp; |
433 | u32 xdp_act = XDP_PASS; |
434 | int err, offset; |
435 | |
436 | xdp_prog = READ_ONCE(ch->xdp.prog); |
437 | if (!xdp_prog) |
438 | goto out; |
439 | |
440 | offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM; |
441 | xdp_init_buff(xdp: &xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, rxq: &ch->xdp_rxq); |
442 | xdp_prepare_buff(xdp: &xdp, hard_start: vaddr + offset, XDP_PACKET_HEADROOM, |
443 | data_len: dpaa2_fd_get_len(fd), meta_valid: false); |
444 | |
445 | xdp_act = bpf_prog_run_xdp(prog: xdp_prog, xdp: &xdp); |
446 | |
447 | /* xdp.data pointer may have changed */ |
448 | dpaa2_fd_set_offset(fd, offset: xdp.data - vaddr); |
449 | dpaa2_fd_set_len(fd, len: xdp.data_end - xdp.data); |
450 | |
451 | switch (xdp_act) { |
452 | case XDP_PASS: |
453 | break; |
454 | case XDP_TX: |
455 | dpaa2_eth_xdp_enqueue(priv, ch, fd, buf_start: vaddr, queue_id: rx_fq->flowid); |
456 | break; |
457 | default: |
458 | bpf_warn_invalid_xdp_action(dev: priv->net_dev, prog: xdp_prog, act: xdp_act); |
459 | fallthrough; |
460 | case XDP_ABORTED: |
461 | trace_xdp_exception(dev: priv->net_dev, xdp: xdp_prog, act: xdp_act); |
462 | fallthrough; |
463 | case XDP_DROP: |
464 | dpaa2_eth_recycle_buf(priv, ch, addr); |
465 | ch->stats.xdp_drop++; |
466 | break; |
467 | case XDP_REDIRECT: |
468 | dma_unmap_page(priv->net_dev->dev.parent, addr, |
469 | priv->rx_buf_size, DMA_BIDIRECTIONAL); |
470 | ch->buf_count--; |
471 | |
472 | /* Allow redirect use of full headroom */ |
473 | xdp.data_hard_start = vaddr; |
474 | xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE; |
475 | |
476 | err = xdp_do_redirect(dev: priv->net_dev, xdp: &xdp, prog: xdp_prog); |
477 | if (unlikely(err)) { |
478 | addr = dma_map_page(priv->net_dev->dev.parent, |
479 | virt_to_page(vaddr), 0, |
480 | priv->rx_buf_size, DMA_BIDIRECTIONAL); |
481 | if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) { |
482 | free_pages(addr: (unsigned long)vaddr, order: 0); |
483 | } else { |
484 | ch->buf_count++; |
485 | dpaa2_eth_recycle_buf(priv, ch, addr); |
486 | } |
487 | ch->stats.xdp_drop++; |
488 | } else { |
489 | ch->stats.xdp_redirect++; |
490 | } |
491 | break; |
492 | } |
493 | |
494 | ch->xdp.res |= xdp_act; |
495 | out: |
496 | return xdp_act; |
497 | } |
498 | |
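/* Allocate a new linear skb and copy the frame data into it */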
499 | struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, |
500 | struct dpaa2_eth_channel *ch, |
501 | const struct dpaa2_fd *fd, u32 fd_length, |
502 | void *fd_vaddr) |
503 | { |
504 | u16 fd_offset = dpaa2_fd_get_offset(fd); |
505 | struct sk_buff *skb = NULL; |
506 | unsigned int skb_len; |
507 | |
508 | skb_len = fd_length + dpaa2_eth_needed_headroom(NULL); |
509 | |
510 | skb = napi_alloc_skb(napi: &ch->napi, length: skb_len); |
511 | if (!skb) |
512 | return NULL; |
513 | |
514 | skb_reserve(skb, len: dpaa2_eth_needed_headroom(NULL)); |
515 | skb_put(skb, len: fd_length); |
516 | |
517 | memcpy(skb->data, fd_vaddr + fd_offset, fd_length); |
518 | |
519 | return skb; |
520 | } |
521 | |
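/* For frames shorter than the copybreak threshold, copy the data into a newly
 * allocated skb so the original buffer can be recycled
 */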
522 | static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, |
523 | const struct dpaa2_fd *fd, |
524 | void *fd_vaddr) |
525 | { |
526 | struct dpaa2_eth_priv *priv = ch->priv; |
527 | u32 fd_length = dpaa2_fd_get_len(fd); |
528 | |
529 | if (fd_length > priv->rx_copybreak) |
530 | return NULL; |
531 | |
532 | return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr); |
533 | } |
534 | |
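/* Finish processing a received skb: timestamp, checksum, protocol and stats,
 * then add it to the channel Rx list
 */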
535 | void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv, |
536 | struct dpaa2_eth_channel *ch, |
537 | const struct dpaa2_fd *fd, void *vaddr, |
538 | struct dpaa2_eth_fq *fq, |
539 | struct rtnl_link_stats64 *percpu_stats, |
540 | struct sk_buff *skb) |
541 | { |
542 | struct dpaa2_fas *fas; |
543 | u32 status = 0; |
544 | |
545 | fas = dpaa2_get_fas(buf_addr: vaddr, swa: false); |
546 | prefetch(fas); |
547 | prefetch(skb->data); |
548 | |
549 | /* Get the timestamp value */ |
550 | if (priv->rx_tstamp) { |
551 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
552 | __le64 *ts = dpaa2_get_ts(buf_addr: vaddr, swa: false); |
553 | u64 ns; |
554 | |
555 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
556 | |
557 | ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(p: ts); |
558 | shhwtstamps->hwtstamp = ns_to_ktime(ns); |
559 | } |
560 | |
561 | /* Check if we need to validate the L4 csum */ |
562 | if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { |
563 | status = le32_to_cpu(fas->status); |
564 | dpaa2_eth_validate_rx_csum(priv, fd_status: status, skb); |
565 | } |
566 | |
567 | skb->protocol = eth_type_trans(skb, dev: priv->net_dev); |
568 | skb_record_rx_queue(skb, rx_queue: fq->flowid); |
569 | |
570 | percpu_stats->rx_packets++; |
571 | percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); |
572 | ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); |
573 | |
574 | list_add_tail(new: &skb->list, head: ch->rx_list); |
575 | } |
576 | |
577 | /* Main Rx frame processing routine */ |
578 | void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, |
579 | struct dpaa2_eth_channel *ch, |
580 | const struct dpaa2_fd *fd, |
581 | struct dpaa2_eth_fq *fq) |
582 | { |
583 | dma_addr_t addr = dpaa2_fd_get_addr(fd); |
584 | u8 fd_format = dpaa2_fd_get_format(fd); |
585 | void *vaddr; |
586 | struct sk_buff *skb; |
587 | struct rtnl_link_stats64 *percpu_stats; |
	struct dpaa2_eth_drv_stats *percpu_extras;
589 | struct device *dev = priv->net_dev->dev.parent; |
590 | bool recycle_rx_buf = false; |
591 | void *buf_data; |
592 | u32 xdp_act; |
593 | |
594 | /* Tracing point */ |
595 | trace_dpaa2_rx_fd(netdev: priv->net_dev, fd); |
596 | |
597 | vaddr = dpaa2_iova_to_virt(domain: priv->iommu_domain, iova_addr: addr); |
598 | dma_sync_single_for_cpu(dev, addr, size: priv->rx_buf_size, |
599 | dir: DMA_BIDIRECTIONAL); |
600 | |
601 | buf_data = vaddr + dpaa2_fd_get_offset(fd); |
602 | prefetch(buf_data); |
603 | |
604 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
605 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
606 | |
607 | if (fd_format == dpaa2_fd_single) { |
608 | xdp_act = dpaa2_eth_run_xdp(priv, ch, rx_fq: fq, fd: (struct dpaa2_fd *)fd, vaddr); |
609 | if (xdp_act != XDP_PASS) { |
610 | percpu_stats->rx_packets++; |
611 | percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); |
612 | return; |
613 | } |
614 | |
615 | skb = dpaa2_eth_copybreak(ch, fd, fd_vaddr: vaddr); |
616 | if (!skb) { |
617 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
618 | DMA_BIDIRECTIONAL); |
619 | skb = dpaa2_eth_build_linear_skb(ch, fd, fd_vaddr: vaddr); |
620 | } else { |
621 | recycle_rx_buf = true; |
622 | } |
623 | } else if (fd_format == dpaa2_fd_sg) { |
624 | WARN_ON(priv->xdp_prog); |
625 | |
626 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
627 | DMA_BIDIRECTIONAL); |
628 | skb = dpaa2_eth_build_frag_skb(priv, ch, sgt: buf_data); |
629 | free_pages(addr: (unsigned long)vaddr, order: 0); |
630 | percpu_extras->rx_sg_frames++; |
631 | percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); |
632 | } else { |
633 | /* We don't support any other format */ |
634 | goto err_frame_format; |
635 | } |
636 | |
637 | if (unlikely(!skb)) |
638 | goto err_build_skb; |
639 | |
640 | dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); |
641 | |
642 | if (recycle_rx_buf) |
643 | dpaa2_eth_recycle_buf(priv, ch, addr: dpaa2_fd_get_addr(fd)); |
644 | return; |
645 | |
646 | err_build_skb: |
647 | dpaa2_eth_free_rx_fd(priv, fd, vaddr); |
648 | err_frame_format: |
649 | percpu_stats->rx_dropped++; |
650 | } |
651 | |
652 | /* Processing of Rx frames received on the error FQ |
653 | * We check and print the error bits and then free the frame |
654 | */ |
655 | static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, |
656 | struct dpaa2_eth_channel *ch, |
657 | const struct dpaa2_fd *fd, |
658 | struct dpaa2_eth_fq *fq __always_unused) |
659 | { |
660 | struct device *dev = priv->net_dev->dev.parent; |
661 | dma_addr_t addr = dpaa2_fd_get_addr(fd); |
662 | u8 fd_format = dpaa2_fd_get_format(fd); |
663 | struct rtnl_link_stats64 *percpu_stats; |
664 | struct dpaa2_eth_trap_item *trap_item; |
665 | struct dpaa2_fapr *fapr; |
666 | struct sk_buff *skb; |
667 | void *buf_data; |
668 | void *vaddr; |
669 | |
670 | vaddr = dpaa2_iova_to_virt(domain: priv->iommu_domain, iova_addr: addr); |
671 | dma_sync_single_for_cpu(dev, addr, size: priv->rx_buf_size, |
672 | dir: DMA_BIDIRECTIONAL); |
673 | |
674 | buf_data = vaddr + dpaa2_fd_get_offset(fd); |
675 | |
676 | if (fd_format == dpaa2_fd_single) { |
677 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
678 | DMA_BIDIRECTIONAL); |
679 | skb = dpaa2_eth_build_linear_skb(ch, fd, fd_vaddr: vaddr); |
680 | } else if (fd_format == dpaa2_fd_sg) { |
681 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
682 | DMA_BIDIRECTIONAL); |
683 | skb = dpaa2_eth_build_frag_skb(priv, ch, sgt: buf_data); |
684 | free_pages(addr: (unsigned long)vaddr, order: 0); |
685 | } else { |
686 | /* We don't support any other format */ |
687 | dpaa2_eth_free_rx_fd(priv, fd, vaddr); |
688 | goto err_frame_format; |
689 | } |
690 | |
691 | fapr = dpaa2_get_fapr(buf_addr: vaddr, swa: false); |
692 | trap_item = dpaa2_eth_dl_get_trap(priv, fapr); |
693 | if (trap_item) |
694 | devlink_trap_report(devlink: priv->devlink, skb, trap_ctx: trap_item->trap_ctx, |
695 | in_devlink_port: &priv->devlink_port, NULL); |
696 | consume_skb(skb); |
697 | |
698 | err_frame_format: |
699 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
700 | percpu_stats->rx_errors++; |
701 | ch->buf_count--; |
702 | } |
703 | |
704 | /* Consume all frames pull-dequeued into the store. This is the simplest way to |
705 | * make sure we don't accidentally issue another volatile dequeue which would |
706 | * overwrite (leak) frames already in the store. |
707 | * |
708 | * Observance of NAPI budget is not our concern, leaving that to the caller. |
709 | */ |
710 | static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch, |
711 | struct dpaa2_eth_fq **src) |
712 | { |
713 | struct dpaa2_eth_priv *priv = ch->priv; |
714 | struct dpaa2_eth_fq *fq = NULL; |
715 | struct dpaa2_dq *dq; |
716 | const struct dpaa2_fd *fd; |
717 | int cleaned = 0, retries = 0; |
718 | int is_last; |
719 | |
720 | do { |
721 | dq = dpaa2_io_store_next(s: ch->store, is_last: &is_last); |
722 | if (unlikely(!dq)) { |
723 | /* If we're here, we *must* have placed a |
			 * volatile dequeue command, so keep reading through
725 | * the store until we get some sort of valid response |
726 | * token (either a valid frame or an "empty dequeue") |
727 | */ |
728 | if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) { |
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
731 | return -ETIMEDOUT; |
732 | } |
733 | continue; |
734 | } |
735 | |
736 | fd = dpaa2_dq_fd(dq); |
737 | fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); |
738 | |
739 | fq->consume(priv, ch, fd, fq); |
740 | cleaned++; |
741 | retries = 0; |
742 | } while (!is_last); |
743 | |
744 | if (!cleaned) |
745 | return 0; |
746 | |
747 | fq->stats.frames += cleaned; |
748 | ch->stats.frames += cleaned; |
749 | ch->stats.frames_per_cdan += cleaned; |
750 | |
751 | /* A dequeue operation only pulls frames from a single queue |
752 | * into the store. Return the frame queue as an out param. |
753 | */ |
754 | if (src) |
755 | *src = fq; |
756 | |
757 | return cleaned; |
758 | } |
759 | |
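/* Parse a PTP event packet and extract the message type, two-step flag,
 * transport type and the offsets of the correction and originTimestamp fields
 */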
760 | static int dpaa2_eth_ptp_parse(struct sk_buff *skb, |
761 | u8 *msgtype, u8 *twostep, u8 *udp, |
762 | u16 *correction_offset, |
763 | u16 *origintimestamp_offset) |
764 | { |
765 | unsigned int ptp_class; |
766 | struct ptp_header *hdr; |
767 | unsigned int type; |
768 | u8 *base; |
769 | |
770 | ptp_class = ptp_classify_raw(skb); |
771 | if (ptp_class == PTP_CLASS_NONE) |
772 | return -EINVAL; |
773 | |
774 | hdr = ptp_parse_header(skb, type: ptp_class); |
775 | if (!hdr) |
776 | return -EINVAL; |
777 | |
778 | *msgtype = ptp_get_msgtype(hdr, type: ptp_class); |
779 | *twostep = hdr->flag_field[0] & 0x2; |
780 | |
781 | type = ptp_class & PTP_CLASS_PMASK; |
782 | if (type == PTP_CLASS_IPV4 || |
783 | type == PTP_CLASS_IPV6) |
784 | *udp = 1; |
785 | else |
786 | *udp = 0; |
787 | |
788 | base = skb_mac_header(skb); |
789 | *correction_offset = (u8 *)&hdr->correction - base; |
790 | *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; |
791 | |
792 | return 0; |
793 | } |
794 | |
795 | /* Configure the egress frame annotation for timestamp update */ |
796 | static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, |
797 | struct dpaa2_fd *fd, |
798 | void *buf_start, |
799 | struct sk_buff *skb) |
800 | { |
801 | struct ptp_tstamp origin_timestamp; |
802 | u8 msgtype, twostep, udp; |
803 | struct dpaa2_faead *faead; |
804 | struct dpaa2_fas *fas; |
805 | struct timespec64 ts; |
806 | u16 offset1, offset2; |
807 | u32 ctrl, frc; |
808 | __le64 *ns; |
809 | u8 *data; |
810 | |
811 | /* Mark the egress frame annotation area as valid */ |
812 | frc = dpaa2_fd_get_frc(fd); |
813 | dpaa2_fd_set_frc(fd, frc: frc | DPAA2_FD_FRC_FAEADV); |
814 | |
815 | /* Set hardware annotation size */ |
816 | ctrl = dpaa2_fd_get_ctrl(fd); |
817 | dpaa2_fd_set_ctrl(fd, ctrl: ctrl | DPAA2_FD_CTRL_ASAL); |
818 | |
	/* enable UPD (update prepended data) bit in FAEAD field of
820 | * hardware frame annotation area |
821 | */ |
822 | ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; |
823 | faead = dpaa2_get_faead(buf_addr: buf_start, swa: true); |
824 | faead->ctrl = cpu_to_le32(ctrl); |
825 | |
826 | if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { |
827 | if (dpaa2_eth_ptp_parse(skb, msgtype: &msgtype, twostep: &twostep, udp: &udp, |
828 | correction_offset: &offset1, origintimestamp_offset: &offset2) || |
829 | msgtype != PTP_MSGTYPE_SYNC || twostep) { |
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
831 | return; |
832 | } |
833 | |
834 | /* Mark the frame annotation status as valid */ |
835 | frc = dpaa2_fd_get_frc(fd); |
836 | dpaa2_fd_set_frc(fd, frc: frc | DPAA2_FD_FRC_FASV); |
837 | |
838 | /* Mark the PTP flag for one step timestamping */ |
839 | fas = dpaa2_get_fas(buf_addr: buf_start, swa: true); |
840 | fas->status = cpu_to_le32(DPAA2_FAS_PTP); |
841 | |
842 | dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts); |
843 | ns = dpaa2_get_ts(buf_addr: buf_start, swa: true); |
844 | *ns = cpu_to_le64(timespec64_to_ns(&ts) / |
845 | DPAA2_PTP_CLK_PERIOD_NS); |
846 | |
847 | /* Update current time to PTP message originTimestamp field */ |
848 | ns_to_ptp_tstamp(tstamp: &origin_timestamp, le64_to_cpup(p: ns)); |
849 | data = skb_mac_header(skb); |
850 | *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb); |
851 | *(__be32 *)(data + offset2 + 2) = |
852 | htonl(origin_timestamp.sec_lsb); |
853 | *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec); |
854 | |
855 | if (priv->ptp_correction_off == offset1) |
856 | return; |
857 | |
858 | priv->dpaa2_set_onestep_params_cb(priv, offset1, udp); |
859 | priv->ptp_correction_off = offset1; |
860 | |
861 | } |
862 | } |
863 | |
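/* Get a zeroed buffer for the software annotation + HW SGT, from the per-CPU
 * cache when available
 */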
864 | void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) |
865 | { |
866 | struct dpaa2_eth_sgt_cache *sgt_cache; |
867 | void *sgt_buf = NULL; |
868 | int sgt_buf_size; |
869 | |
870 | sgt_cache = this_cpu_ptr(priv->sgt_cache); |
871 | sgt_buf_size = priv->tx_data_offset + |
872 | DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry); |
873 | |
874 | if (sgt_cache->count == 0) |
875 | sgt_buf = napi_alloc_frag_align(fragsz: sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN); |
876 | else |
877 | sgt_buf = sgt_cache->buf[--sgt_cache->count]; |
878 | if (!sgt_buf) |
879 | return NULL; |
880 | |
881 | memset(sgt_buf, 0, sgt_buf_size); |
882 | |
883 | return sgt_buf; |
884 | } |
885 | |
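/* Return an SGT buffer to the per-CPU cache, or free it if the cache is full */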
886 | void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) |
887 | { |
888 | struct dpaa2_eth_sgt_cache *sgt_cache; |
889 | |
890 | sgt_cache = this_cpu_ptr(priv->sgt_cache); |
891 | if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE) |
892 | skb_free_frag(addr: sgt_buf); |
893 | else |
894 | sgt_cache->buf[sgt_cache->count++] = sgt_buf; |
895 | } |
896 | |
897 | /* Create a frame descriptor based on a fragmented skb */ |
898 | static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, |
899 | struct sk_buff *skb, |
900 | struct dpaa2_fd *fd, |
901 | void **swa_addr) |
902 | { |
903 | struct device *dev = priv->net_dev->dev.parent; |
904 | void *sgt_buf = NULL; |
905 | dma_addr_t addr; |
906 | int nr_frags = skb_shinfo(skb)->nr_frags; |
907 | struct dpaa2_sg_entry *sgt; |
908 | int i, err; |
909 | int sgt_buf_size; |
910 | struct scatterlist *scl, *crt_scl; |
911 | int num_sg; |
912 | int num_dma_bufs; |
913 | struct dpaa2_eth_swa *swa; |
914 | |
915 | /* Create and map scatterlist. |
916 | * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have |
917 | * to go beyond nr_frags+1. |
918 | * Note: We don't support chained scatterlists |
919 | */ |
920 | if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) |
921 | return -EINVAL; |
922 | |
923 | scl = kmalloc_array(n: nr_frags + 1, size: sizeof(struct scatterlist), GFP_ATOMIC); |
924 | if (unlikely(!scl)) |
925 | return -ENOMEM; |
926 | |
927 | sg_init_table(scl, nr_frags + 1); |
928 | num_sg = skb_to_sgvec(skb, sg: scl, offset: 0, len: skb->len); |
929 | if (unlikely(num_sg < 0)) { |
930 | err = -ENOMEM; |
931 | goto dma_map_sg_failed; |
932 | } |
933 | num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); |
934 | if (unlikely(!num_dma_bufs)) { |
935 | err = -ENOMEM; |
936 | goto dma_map_sg_failed; |
937 | } |
938 | |
939 | /* Prepare the HW SGT structure */ |
940 | sgt_buf_size = priv->tx_data_offset + |
941 | sizeof(struct dpaa2_sg_entry) * num_dma_bufs; |
942 | sgt_buf = dpaa2_eth_sgt_get(priv); |
943 | if (unlikely(!sgt_buf)) { |
944 | err = -ENOMEM; |
945 | goto sgt_buf_alloc_failed; |
946 | } |
947 | |
948 | sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); |
949 | |
950 | /* Fill in the HW SGT structure. |
951 | * |
952 | * sgt_buf is zeroed out, so the following fields are implicit |
953 | * in all sgt entries: |
954 | * - offset is 0 |
955 | * - format is 'dpaa2_sg_single' |
956 | */ |
957 | for_each_sg(scl, crt_scl, num_dma_bufs, i) { |
958 | dpaa2_sg_set_addr(sg: &sgt[i], sg_dma_address(crt_scl)); |
959 | dpaa2_sg_set_len(sg: &sgt[i], sg_dma_len(crt_scl)); |
960 | } |
961 | dpaa2_sg_set_final(sg: &sgt[i - 1], final: true); |
962 | |
963 | /* Store the skb backpointer in the SGT buffer. |
964 | * Fit the scatterlist and the number of buffers alongside the |
965 | * skb backpointer in the software annotation area. We'll need |
966 | * all of them on Tx Conf. |
967 | */ |
968 | *swa_addr = (void *)sgt_buf; |
969 | swa = (struct dpaa2_eth_swa *)sgt_buf; |
970 | swa->type = DPAA2_ETH_SWA_SG; |
971 | swa->sg.skb = skb; |
972 | swa->sg.scl = scl; |
973 | swa->sg.num_sg = num_sg; |
974 | swa->sg.sgt_size = sgt_buf_size; |
975 | |
976 | /* Separately map the SGT buffer */ |
977 | addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); |
978 | if (unlikely(dma_mapping_error(dev, addr))) { |
979 | err = -ENOMEM; |
980 | goto dma_map_single_failed; |
981 | } |
982 | memset(fd, 0, sizeof(struct dpaa2_fd)); |
983 | dpaa2_fd_set_offset(fd, offset: priv->tx_data_offset); |
984 | dpaa2_fd_set_format(fd, format: dpaa2_fd_sg); |
985 | dpaa2_fd_set_addr(fd, addr); |
986 | dpaa2_fd_set_len(fd, len: skb->len); |
987 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
988 | |
989 | return 0; |
990 | |
991 | dma_map_single_failed: |
992 | dpaa2_eth_sgt_recycle(priv, sgt_buf); |
993 | sgt_buf_alloc_failed: |
994 | dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); |
995 | dma_map_sg_failed: |
996 | kfree(objp: scl); |
997 | return err; |
998 | } |
999 | |
1000 | /* Create a SG frame descriptor based on a linear skb. |
1001 | * |
1002 | * This function is used on the Tx path when the skb headroom is not large |
1003 | * enough for the HW requirements, thus instead of realloc-ing the skb we |
1004 | * create a SG frame descriptor with only one entry. |
1005 | */ |
1006 | static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, |
1007 | struct sk_buff *skb, |
1008 | struct dpaa2_fd *fd, |
1009 | void **swa_addr) |
1010 | { |
1011 | struct device *dev = priv->net_dev->dev.parent; |
1012 | struct dpaa2_sg_entry *sgt; |
1013 | struct dpaa2_eth_swa *swa; |
1014 | dma_addr_t addr, sgt_addr; |
1015 | void *sgt_buf = NULL; |
1016 | int sgt_buf_size; |
1017 | int err; |
1018 | |
1019 | /* Prepare the HW SGT structure */ |
1020 | sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); |
1021 | sgt_buf = dpaa2_eth_sgt_get(priv); |
1022 | if (unlikely(!sgt_buf)) |
1023 | return -ENOMEM; |
1024 | sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); |
1025 | |
1026 | addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL); |
1027 | if (unlikely(dma_mapping_error(dev, addr))) { |
1028 | err = -ENOMEM; |
1029 | goto data_map_failed; |
1030 | } |
1031 | |
1032 | /* Fill in the HW SGT structure */ |
1033 | dpaa2_sg_set_addr(sg: sgt, addr); |
1034 | dpaa2_sg_set_len(sg: sgt, len: skb->len); |
1035 | dpaa2_sg_set_final(sg: sgt, final: true); |
1036 | |
1037 | /* Store the skb backpointer in the SGT buffer */ |
1038 | *swa_addr = (void *)sgt_buf; |
1039 | swa = (struct dpaa2_eth_swa *)sgt_buf; |
1040 | swa->type = DPAA2_ETH_SWA_SINGLE; |
1041 | swa->single.skb = skb; |
1042 | swa->single.sgt_size = sgt_buf_size; |
1043 | |
1044 | /* Separately map the SGT buffer */ |
1045 | sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); |
1046 | if (unlikely(dma_mapping_error(dev, sgt_addr))) { |
1047 | err = -ENOMEM; |
1048 | goto sgt_map_failed; |
1049 | } |
1050 | |
1051 | memset(fd, 0, sizeof(struct dpaa2_fd)); |
1052 | dpaa2_fd_set_offset(fd, offset: priv->tx_data_offset); |
1053 | dpaa2_fd_set_format(fd, format: dpaa2_fd_sg); |
1054 | dpaa2_fd_set_addr(fd, addr: sgt_addr); |
1055 | dpaa2_fd_set_len(fd, len: skb->len); |
1056 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
1057 | |
1058 | return 0; |
1059 | |
1060 | sgt_map_failed: |
1061 | dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL); |
1062 | data_map_failed: |
1063 | dpaa2_eth_sgt_recycle(priv, sgt_buf); |
1064 | |
1065 | return err; |
1066 | } |
1067 | |
1068 | /* Create a frame descriptor based on a linear skb */ |
1069 | static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, |
1070 | struct sk_buff *skb, |
1071 | struct dpaa2_fd *fd, |
1072 | void **swa_addr) |
1073 | { |
1074 | struct device *dev = priv->net_dev->dev.parent; |
1075 | u8 *buffer_start, *aligned_start; |
1076 | struct dpaa2_eth_swa *swa; |
1077 | dma_addr_t addr; |
1078 | |
1079 | buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); |
1080 | aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, |
1081 | DPAA2_ETH_TX_BUF_ALIGN); |
1082 | if (aligned_start >= skb->head) |
1083 | buffer_start = aligned_start; |
1084 | else |
1085 | return -ENOMEM; |
1086 | |
1087 | /* Store a backpointer to the skb at the beginning of the buffer |
1088 | * (in the private data area) such that we can release it |
1089 | * on Tx confirm |
1090 | */ |
1091 | *swa_addr = (void *)buffer_start; |
1092 | swa = (struct dpaa2_eth_swa *)buffer_start; |
1093 | swa->type = DPAA2_ETH_SWA_SINGLE; |
1094 | swa->single.skb = skb; |
1095 | |
1096 | addr = dma_map_single(dev, buffer_start, |
1097 | skb_tail_pointer(skb) - buffer_start, |
1098 | DMA_BIDIRECTIONAL); |
1099 | if (unlikely(dma_mapping_error(dev, addr))) |
1100 | return -ENOMEM; |
1101 | |
1102 | memset(fd, 0, sizeof(struct dpaa2_fd)); |
1103 | dpaa2_fd_set_addr(fd, addr); |
1104 | dpaa2_fd_set_offset(fd, offset: (u16)(skb->data - buffer_start)); |
1105 | dpaa2_fd_set_len(fd, len: skb->len); |
1106 | dpaa2_fd_set_format(fd, format: dpaa2_fd_single); |
1107 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
1108 | |
1109 | return 0; |
1110 | } |
1111 | |
1112 | /* FD freeing routine on the Tx path |
1113 | * |
1114 | * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb |
1115 | * back-pointed to is also freed. |
1116 | * This can be called either from dpaa2_eth_tx_conf() or on the error path of |
1117 | * dpaa2_eth_tx(). |
1118 | */ |
1119 | void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, |
1120 | struct dpaa2_eth_channel *ch, |
1121 | struct dpaa2_eth_fq *fq, |
1122 | const struct dpaa2_fd *fd, bool in_napi) |
1123 | { |
1124 | struct device *dev = priv->net_dev->dev.parent; |
1125 | dma_addr_t fd_addr, sg_addr; |
1126 | struct sk_buff *skb = NULL; |
1127 | unsigned char *buffer_start; |
1128 | struct dpaa2_eth_swa *swa; |
1129 | u8 fd_format = dpaa2_fd_get_format(fd); |
1130 | u32 fd_len = dpaa2_fd_get_len(fd); |
1131 | struct dpaa2_sg_entry *sgt; |
1132 | int should_free_skb = 1; |
1133 | void *tso_hdr; |
1134 | int i; |
1135 | |
1136 | fd_addr = dpaa2_fd_get_addr(fd); |
1137 | buffer_start = dpaa2_iova_to_virt(domain: priv->iommu_domain, iova_addr: fd_addr); |
1138 | swa = (struct dpaa2_eth_swa *)buffer_start; |
1139 | |
1140 | if (fd_format == dpaa2_fd_single) { |
1141 | if (swa->type == DPAA2_ETH_SWA_SINGLE) { |
1142 | skb = swa->single.skb; |
1143 | /* Accessing the skb buffer is safe before dma unmap, |
1144 | * because we didn't map the actual skb shell. |
1145 | */ |
1146 | dma_unmap_single(dev, fd_addr, |
1147 | skb_tail_pointer(skb) - buffer_start, |
1148 | DMA_BIDIRECTIONAL); |
1149 | } else { |
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
1151 | dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, |
1152 | DMA_BIDIRECTIONAL); |
1153 | } |
1154 | } else if (fd_format == dpaa2_fd_sg) { |
1155 | if (swa->type == DPAA2_ETH_SWA_SG) { |
1156 | skb = swa->sg.skb; |
1157 | |
1158 | /* Unmap the scatterlist */ |
1159 | dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, |
1160 | DMA_BIDIRECTIONAL); |
1161 | kfree(objp: swa->sg.scl); |
1162 | |
1163 | /* Unmap the SGT buffer */ |
1164 | dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, |
1165 | DMA_BIDIRECTIONAL); |
1166 | } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) { |
1167 | skb = swa->tso.skb; |
1168 | |
1169 | sgt = (struct dpaa2_sg_entry *)(buffer_start + |
1170 | priv->tx_data_offset); |
1171 | |
1172 | /* Unmap the SGT buffer */ |
1173 | dma_unmap_single(dev, fd_addr, swa->tso.sgt_size, |
1174 | DMA_BIDIRECTIONAL); |
1175 | |
1176 | /* Unmap and free the header */ |
1177 | tso_hdr = dpaa2_iova_to_virt(domain: priv->iommu_domain, iova_addr: dpaa2_sg_get_addr(sg: sgt)); |
1178 | dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE, |
1179 | DMA_TO_DEVICE); |
1180 | kfree(objp: tso_hdr); |
1181 | |
1182 | /* Unmap the other SG entries for the data */ |
1183 | for (i = 1; i < swa->tso.num_sg; i++) |
1184 | dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]), |
1185 | dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE); |
1186 | |
1187 | if (!swa->tso.is_last_fd) |
1188 | should_free_skb = 0; |
1189 | } else if (swa->type == DPAA2_ETH_SWA_XSK) { |
1190 | /* Unmap the SGT Buffer */ |
1191 | dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size, |
1192 | DMA_BIDIRECTIONAL); |
1193 | } else { |
1194 | skb = swa->single.skb; |
1195 | |
1196 | /* Unmap the SGT Buffer */ |
1197 | dma_unmap_single(dev, fd_addr, swa->single.sgt_size, |
1198 | DMA_BIDIRECTIONAL); |
1199 | |
1200 | sgt = (struct dpaa2_sg_entry *)(buffer_start + |
1201 | priv->tx_data_offset); |
1202 | sg_addr = dpaa2_sg_get_addr(sg: sgt); |
1203 | dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL); |
1204 | } |
1205 | } else { |
1206 | netdev_dbg(priv->net_dev, "Invalid FD format\n" ); |
1207 | return; |
1208 | } |
1209 | |
1210 | if (swa->type == DPAA2_ETH_SWA_XSK) { |
1211 | ch->xsk_tx_pkts_sent++; |
1212 | dpaa2_eth_sgt_recycle(priv, sgt_buf: buffer_start); |
1213 | return; |
1214 | } |
1215 | |
1216 | if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { |
1217 | fq->dq_frames++; |
1218 | fq->dq_bytes += fd_len; |
1219 | } |
1220 | |
1221 | if (swa->type == DPAA2_ETH_SWA_XDP) { |
1222 | xdp_return_frame(xdpf: swa->xdp.xdpf); |
1223 | return; |
1224 | } |
1225 | |
1226 | /* Get the timestamp value */ |
1227 | if (swa->type != DPAA2_ETH_SWA_SW_TSO) { |
1228 | if (skb->cb[0] == TX_TSTAMP) { |
1229 | struct skb_shared_hwtstamps shhwtstamps; |
1230 | __le64 *ts = dpaa2_get_ts(buf_addr: buffer_start, swa: true); |
1231 | u64 ns; |
1232 | |
1233 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
1234 | |
1235 | ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(p: ts); |
1236 | shhwtstamps.hwtstamp = ns_to_ktime(ns); |
1237 | skb_tstamp_tx(orig_skb: skb, hwtstamps: &shhwtstamps); |
1238 | } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { |
1239 | mutex_unlock(lock: &priv->onestep_tstamp_lock); |
1240 | } |
1241 | } |
1242 | |
1243 | /* Free SGT buffer allocated on tx */ |
1244 | if (fd_format != dpaa2_fd_single) |
1245 | dpaa2_eth_sgt_recycle(priv, sgt_buf: buffer_start); |
1246 | |
1247 | /* Move on with skb release. If we are just confirming multiple FDs |
1248 | * from the same TSO skb then only the last one will need to free the |
1249 | * skb. |
1250 | */ |
1251 | if (should_free_skb) |
1252 | napi_consume_skb(skb, budget: in_napi); |
1253 | } |
1254 | |
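/* Software TSO: segment the skb and build one S/G frame descriptor per
 * MSS-sized frame, each carrying its own copy of the protocol headers
 */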
1255 | static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv, |
1256 | struct sk_buff *skb, struct dpaa2_fd *fd, |
1257 | int *num_fds, u32 *total_fds_len) |
1258 | { |
1259 | struct device *dev = priv->net_dev->dev.parent; |
1260 | int hdr_len, total_len, data_left, fd_len; |
1261 | int num_sge, err, i, sgt_buf_size; |
1262 | struct dpaa2_fd *fd_start = fd; |
1263 | struct dpaa2_sg_entry *sgt; |
1264 | struct dpaa2_eth_swa *swa; |
1265 | dma_addr_t sgt_addr, addr; |
1266 | dma_addr_t tso_hdr_dma; |
1267 | unsigned int index = 0; |
1268 | struct tso_t tso; |
1269 | char *tso_hdr; |
1270 | void *sgt_buf; |
1271 | |
1272 | /* Initialize the TSO handler, and prepare the first payload */ |
1273 | hdr_len = tso_start(skb, tso: &tso); |
1274 | *total_fds_len = 0; |
1275 | |
1276 | total_len = skb->len - hdr_len; |
1277 | while (total_len > 0) { |
1278 | /* Prepare the HW SGT structure for this frame */ |
1279 | sgt_buf = dpaa2_eth_sgt_get(priv); |
1280 | if (unlikely(!sgt_buf)) { |
1281 | netdev_err(dev: priv->net_dev, format: "dpaa2_eth_sgt_get() failed\n" ); |
1282 | err = -ENOMEM; |
1283 | goto err_sgt_get; |
1284 | } |
1285 | sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); |
1286 | |
1287 | /* Determine the data length of this frame */ |
1288 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); |
1289 | total_len -= data_left; |
1290 | fd_len = data_left + hdr_len; |
1291 | |
1292 | /* Prepare packet headers: MAC + IP + TCP */ |
1293 | tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC); |
1294 | if (!tso_hdr) { |
1295 | err = -ENOMEM; |
1296 | goto err_alloc_tso_hdr; |
1297 | } |
1298 | |
1299 | tso_build_hdr(skb, hdr: tso_hdr, tso: &tso, size: data_left, is_last: total_len == 0); |
1300 | tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE); |
1301 | if (dma_mapping_error(dev, dma_addr: tso_hdr_dma)) { |
1302 | netdev_err(dev: priv->net_dev, format: "dma_map_single(tso_hdr) failed\n" ); |
1303 | err = -ENOMEM; |
1304 | goto err_map_tso_hdr; |
1305 | } |
1306 | |
1307 | /* Setup the SG entry for the header */ |
1308 | dpaa2_sg_set_addr(sg: sgt, addr: tso_hdr_dma); |
1309 | dpaa2_sg_set_len(sg: sgt, len: hdr_len); |
1310 | dpaa2_sg_set_final(sg: sgt, final: data_left <= 0); |
1311 | |
1312 | /* Compose the SG entries for each fragment of data */ |
1313 | num_sge = 1; |
1314 | while (data_left > 0) { |
1315 | int size; |
1316 | |
1317 | /* Move to the next SG entry */ |
1318 | sgt++; |
1319 | size = min_t(int, tso.size, data_left); |
1320 | |
1321 | addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE); |
1322 | if (dma_mapping_error(dev, dma_addr: addr)) { |
1323 | netdev_err(dev: priv->net_dev, format: "dma_map_single(tso.data) failed\n" ); |
1324 | err = -ENOMEM; |
1325 | goto err_map_data; |
1326 | } |
1327 | dpaa2_sg_set_addr(sg: sgt, addr); |
1328 | dpaa2_sg_set_len(sg: sgt, len: size); |
1329 | dpaa2_sg_set_final(sg: sgt, final: size == data_left); |
1330 | |
1331 | num_sge++; |
1332 | |
1333 | /* Build the data for the __next__ fragment */ |
1334 | data_left -= size; |
1335 | tso_build_data(skb, tso: &tso, size); |
1336 | } |
1337 | |
1338 | /* Store the skb backpointer in the SGT buffer */ |
1339 | sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry); |
1340 | swa = (struct dpaa2_eth_swa *)sgt_buf; |
1341 | swa->type = DPAA2_ETH_SWA_SW_TSO; |
1342 | swa->tso.skb = skb; |
1343 | swa->tso.num_sg = num_sge; |
1344 | swa->tso.sgt_size = sgt_buf_size; |
1345 | swa->tso.is_last_fd = total_len == 0 ? 1 : 0; |
1346 | |
1347 | /* Separately map the SGT buffer */ |
1348 | sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); |
1349 | if (unlikely(dma_mapping_error(dev, sgt_addr))) { |
1350 | netdev_err(dev: priv->net_dev, format: "dma_map_single(sgt_buf) failed\n" ); |
1351 | err = -ENOMEM; |
1352 | goto err_map_sgt; |
1353 | } |
1354 | |
1355 | /* Setup the frame descriptor */ |
1356 | memset(fd, 0, sizeof(struct dpaa2_fd)); |
1357 | dpaa2_fd_set_offset(fd, offset: priv->tx_data_offset); |
1358 | dpaa2_fd_set_format(fd, format: dpaa2_fd_sg); |
1359 | dpaa2_fd_set_addr(fd, addr: sgt_addr); |
1360 | dpaa2_fd_set_len(fd, len: fd_len); |
1361 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
1362 | |
1363 | *total_fds_len += fd_len; |
1364 | /* Advance to the next frame descriptor */ |
1365 | fd++; |
1366 | index++; |
1367 | } |
1368 | |
1369 | *num_fds = index; |
1370 | |
1371 | return 0; |
1372 | |
1373 | err_map_sgt: |
1374 | err_map_data: |
1375 | /* Unmap all the data S/G entries for the current FD */ |
1376 | sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); |
1377 | for (i = 1; i < num_sge; i++) |
1378 | dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]), |
1379 | dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE); |
1380 | |
1381 | /* Unmap the header entry */ |
1382 | dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE); |
1383 | err_map_tso_hdr: |
1384 | kfree(objp: tso_hdr); |
1385 | err_alloc_tso_hdr: |
1386 | dpaa2_eth_sgt_recycle(priv, sgt_buf); |
1387 | err_sgt_get: |
1388 | /* Free all the other FDs that were already fully created */ |
1389 | for (i = 0; i < index; i++) |
1390 | dpaa2_eth_free_tx_fd(priv, NULL, NULL, fd: &fd_start[i], in_napi: false); |
1391 | |
1392 | return err; |
1393 | } |
1394 | |
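/* Build the frame descriptor(s) for an skb and enqueue them on the Tx queue
 * selected by the stack
 */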
1395 | static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb, |
1396 | struct net_device *net_dev) |
1397 | { |
1398 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
1399 | int total_enqueued = 0, retries = 0, enqueued; |
	struct dpaa2_eth_drv_stats *percpu_extras;
1401 | struct rtnl_link_stats64 *percpu_stats; |
1402 | unsigned int needed_headroom; |
1403 | int num_fds = 1, max_retries; |
1404 | struct dpaa2_eth_fq *fq; |
1405 | struct netdev_queue *nq; |
1406 | struct dpaa2_fd *fd; |
1407 | u16 queue_mapping; |
1408 | void *swa = NULL; |
1409 | u8 prio = 0; |
1410 | int err, i; |
1411 | u32 fd_len; |
1412 | |
1413 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
1414 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
1415 | fd = (this_cpu_ptr(priv->fd))->array; |
1416 | |
1417 | needed_headroom = dpaa2_eth_needed_headroom(skb); |
1418 | |
1419 | /* We'll be holding a back-reference to the skb until Tx Confirmation; |
1420 | * we don't want that overwritten by a concurrent Tx with a cloned skb. |
1421 | */ |
1422 | skb = skb_unshare(skb, GFP_ATOMIC); |
1423 | if (unlikely(!skb)) { |
1424 | /* skb_unshare() has already freed the skb */ |
1425 | percpu_stats->tx_dropped++; |
1426 | return NETDEV_TX_OK; |
1427 | } |
1428 | |
1429 | /* Setup the FD fields */ |
1430 | |
1431 | if (skb_is_gso(skb)) { |
1432 | err = dpaa2_eth_build_gso_fd(priv, skb, fd, num_fds: &num_fds, total_fds_len: &fd_len); |
1433 | percpu_extras->tx_sg_frames += num_fds; |
1434 | percpu_extras->tx_sg_bytes += fd_len; |
1435 | percpu_extras->tx_tso_frames += num_fds; |
1436 | percpu_extras->tx_tso_bytes += fd_len; |
1437 | } else if (skb_is_nonlinear(skb)) { |
1438 | err = dpaa2_eth_build_sg_fd(priv, skb, fd, swa_addr: &swa); |
1439 | percpu_extras->tx_sg_frames++; |
1440 | percpu_extras->tx_sg_bytes += skb->len; |
1441 | fd_len = dpaa2_fd_get_len(fd); |
1442 | } else if (skb_headroom(skb) < needed_headroom) { |
1443 | err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, swa_addr: &swa); |
1444 | percpu_extras->tx_sg_frames++; |
1445 | percpu_extras->tx_sg_bytes += skb->len; |
1446 | percpu_extras->tx_converted_sg_frames++; |
1447 | percpu_extras->tx_converted_sg_bytes += skb->len; |
1448 | fd_len = dpaa2_fd_get_len(fd); |
1449 | } else { |
1450 | err = dpaa2_eth_build_single_fd(priv, skb, fd, swa_addr: &swa); |
1451 | fd_len = dpaa2_fd_get_len(fd); |
1452 | } |
1453 | |
1454 | if (unlikely(err)) { |
1455 | percpu_stats->tx_dropped++; |
1456 | goto err_build_fd; |
1457 | } |
1458 | |
1459 | if (swa && skb->cb[0]) |
1460 | dpaa2_eth_enable_tx_tstamp(priv, fd, buf_start: swa, skb); |
1461 | |
1462 | /* Tracing point */ |
1463 | for (i = 0; i < num_fds; i++) |
1464 | trace_dpaa2_tx_fd(netdev: net_dev, fd: &fd[i]); |
1465 | |
1466 | /* TxConf FQ selection relies on queue id from the stack. |
1467 | * In case of a forwarded frame from another DPNI interface, we choose |
1468 | * a queue affined to the same core that processed the Rx frame |
1469 | */ |
1470 | queue_mapping = skb_get_queue_mapping(skb); |
1471 | |
1472 | if (net_dev->num_tc) { |
1473 | prio = netdev_txq_to_tc(dev: net_dev, txq: queue_mapping); |
1474 | /* Hardware interprets priority level 0 as being the highest, |
1475 | * so we need to do a reverse mapping to the netdev tc index |
1476 | */ |
1477 | prio = net_dev->num_tc - prio - 1; |
1478 | /* We have only one FQ array entry for all Tx hardware queues |
1479 | * with the same flow id (but different priority levels) |
1480 | */ |
1481 | queue_mapping %= dpaa2_eth_queue_count(priv); |
1482 | } |
1483 | fq = &priv->fq[queue_mapping]; |
1484 | nq = netdev_get_tx_queue(dev: net_dev, index: queue_mapping); |
1485 | netdev_tx_sent_queue(dev_queue: nq, bytes: fd_len); |
1486 | |
1487 | /* Everything that happens after this enqueues might race with |
1488 | * the Tx confirmation callback for this frame |
1489 | */ |
1490 | max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES; |
1491 | while (total_enqueued < num_fds && retries < max_retries) { |
1492 | err = priv->enqueue(priv, fq, &fd[total_enqueued], |
1493 | prio, num_fds - total_enqueued, &enqueued); |
1494 | if (err == -EBUSY) { |
1495 | retries++; |
1496 | continue; |
1497 | } |
1498 | |
1499 | total_enqueued += enqueued; |
1500 | } |
1501 | percpu_extras->tx_portal_busy += retries; |
1502 | |
1503 | if (unlikely(err < 0)) { |
1504 | percpu_stats->tx_errors++; |
1505 | /* Clean up everything, including freeing the skb */ |
1506 | dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, in_napi: false); |
1507 | netdev_tx_completed_queue(dev_queue: nq, pkts: 1, bytes: fd_len); |
1508 | } else { |
1509 | percpu_stats->tx_packets += total_enqueued; |
1510 | percpu_stats->tx_bytes += fd_len; |
1511 | } |
1512 | |
1513 | return NETDEV_TX_OK; |
1514 | |
1515 | err_build_fd: |
1516 | dev_kfree_skb(skb); |
1517 | |
1518 | return NETDEV_TX_OK; |
1519 | } |
1520 | |
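/* Worker transmitting the queued one-step timestamping PTP Sync packets one at
 * a time, serialized by the onestep_tstamp_lock mutex
 */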
1521 | static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work) |
1522 | { |
1523 | struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv, |
1524 | tx_onestep_tstamp); |
1525 | struct sk_buff *skb; |
1526 | |
1527 | while (true) { |
1528 | skb = skb_dequeue(list: &priv->tx_skbs); |
1529 | if (!skb) |
1530 | return; |
1531 | |
1532 | /* Lock just before TX one-step timestamping packet, |
1533 | * and release the lock in dpaa2_eth_free_tx_fd when |
1534 | * confirm the packet has been sent on hardware, or |
1535 | * when clean up during transmit failure. |
1536 | */ |
1537 | mutex_lock(&priv->onestep_tstamp_lock); |
1538 | __dpaa2_eth_tx(skb, net_dev: priv->net_dev); |
1539 | } |
1540 | } |
1541 | |
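/* Main Tx entry point: record the per-skb timestamping request and defer
 * one-step PTP Sync packets to the dedicated worker
 */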
1542 | static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) |
1543 | { |
1544 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
1545 | u8 msgtype, twostep, udp; |
1546 | u16 offset1, offset2; |
1547 | |
1548 | /* Utilize skb->cb[0] for timestamping request per skb */ |
1549 | skb->cb[0] = 0; |
1550 | |
1551 | if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) { |
1552 | if (priv->tx_tstamp_type == HWTSTAMP_TX_ON) |
1553 | skb->cb[0] = TX_TSTAMP; |
1554 | else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) |
1555 | skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC; |
1556 | } |
1557 | |
1558 | /* TX for one-step timestamping PTP Sync packet */ |
1559 | if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { |
1560 | if (!dpaa2_eth_ptp_parse(skb, msgtype: &msgtype, twostep: &twostep, udp: &udp, |
1561 | correction_offset: &offset1, origintimestamp_offset: &offset2)) |
1562 | if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) { |
1563 | skb_queue_tail(list: &priv->tx_skbs, newsk: skb); |
1564 | queue_work(wq: priv->dpaa2_ptp_wq, |
1565 | work: &priv->tx_onestep_tstamp); |
1566 | return NETDEV_TX_OK; |
1567 | } |
1568 | /* Use two-step timestamping if not one-step timestamping |
1569 | * PTP Sync packet |
1570 | */ |
1571 | skb->cb[0] = TX_TSTAMP; |
1572 | } |
1573 | |
1574 | /* TX for other packets */ |
1575 | return __dpaa2_eth_tx(skb, net_dev); |
1576 | } |
1577 | |
1578 | /* Tx confirmation frame processing routine */ |
1579 | static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, |
1580 | struct dpaa2_eth_channel *ch, |
1581 | const struct dpaa2_fd *fd, |
1582 | struct dpaa2_eth_fq *fq) |
1583 | { |
1584 | struct rtnl_link_stats64 *percpu_stats; |
	struct dpaa2_eth_drv_stats *percpu_extras;
1586 | u32 fd_len = dpaa2_fd_get_len(fd); |
1587 | u32 fd_errors; |
1588 | |
1589 | /* Tracing point */ |
1590 | trace_dpaa2_tx_conf_fd(netdev: priv->net_dev, fd); |
1591 | |
1592 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
1593 | percpu_extras->tx_conf_frames++; |
1594 | percpu_extras->tx_conf_bytes += fd_len; |
1595 | ch->stats.bytes_per_cdan += fd_len; |
1596 | |
1597 | /* Check frame errors in the FD field */ |
1598 | fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; |
1599 | dpaa2_eth_free_tx_fd(priv, ch, fq, fd, in_napi: true); |
1600 | |
1601 | if (likely(!fd_errors)) |
1602 | return; |
1603 | |
1604 | if (net_ratelimit()) |
1605 | netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n" , |
1606 | fd_errors); |
1607 | |
1608 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
1609 | /* Tx-conf logically pertains to the egress path. */ |
1610 | percpu_stats->tx_errors++; |
1611 | } |
1612 | |
1613 | static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv, |
1614 | bool enable) |
1615 | { |
1616 | int err; |
1617 | |
1618 | err = dpni_enable_vlan_filter(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, en: enable); |
1619 | |
1620 | if (err) { |
1621 | netdev_err(dev: priv->net_dev, |
1622 | format: "dpni_enable_vlan_filter failed\n" ); |
1623 | return err; |
1624 | } |
1625 | |
1626 | return 0; |
1627 | } |
1628 | |
1629 | static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) |
1630 | { |
1631 | int err; |
1632 | |
1633 | err = dpni_set_offload(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
1634 | type: DPNI_OFF_RX_L3_CSUM, config: enable); |
1635 | if (err) { |
1636 | netdev_err(dev: priv->net_dev, |
1637 | format: "dpni_set_offload(RX_L3_CSUM) failed\n" ); |
1638 | return err; |
1639 | } |
1640 | |
1641 | err = dpni_set_offload(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
1642 | type: DPNI_OFF_RX_L4_CSUM, config: enable); |
1643 | if (err) { |
1644 | netdev_err(dev: priv->net_dev, |
1645 | format: "dpni_set_offload(RX_L4_CSUM) failed\n" ); |
1646 | return err; |
1647 | } |
1648 | |
1649 | return 0; |
1650 | } |
1651 | |
1652 | static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) |
1653 | { |
1654 | int err; |
1655 | |
1656 | err = dpni_set_offload(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
1657 | type: DPNI_OFF_TX_L3_CSUM, config: enable); |
1658 | if (err) { |
1659 | netdev_err(dev: priv->net_dev, format: "dpni_set_offload(TX_L3_CSUM) failed\n" ); |
1660 | return err; |
1661 | } |
1662 | |
1663 | err = dpni_set_offload(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
1664 | type: DPNI_OFF_TX_L4_CSUM, config: enable); |
1665 | if (err) { |
1666 | netdev_err(dev: priv->net_dev, format: "dpni_set_offload(TX_L4_CSUM) failed\n" ); |
1667 | return err; |
1668 | } |
1669 | |
1670 | return 0; |
1671 | } |
1672 | |
1673 | /* Perform a single release command to add buffers |
1674 | * to the specified buffer pool |
1675 | */ |
1676 | static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, |
1677 | struct dpaa2_eth_channel *ch) |
1678 | { |
1679 | struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD]; |
1680 | struct device *dev = priv->net_dev->dev.parent; |
1681 | u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; |
1682 | struct dpaa2_eth_swa *swa; |
1683 | struct page *page; |
1684 | dma_addr_t addr; |
1685 | int retries = 0; |
1686 | int i = 0, err; |
1687 | u32 batch; |
1688 | |
1689 | /* Allocate buffers visible to WRIOP */ |
1690 | if (!ch->xsk_zc) { |
1691 | for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { |
1692 | /* Also allocate skb shared info and alignment padding. |
1693 | * There is one page for each Rx buffer. WRIOP sees |
1694 | * the entire page except for a tailroom reserved for |
1695 | * skb shared info |
1696 | */ |
1697 | page = dev_alloc_pages(order: 0); |
1698 | if (!page) |
1699 | goto err_alloc; |
1700 | |
1701 | addr = dma_map_page(dev, page, 0, priv->rx_buf_size, |
1702 | DMA_BIDIRECTIONAL); |
1703 | if (unlikely(dma_mapping_error(dev, addr))) |
1704 | goto err_map; |
1705 | |
1706 | buf_array[i] = addr; |
1707 | |
1708 | /* tracing point */ |
1709 | trace_dpaa2_eth_buf_seed(netdev: priv->net_dev, |
1710 | page_address(page), |
1711 | DPAA2_ETH_RX_BUF_RAW_SIZE, |
1712 | dma_addr: addr, map_size: priv->rx_buf_size, |
1713 | bpid: ch->bp->bpid); |
1714 | } |
1715 | } else if (xsk_buff_can_alloc(pool: ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) { |
1716 | /* Allocate XSK buffers for AF_XDP fast path in batches |
1717 | * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot |
1718 | * provide enough buffers at the moment |
1719 | */ |
1720 | batch = xsk_buff_alloc_batch(pool: ch->xsk_pool, xdp: xdp_buffs, |
1721 | DPAA2_ETH_BUFS_PER_CMD); |
1722 | if (!batch) |
1723 | goto err_alloc; |
1724 | |
1725 | for (i = 0; i < batch; i++) { |
1726 | swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start + |
1727 | DPAA2_ETH_RX_HWA_SIZE); |
1728 | swa->xsk.xdp_buff = xdp_buffs[i]; |
1729 | |
1730 | addr = xsk_buff_xdp_get_frame_dma(xdp: xdp_buffs[i]); |
1731 | if (unlikely(dma_mapping_error(dev, addr))) |
1732 | goto err_map; |
1733 | |
1734 | buf_array[i] = addr; |
1735 | |
1736 | trace_dpaa2_xsk_buf_seed(netdev: priv->net_dev, |
1737 | vaddr: xdp_buffs[i]->data_hard_start, |
1738 | DPAA2_ETH_RX_BUF_RAW_SIZE, |
1739 | dma_addr: addr, map_size: priv->rx_buf_size, |
1740 | bpid: ch->bp->bpid); |
1741 | } |
1742 | } |
1743 | |
1744 | release_bufs: |
1745 | /* In case the portal is busy, retry until successful */ |
1746 | while ((err = dpaa2_io_service_release(d: ch->dpio, bpid: ch->bp->bpid, |
1747 | buffers: buf_array, num_buffers: i)) == -EBUSY) { |
1748 | if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) |
1749 | break; |
1750 | cpu_relax(); |
1751 | } |
1752 | |
1753 | /* If release command failed, clean up and bail out; |
1754 | * not much else we can do about it |
1755 | */ |
1756 | if (err) { |
1757 | dpaa2_eth_free_bufs(priv, buf_array, count: i, xsk_zc: ch->xsk_zc); |
1758 | return 0; |
1759 | } |
1760 | |
1761 | return i; |
1762 | |
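/* If a DMA mapping failed, free the current page (regular path) or
 * return the remaining XSK buffers to the pool, then release to
 * hardware whatever was already mapped successfully.
 */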
1763 | err_map: |
1764 | if (!ch->xsk_zc) { |
1765 | __free_pages(page, order: 0); |
1766 | } else { |
1767 | for (; i < batch; i++) |
1768 | xsk_buff_free(xdp: xdp_buffs[i]); |
1769 | } |
1770 | err_alloc: |
1771 | /* If we managed to allocate at least some buffers, |
1772 | * release them to hardware |
1773 | */ |
1774 | if (i) |
1775 | goto release_bufs; |
1776 | |
1777 | return 0; |
1778 | } |
1779 | |
1780 | static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, |
1781 | struct dpaa2_eth_channel *ch) |
1782 | { |
1783 | int i; |
1784 | int new_count; |
1785 | |
1786 | for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) { |
1787 | new_count = dpaa2_eth_add_bufs(priv, ch); |
1788 | ch->buf_count += new_count; |
1789 | |
1790 | if (new_count < DPAA2_ETH_BUFS_PER_CMD) |
1791 | return -ENOMEM; |
1792 | } |
1793 | |
1794 | return 0; |
1795 | } |
1796 | |
1797 | static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv) |
1798 | { |
1799 | struct net_device *net_dev = priv->net_dev; |
1800 | struct dpaa2_eth_channel *channel; |
1801 | int i, err = 0; |
1802 | |
1803 | for (i = 0; i < priv->num_channels; i++) { |
1804 | channel = priv->channel[i]; |
1805 | |
1806 | err = dpaa2_eth_seed_pool(priv, ch: channel); |
1807 | |
1808 | /* Not much to do; the buffer pool, though not filled up, |
1809 | * may still contain some buffers which would enable us |
1810 | * to limp on. |
1811 | */ |
1812 | if (err) |
1813 | netdev_err(dev: net_dev, format: "Buffer seeding failed for DPBP %d (bpid=%d)\n" , |
1814 | channel->bp->dev->obj_desc.id, |
1815 | channel->bp->bpid); |
1816 | } |
1817 | } |
1818 | |
1819 | /* |
1820 | * Drain the specified number of buffers from one of the DPNI's private buffer |
1821 | * pools. |
1822 | * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1823 | */ |
1824 | static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid, |
1825 | int count) |
1826 | { |
1827 | u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; |
1828 | bool xsk_zc = false; |
1829 | int retries = 0; |
1830 | int i, ret; |
1831 | |
1832 | for (i = 0; i < priv->num_channels; i++) |
1833 | if (priv->channel[i]->bp->bpid == bpid) |
1834 | xsk_zc = priv->channel[i]->xsk_zc; |
1835 | |
1836 | do { |
1837 | ret = dpaa2_io_service_acquire(NULL, bpid, buffers: buf_array, num_buffers: count); |
1838 | if (ret < 0) { |
1839 | if (ret == -EBUSY && |
1840 | retries++ < DPAA2_ETH_SWP_BUSY_RETRIES) |
1841 | continue; |
1842 | netdev_err(dev: priv->net_dev, format: "dpaa2_io_service_acquire() failed\n" ); |
1843 | return; |
1844 | } |
1845 | dpaa2_eth_free_bufs(priv, buf_array, count: ret, xsk_zc); |
1846 | retries = 0; |
1847 | } while (ret); |
1848 | } |
1849 | |
1850 | static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid) |
1851 | { |
1852 | int i; |
1853 | |
1854 | /* Drain the buffer pool */ |
1855 | dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD); |
1856 | dpaa2_eth_drain_bufs(priv, bpid, count: 1); |
1857 | |
1858 | /* Reset the buffer count of all channels which were using
1859 | * this buffer pool.
1860 | */
1861 | for (i = 0; i < priv->num_channels; i++) |
1862 | if (priv->channel[i]->bp->bpid == bpid) |
1863 | priv->channel[i]->buf_count = 0; |
1864 | } |
1865 | |
1866 | static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv) |
1867 | { |
1868 | int i; |
1869 | |
1870 | for (i = 0; i < priv->num_bps; i++) |
1871 | dpaa2_eth_drain_pool(priv, bpid: priv->bp[i]->bpid); |
1872 | } |
1873 | |
1874 | /* Function is called from softirq context only, so we don't need to guard |
1875 | * the access to percpu count |
1876 | */ |
1877 | static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, |
1878 | struct dpaa2_eth_channel *ch) |
1879 | { |
1880 | int new_count; |
1881 | |
1882 | if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) |
1883 | return 0; |
1884 | |
1885 | do { |
1886 | new_count = dpaa2_eth_add_bufs(priv, ch); |
1887 | if (unlikely(!new_count)) { |
1888 | /* Out of memory; abort for now, we'll try later on */ |
1889 | break; |
1890 | } |
1891 | ch->buf_count += new_count; |
1892 | } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); |
1893 | |
1894 | if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) |
1895 | return -ENOMEM; |
1896 | |
1897 | return 0; |
1898 | } |
1899 | |
1900 | static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv) |
1901 | { |
1902 | struct dpaa2_eth_sgt_cache *sgt_cache; |
1903 | u16 count; |
1904 | int k, i; |
1905 | |
1906 | for_each_possible_cpu(k) { |
1907 | sgt_cache = per_cpu_ptr(priv->sgt_cache, k); |
1908 | count = sgt_cache->count; |
1909 | |
1910 | for (i = 0; i < count; i++) |
1911 | skb_free_frag(addr: sgt_cache->buf[i]); |
1912 | sgt_cache->count = 0; |
1913 | } |
1914 | } |
1915 | |
1916 | static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch) |
1917 | { |
1918 | int err; |
1919 | int dequeues = -1; |
1920 | |
1921 | /* Retry while portal is busy */ |
1922 | do { |
1923 | err = dpaa2_io_service_pull_channel(d: ch->dpio, channelid: ch->ch_id, |
1924 | s: ch->store); |
1925 | dequeues++; |
1926 | cpu_relax(); |
1927 | } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES); |
1928 | |
1929 | ch->stats.dequeue_portal_busy += dequeues; |
1930 | if (unlikely(err)) |
1931 | ch->stats.pull_err++; |
1932 | |
1933 | return err; |
1934 | } |
1935 | |
1936 | /* NAPI poll routine |
1937 | * |
1938 | * Frames are dequeued from the QMan channel associated with this NAPI context. |
1939 | * Rx, Tx confirmation and (if configured) Rx error frames all count |
1940 | * towards the NAPI budget. |
1941 | */ |
1942 | static int dpaa2_eth_poll(struct napi_struct *napi, int budget) |
1943 | { |
1944 | struct dpaa2_eth_channel *ch; |
1945 | struct dpaa2_eth_priv *priv; |
1946 | int rx_cleaned = 0, txconf_cleaned = 0; |
1947 | struct dpaa2_eth_fq *fq, *txc_fq = NULL; |
1948 | struct netdev_queue *nq; |
1949 | int store_cleaned, work_done; |
1950 | bool work_done_zc = false; |
1951 | struct list_head rx_list; |
1952 | int retries = 0; |
1953 | u16 flowid; |
1954 | int err; |
1955 | |
1956 | ch = container_of(napi, struct dpaa2_eth_channel, napi); |
1957 | ch->xdp.res = 0; |
1958 | priv = ch->priv; |
1959 | |
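/* Rx frames are accumulated on this list during the poll and handed
 * to the stack in a single netif_receive_skb_list() call at the end
 */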
1960 | INIT_LIST_HEAD(list: &rx_list); |
1961 | ch->rx_list = &rx_list; |
1962 | |
1963 | if (ch->xsk_zc) { |
1964 | work_done_zc = dpaa2_xsk_tx(priv, ch); |
1965 | /* If we reached the XSK Tx per NAPI threshold, we're done */ |
1966 | if (work_done_zc) { |
1967 | work_done = budget; |
1968 | goto out; |
1969 | } |
1970 | } |
1971 | |
1972 | do { |
1973 | err = dpaa2_eth_pull_channel(ch); |
1974 | if (unlikely(err)) |
1975 | break; |
1976 | |
1977 | /* Refill pool if appropriate */ |
1978 | dpaa2_eth_refill_pool(priv, ch); |
1979 | |
1980 | store_cleaned = dpaa2_eth_consume_frames(ch, src: &fq); |
1981 | if (store_cleaned <= 0) |
1982 | break; |
1983 | if (fq->type == DPAA2_RX_FQ) { |
1984 | rx_cleaned += store_cleaned; |
1985 | flowid = fq->flowid; |
1986 | } else { |
1987 | txconf_cleaned += store_cleaned; |
1988 | /* We have a single Tx conf FQ on this channel */ |
1989 | txc_fq = fq; |
1990 | } |
1991 | |
1992 | /* If we either consumed the whole NAPI budget with Rx frames |
1993 | * or we reached the Tx confirmations threshold, we're done. |
1994 | */ |
1995 | if (rx_cleaned >= budget || |
1996 | txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { |
1997 | work_done = budget; |
1998 | if (ch->xdp.res & XDP_REDIRECT) |
1999 | xdp_do_flush(); |
2000 | goto out; |
2001 | } |
2002 | } while (store_cleaned); |
2003 | |
2004 | if (ch->xdp.res & XDP_REDIRECT) |
2005 | xdp_do_flush(); |
2006 | |
2007 | /* Update NET DIM with the values for this CDAN */ |
2008 | dpaa2_io_update_net_dim(d: ch->dpio, frames: ch->stats.frames_per_cdan, |
2009 | bytes: ch->stats.bytes_per_cdan); |
2010 | ch->stats.frames_per_cdan = 0; |
2011 | ch->stats.bytes_per_cdan = 0; |
2012 | |
2013 | /* We didn't consume the entire budget, so finish napi and |
2014 | * re-enable data availability notifications |
2015 | */ |
2016 | napi_complete_done(n: napi, work_done: rx_cleaned); |
2017 | do { |
2018 | err = dpaa2_io_service_rearm(service: ch->dpio, ctx: &ch->nctx); |
2019 | cpu_relax(); |
2020 | } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES); |
2021 | WARN_ONCE(err, "CDAN notifications rearm failed on core %d" , |
2022 | ch->nctx.desired_cpu); |
2023 | |
2024 | work_done = max(rx_cleaned, 1); |
2025 | |
2026 | out: |
2027 | netif_receive_skb_list(head: ch->rx_list); |
2028 | |
2029 | if (ch->xsk_tx_pkts_sent) { |
2030 | xsk_tx_completed(pool: ch->xsk_pool, nb_entries: ch->xsk_tx_pkts_sent); |
2031 | ch->xsk_tx_pkts_sent = 0; |
2032 | } |
2033 | |
2034 | if (txc_fq && txc_fq->dq_frames) { |
2035 | nq = netdev_get_tx_queue(dev: priv->net_dev, index: txc_fq->flowid); |
2036 | netdev_tx_completed_queue(dev_queue: nq, pkts: txc_fq->dq_frames, |
2037 | bytes: txc_fq->dq_bytes); |
2038 | txc_fq->dq_frames = 0; |
2039 | txc_fq->dq_bytes = 0; |
2040 | } |
2041 | |
2042 | if (rx_cleaned && ch->xdp.res & XDP_TX) |
2043 | dpaa2_eth_xdp_tx_flush(priv, ch, fq: &priv->fq[flowid]); |
2044 | |
2045 | return work_done; |
2046 | } |
2047 | |
2048 | static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv) |
2049 | { |
2050 | struct dpaa2_eth_channel *ch; |
2051 | int i; |
2052 | |
2053 | for (i = 0; i < priv->num_channels; i++) { |
2054 | ch = priv->channel[i]; |
2055 | napi_enable(n: &ch->napi); |
2056 | } |
2057 | } |
2058 | |
2059 | static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv) |
2060 | { |
2061 | struct dpaa2_eth_channel *ch; |
2062 | int i; |
2063 | |
2064 | for (i = 0; i < priv->num_channels; i++) { |
2065 | ch = priv->channel[i]; |
2066 | napi_disable(n: &ch->napi); |
2067 | } |
2068 | } |
2069 | |
2070 | void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, |
2071 | bool tx_pause, bool pfc) |
2072 | { |
2073 | struct dpni_taildrop td = {0}; |
2074 | struct dpaa2_eth_fq *fq; |
2075 | int i, err; |
2076 | |
2077 | /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if |
2078 | * flow control is disabled (as it might interfere with either the |
2079 | * buffer pool depletion trigger for pause frames or with the group |
2080 | * congestion trigger for PFC frames) |
2081 | */ |
2082 | td.enable = !tx_pause; |
2083 | if (priv->rx_fqtd_enabled == td.enable) |
2084 | goto set_cgtd; |
2085 | |
2086 | td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH; |
2087 | td.units = DPNI_CONGESTION_UNIT_BYTES; |
2088 | |
2089 | for (i = 0; i < priv->num_fqs; i++) { |
2090 | fq = &priv->fq[i]; |
2091 | if (fq->type != DPAA2_RX_FQ) |
2092 | continue; |
2093 | err = dpni_set_taildrop(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2094 | cg_point: DPNI_CP_QUEUE, q_type: DPNI_QUEUE_RX, |
2095 | tc: fq->tc, q_index: fq->flowid, taildrop: &td); |
2096 | if (err) { |
2097 | netdev_err(dev: priv->net_dev, |
2098 | format: "dpni_set_taildrop(FQ) failed\n" ); |
2099 | return; |
2100 | } |
2101 | } |
2102 | |
2103 | priv->rx_fqtd_enabled = td.enable; |
2104 | |
2105 | set_cgtd: |
2106 | /* Congestion group taildrop: threshold is in frames, per group |
2107 | * of FQs belonging to the same traffic class.
2108 | * Enabled if general Tx pause is disabled or if PFCs are enabled
2109 | * (congestion group threshold for PFC generation is lower than the
2110 | * CG taildrop threshold, so it won't interfere with it; we also |
2111 | * want frames in non-PFC enabled traffic classes to be kept in check) |
2112 | */ |
2113 | td.enable = !tx_pause || pfc; |
2114 | if (priv->rx_cgtd_enabled == td.enable) |
2115 | return; |
2116 | |
2117 | td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv); |
2118 | td.units = DPNI_CONGESTION_UNIT_FRAMES; |
2119 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
2120 | err = dpni_set_taildrop(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2121 | cg_point: DPNI_CP_GROUP, q_type: DPNI_QUEUE_RX, |
2122 | tc: i, q_index: 0, taildrop: &td); |
2123 | if (err) { |
2124 | netdev_err(dev: priv->net_dev, |
2125 | format: "dpni_set_taildrop(CG) failed\n" ); |
2126 | return; |
2127 | } |
2128 | } |
2129 | |
2130 | priv->rx_cgtd_enabled = td.enable; |
2131 | } |
2132 | |
2133 | static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv) |
2134 | { |
2135 | struct dpni_link_state state = {0}; |
2136 | bool tx_pause; |
2137 | int err; |
2138 | |
2139 | err = dpni_get_link_state(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, state: &state); |
2140 | if (unlikely(err)) { |
2141 | netdev_err(dev: priv->net_dev, |
2142 | format: "dpni_get_link_state() failed\n" ); |
2143 | return err; |
2144 | } |
2145 | |
2146 | /* If Tx pause frame settings have changed, we need to update |
2147 | * Rx FQ taildrop configuration as well. We configure taildrop |
2148 | * only when pause frame generation is disabled. |
2149 | */ |
2150 | tx_pause = dpaa2_eth_tx_pause_enabled(link_options: state.options); |
2151 | dpaa2_eth_set_rx_taildrop(priv, tx_pause, pfc: priv->pfc_enabled); |
2152 | |
2153 | /* When we manage the MAC/PHY using phylink there is no need |
2154 | * to manually update the netif_carrier. |
2155 | * We can avoid locking because we are called from the "link changed" |
2156 | * IRQ handler, which is the same as the "endpoint changed" IRQ handler |
2157 | * (the writer to priv->mac), so we cannot race with it. |
2158 | */ |
2159 | if (dpaa2_mac_is_type_phy(mac: priv->mac)) |
2160 | goto out; |
2161 | |
2162 | /* Check link state; speed / duplex changes are not handled yet */
2163 | if (priv->link_state.up == state.up) |
2164 | goto out; |
2165 | |
2166 | if (state.up) { |
2167 | netif_carrier_on(dev: priv->net_dev); |
2168 | netif_tx_start_all_queues(dev: priv->net_dev); |
2169 | } else { |
2170 | netif_tx_stop_all_queues(dev: priv->net_dev); |
2171 | netif_carrier_off(dev: priv->net_dev); |
2172 | } |
2173 | |
2174 | netdev_info(dev: priv->net_dev, format: "Link Event: state %s\n" , |
2175 | state.up ? "up" : "down" ); |
2176 | |
2177 | out: |
2178 | priv->link_state = state; |
2179 | |
2180 | return 0; |
2181 | } |
2182 | |
2183 | static int dpaa2_eth_open(struct net_device *net_dev) |
2184 | { |
2185 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2186 | int err; |
2187 | |
2188 | dpaa2_eth_seed_pools(priv); |
2189 | |
2190 | mutex_lock(&priv->mac_lock); |
2191 | |
2192 | if (!dpaa2_eth_is_type_phy(priv)) { |
2193 | /* We'll only start the txqs when the link is actually ready; |
2194 | * make sure we don't race against the link up notification, |
2195 | * which may come immediately after dpni_enable(); |
2196 | */ |
2197 | netif_tx_stop_all_queues(dev: net_dev); |
2198 | |
2199 | /* Also, explicitly set carrier off, otherwise |
2200 | * netif_carrier_ok() will return true and cause 'ip link show' |
2201 | * to report the LOWER_UP flag, even though the link |
2202 | * notification wasn't even received. |
2203 | */ |
2204 | netif_carrier_off(dev: net_dev); |
2205 | } |
2206 | dpaa2_eth_enable_ch_napi(priv); |
2207 | |
2208 | err = dpni_enable(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token); |
2209 | if (err < 0) { |
2210 | mutex_unlock(lock: &priv->mac_lock); |
2211 | netdev_err(dev: net_dev, format: "dpni_enable() failed\n" ); |
2212 | goto enable_err; |
2213 | } |
2214 | |
2215 | if (dpaa2_eth_is_type_phy(priv)) |
2216 | dpaa2_mac_start(mac: priv->mac); |
2217 | |
2218 | mutex_unlock(lock: &priv->mac_lock); |
2219 | |
2220 | return 0; |
2221 | |
2222 | enable_err: |
2223 | dpaa2_eth_disable_ch_napi(priv); |
2224 | dpaa2_eth_drain_pools(priv); |
2225 | return err; |
2226 | } |
2227 | |
2228 | /* Total number of in-flight frames on ingress queues */ |
2229 | static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv) |
2230 | { |
2231 | struct dpaa2_eth_fq *fq; |
2232 | u32 fcnt = 0, bcnt = 0, total = 0; |
2233 | int i, err; |
2234 | |
2235 | for (i = 0; i < priv->num_fqs; i++) { |
2236 | fq = &priv->fq[i]; |
2237 | err = dpaa2_io_query_fq_count(NULL, fqid: fq->fqid, fcnt: &fcnt, bcnt: &bcnt); |
2238 | if (err) { |
2239 | netdev_warn(dev: priv->net_dev, format: "query_fq_count failed" ); |
2240 | break; |
2241 | } |
2242 | total += fcnt; |
2243 | } |
2244 | |
2245 | return total; |
2246 | } |
2247 | |
2248 | static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) |
2249 | { |
2250 | int retries = 10; |
2251 | u32 pending; |
2252 | |
2253 | do { |
2254 | pending = dpaa2_eth_ingress_fq_count(priv); |
2255 | if (pending) |
2256 | msleep(msecs: 100); |
2257 | } while (pending && --retries); |
2258 | } |
2259 | |
2260 | #define DPNI_TX_PENDING_VER_MAJOR 7 |
2261 | #define DPNI_TX_PENDING_VER_MINOR 13 |
2262 | static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) |
2263 | { |
2264 | union dpni_statistics stats; |
2265 | int retries = 10; |
2266 | int err; |
2267 | |
2268 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, |
2269 | DPNI_TX_PENDING_VER_MINOR) < 0) |
2270 | goto out; |
2271 | |
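/* Poll DPNI statistics page 6 until hardware reports no more Tx
 * frames pending, or until we run out of retries
 */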
2272 | do { |
2273 | err = dpni_get_statistics(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, page: 6, |
2274 | stat: &stats); |
2275 | if (err) |
2276 | goto out; |
2277 | if (stats.page_6.tx_pending_frames == 0) |
2278 | return; |
2279 | } while (--retries); |
2280 | |
2281 | out: |
2282 | msleep(msecs: 500); |
2283 | } |
2284 | |
2285 | static int dpaa2_eth_stop(struct net_device *net_dev) |
2286 | { |
2287 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2288 | int dpni_enabled = 0; |
2289 | int retries = 10; |
2290 | |
2291 | mutex_lock(&priv->mac_lock); |
2292 | |
2293 | if (dpaa2_eth_is_type_phy(priv)) { |
2294 | dpaa2_mac_stop(mac: priv->mac); |
2295 | } else { |
2296 | netif_tx_stop_all_queues(dev: net_dev); |
2297 | netif_carrier_off(dev: net_dev); |
2298 | } |
2299 | |
2300 | mutex_unlock(lock: &priv->mac_lock); |
2301 | |
2302 | /* On dpni_disable(), the MC firmware will: |
2303 | * - stop MAC Rx and wait for all Rx frames to be enqueued to software |
2304 | * - cut off WRIOP dequeues from egress FQs and wait until transmission |
2305 | * of all in flight Tx frames is finished (and corresponding Tx conf |
2306 | * frames are enqueued back to software) |
2307 | * |
2308 | * Before calling dpni_disable(), we wait for all Tx frames to arrive |
2309 | * on WRIOP. After it finishes, wait until all remaining frames on Rx |
2310 | * and Tx conf queues are consumed on NAPI poll. |
2311 | */ |
2312 | dpaa2_eth_wait_for_egress_fq_empty(priv); |
2313 | |
2314 | do { |
2315 | dpni_disable(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token); |
2316 | dpni_is_enabled(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, en: &dpni_enabled); |
2317 | if (dpni_enabled) |
2318 | /* Allow the hardware some slack */ |
2319 | msleep(msecs: 100); |
2320 | } while (dpni_enabled && --retries); |
2321 | if (!retries) { |
2322 | netdev_warn(dev: net_dev, format: "Retry count exceeded disabling DPNI\n" ); |
2323 | /* Must go on and disable NAPI nonetheless, so we don't crash at |
2324 | * the next "ifconfig up" |
2325 | */ |
2326 | } |
2327 | |
2328 | dpaa2_eth_wait_for_ingress_fq_empty(priv); |
2329 | dpaa2_eth_disable_ch_napi(priv); |
2330 | |
2331 | /* Empty the buffer pool */ |
2332 | dpaa2_eth_drain_pools(priv); |
2333 | |
2334 | /* Empty the Scatter-Gather Buffer cache */ |
2335 | dpaa2_eth_sgt_cache_drain(priv); |
2336 | |
2337 | return 0; |
2338 | } |
2339 | |
2340 | static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) |
2341 | { |
2342 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2343 | struct device *dev = net_dev->dev.parent; |
2344 | int err; |
2345 | |
2346 | err = eth_mac_addr(dev: net_dev, p: addr); |
2347 | if (err < 0) { |
2348 | dev_err(dev, "eth_mac_addr() failed (%d)\n" , err); |
2349 | return err; |
2350 | } |
2351 | |
2352 | err = dpni_set_primary_mac_addr(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2353 | mac_addr: net_dev->dev_addr); |
2354 | if (err) { |
2355 | dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n" , err); |
2356 | return err; |
2357 | } |
2358 | |
2359 | return 0; |
2360 | } |
2361 | |
2362 | /* Fill in counters maintained by the GPP driver. These may be different from
2363 | * the hardware counters obtained by ethtool. |
2364 | */ |
2365 | static void dpaa2_eth_get_stats(struct net_device *net_dev, |
2366 | struct rtnl_link_stats64 *stats) |
2367 | { |
2368 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2369 | struct rtnl_link_stats64 *percpu_stats; |
2370 | u64 *cpustats; |
2371 | u64 *netstats = (u64 *)stats; |
2372 | int i, j; |
2373 | int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); |
2374 | |
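/* Sum up each u64 field of the per-CPU stats by walking the
 * structures as arrays of u64
 */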
2375 | for_each_possible_cpu(i) { |
2376 | percpu_stats = per_cpu_ptr(priv->percpu_stats, i); |
2377 | cpustats = (u64 *)percpu_stats; |
2378 | for (j = 0; j < num; j++) |
2379 | netstats[j] += cpustats[j]; |
2380 | } |
2381 | } |
2382 | |
2383 | /* Copy mac unicast addresses from @net_dev to @priv. |
2384 | * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. |
2385 | */ |
2386 | static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev, |
2387 | struct dpaa2_eth_priv *priv) |
2388 | { |
2389 | struct netdev_hw_addr *ha; |
2390 | int err; |
2391 | |
2392 | netdev_for_each_uc_addr(ha, net_dev) { |
2393 | err = dpni_add_mac_addr(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2394 | mac_addr: ha->addr); |
2395 | if (err) |
2396 | netdev_warn(dev: priv->net_dev, |
2397 | format: "Could not add ucast MAC %pM to the filtering table (err %d)\n" , |
2398 | ha->addr, err); |
2399 | } |
2400 | } |
2401 | |
2402 | /* Copy mac multicast addresses from @net_dev to @priv |
2403 | * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. |
2404 | */ |
2405 | static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev, |
2406 | struct dpaa2_eth_priv *priv) |
2407 | { |
2408 | struct netdev_hw_addr *ha; |
2409 | int err; |
2410 | |
2411 | netdev_for_each_mc_addr(ha, net_dev) { |
2412 | err = dpni_add_mac_addr(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2413 | mac_addr: ha->addr); |
2414 | if (err) |
2415 | netdev_warn(dev: priv->net_dev, |
2416 | format: "Could not add mcast MAC %pM to the filtering table (err %d)\n" , |
2417 | ha->addr, err); |
2418 | } |
2419 | } |
2420 | |
2421 | static int dpaa2_eth_rx_add_vid(struct net_device *net_dev, |
2422 | __be16 vlan_proto, u16 vid) |
2423 | { |
2424 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2425 | int err; |
2426 | |
2427 | err = dpni_add_vlan_id(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2428 | vlan_id: vid, flags: 0, tc_id: 0, flow_id: 0); |
2429 | |
2430 | if (err) { |
2431 | netdev_warn(dev: priv->net_dev, |
2432 | format: "Could not add the vlan id %u\n" , |
2433 | vid); |
2434 | return err; |
2435 | } |
2436 | |
2437 | return 0; |
2438 | } |
2439 | |
2440 | static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev, |
2441 | __be16 vlan_proto, u16 vid) |
2442 | { |
2443 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2444 | int err; |
2445 | |
2446 | err = dpni_remove_vlan_id(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, vlan_id: vid); |
2447 | |
2448 | if (err) { |
2449 | netdev_warn(dev: priv->net_dev, |
2450 | format: "Could not remove the vlan id %u\n" , |
2451 | vid); |
2452 | return err; |
2453 | } |
2454 | |
2455 | return 0; |
2456 | } |
2457 | |
2458 | static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) |
2459 | { |
2460 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2461 | int uc_count = netdev_uc_count(net_dev); |
2462 | int mc_count = netdev_mc_count(net_dev); |
2463 | u8 max_mac = priv->dpni_attrs.mac_filter_entries; |
2464 | u32 options = priv->dpni_attrs.options; |
2465 | u16 mc_token = priv->mc_token; |
2466 | struct fsl_mc_io *mc_io = priv->mc_io; |
2467 | int err; |
2468 | |
2469 | /* Basic sanity checks; these probably indicate a misconfiguration */ |
2470 | if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) |
2471 | netdev_info(dev: net_dev, |
2472 | format: "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n" , |
2473 | max_mac); |
2474 | |
2475 | /* Force promiscuous if the uc or mc counts exceed our capabilities. */ |
2476 | if (uc_count > max_mac) { |
2477 | netdev_info(dev: net_dev, |
2478 | format: "Unicast addr count reached %d, max allowed is %d; forcing promisc\n" , |
2479 | uc_count, max_mac); |
2480 | goto force_promisc; |
2481 | } |
2482 | if (mc_count + uc_count > max_mac) { |
2483 | netdev_info(dev: net_dev, |
2484 | format: "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n" , |
2485 | uc_count + mc_count, max_mac); |
2486 | goto force_mc_promisc; |
2487 | } |
2488 | |
2489 | /* Adjust promisc settings due to flag combinations */ |
2490 | if (net_dev->flags & IFF_PROMISC) |
2491 | goto force_promisc; |
2492 | if (net_dev->flags & IFF_ALLMULTI) { |
2493 | /* First, rebuild unicast filtering table. This should be done |
2494 | * in promisc mode, in order to avoid frame loss while we |
2495 | * progressively add entries to the table. |
2496 | * We don't know whether we had been in promisc already, and |
2497 | * making an MC call to find out is expensive; so set uc promisc |
2498 | * nonetheless. |
2499 | */ |
2500 | err = dpni_set_unicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 1); |
2501 | if (err) |
2502 | netdev_warn(dev: net_dev, format: "Can't set uc promisc\n" ); |
2503 | |
2504 | /* Actual uc table reconstruction. */ |
2505 | err = dpni_clear_mac_filters(mc_io, cmd_flags: 0, token: mc_token, unicast: 1, multicast: 0); |
2506 | if (err) |
2507 | netdev_warn(dev: net_dev, format: "Can't clear uc filters\n" ); |
2508 | dpaa2_eth_add_uc_hw_addr(net_dev, priv); |
2509 | |
2510 | /* Finally, clear uc promisc and set mc promisc as requested. */ |
2511 | err = dpni_set_unicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 0); |
2512 | if (err) |
2513 | netdev_warn(dev: net_dev, format: "Can't clear uc promisc\n" ); |
2514 | goto force_mc_promisc; |
2515 | } |
2516 | |
2517 | /* Neither unicast, nor multicast promisc will be on... eventually. |
2518 | * For now, rebuild mac filtering tables while forcing both of them on. |
2519 | */ |
2520 | err = dpni_set_unicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 1); |
2521 | if (err) |
2522 | netdev_warn(dev: net_dev, format: "Can't set uc promisc (%d)\n" , err); |
2523 | err = dpni_set_multicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 1); |
2524 | if (err) |
2525 | netdev_warn(dev: net_dev, format: "Can't set mc promisc (%d)\n" , err); |
2526 | |
2527 | /* Actual mac filtering tables reconstruction */ |
2528 | err = dpni_clear_mac_filters(mc_io, cmd_flags: 0, token: mc_token, unicast: 1, multicast: 1); |
2529 | if (err) |
2530 | netdev_warn(dev: net_dev, format: "Can't clear mac filters\n" ); |
2531 | dpaa2_eth_add_mc_hw_addr(net_dev, priv); |
2532 | dpaa2_eth_add_uc_hw_addr(net_dev, priv); |
2533 | |
2534 | /* Now we can clear both ucast and mcast promisc, without the risk
2535 | * of dropping legitimate frames anymore.
2536 | */ |
2537 | err = dpni_set_unicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 0); |
2538 | if (err) |
2539 | netdev_warn(dev: net_dev, format: "Can't clear ucast promisc\n" ); |
2540 | err = dpni_set_multicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 0); |
2541 | if (err) |
2542 | netdev_warn(dev: net_dev, format: "Can't clear mcast promisc\n" ); |
2543 | |
2544 | return; |
2545 | |
2546 | force_promisc: |
2547 | err = dpni_set_unicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 1); |
2548 | if (err) |
2549 | netdev_warn(dev: net_dev, format: "Can't set ucast promisc\n" ); |
2550 | force_mc_promisc: |
2551 | err = dpni_set_multicast_promisc(mc_io, cmd_flags: 0, token: mc_token, en: 1); |
2552 | if (err) |
2553 | netdev_warn(dev: net_dev, format: "Can't set mcast promisc\n" ); |
2554 | } |
2555 | |
2556 | static int dpaa2_eth_set_features(struct net_device *net_dev, |
2557 | netdev_features_t features) |
2558 | { |
2559 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2560 | netdev_features_t changed = features ^ net_dev->features; |
2561 | bool enable; |
2562 | int err; |
2563 | |
2564 | if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { |
2565 | enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); |
2566 | err = dpaa2_eth_set_rx_vlan_filtering(priv, enable); |
2567 | if (err) |
2568 | return err; |
2569 | } |
2570 | |
2571 | if (changed & NETIF_F_RXCSUM) { |
2572 | enable = !!(features & NETIF_F_RXCSUM); |
2573 | err = dpaa2_eth_set_rx_csum(priv, enable); |
2574 | if (err) |
2575 | return err; |
2576 | } |
2577 | |
2578 | if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { |
2579 | enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); |
2580 | err = dpaa2_eth_set_tx_csum(priv, enable); |
2581 | if (err) |
2582 | return err; |
2583 | } |
2584 | |
2585 | return 0; |
2586 | } |
2587 | |
2588 | static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2589 | { |
2590 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
2591 | struct hwtstamp_config config; |
2592 | |
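/* Handle SIOCSHWTSTAMP: select the Tx timestamping mode and turn Rx
 * timestamping on or off for all frames
 */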
2593 | if (!dpaa2_ptp) |
2594 | return -EINVAL; |
2595 | |
2596 | if (copy_from_user(to: &config, from: rq->ifr_data, n: sizeof(config))) |
2597 | return -EFAULT; |
2598 | |
2599 | switch (config.tx_type) { |
2600 | case HWTSTAMP_TX_OFF: |
2601 | case HWTSTAMP_TX_ON: |
2602 | case HWTSTAMP_TX_ONESTEP_SYNC: |
2603 | priv->tx_tstamp_type = config.tx_type; |
2604 | break; |
2605 | default: |
2606 | return -ERANGE; |
2607 | } |
2608 | |
2609 | if (config.rx_filter == HWTSTAMP_FILTER_NONE) { |
2610 | priv->rx_tstamp = false; |
2611 | } else { |
2612 | priv->rx_tstamp = true; |
2613 | /* TS is set for all frame types, not only those requested */ |
2614 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
2615 | } |
2616 | |
2617 | if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) |
2618 | dpaa2_ptp_onestep_reg_update_method(priv); |
2619 | |
2620 | return copy_to_user(to: rq->ifr_data, from: &config, n: sizeof(config)) ? |
2621 | -EFAULT : 0; |
2622 | } |
2623 | |
2624 | static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2625 | { |
2626 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
2627 | int err; |
2628 | |
2629 | if (cmd == SIOCSHWTSTAMP) |
2630 | return dpaa2_eth_ts_ioctl(dev, rq, cmd); |
2631 | |
2632 | mutex_lock(&priv->mac_lock); |
2633 | |
2634 | if (dpaa2_eth_is_type_phy(priv)) { |
2635 | err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd); |
2636 | mutex_unlock(lock: &priv->mac_lock); |
2637 | return err; |
2638 | } |
2639 | |
2640 | mutex_unlock(lock: &priv->mac_lock); |
2641 | |
2642 | return -EOPNOTSUPP; |
2643 | } |
2644 | |
2645 | static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) |
2646 | { |
2647 | int mfl, linear_mfl; |
2648 | |
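/* The frame must fit in a single Rx buffer: subtract the hardware
 * annotation area, the Rx headroom and the extra XDP headroom from
 * the buffer size
 */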
2649 | mfl = DPAA2_ETH_L2_MAX_FRM(mtu); |
2650 | linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - |
2651 | dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; |
2652 | |
2653 | if (mfl > linear_mfl) { |
2654 | netdev_warn(dev: priv->net_dev, format: "Maximum MTU for XDP is %d\n" , |
2655 | linear_mfl - VLAN_ETH_HLEN); |
2656 | return false; |
2657 | } |
2658 | |
2659 | return true; |
2660 | } |
2661 | |
2662 | static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) |
2663 | { |
2664 | int mfl, err; |
2665 | |
2666 | /* We enforce a maximum Rx frame length based on MTU only if we have |
2667 | * an XDP program attached (in order to avoid Rx S/G frames). |
2668 | * Otherwise, we accept all incoming frames as long as they are not |
2669 | * larger than maximum size supported in hardware |
2670 | */ |
2671 | if (has_xdp) |
2672 | mfl = DPAA2_ETH_L2_MAX_FRM(mtu); |
2673 | else |
2674 | mfl = DPAA2_ETH_MFL; |
2675 | |
2676 | err = dpni_set_max_frame_length(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, max_frame_length: mfl); |
2677 | if (err) { |
2678 | netdev_err(dev: priv->net_dev, format: "dpni_set_max_frame_length failed\n" ); |
2679 | return err; |
2680 | } |
2681 | |
2682 | return 0; |
2683 | } |
2684 | |
2685 | static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) |
2686 | { |
2687 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
2688 | int err; |
2689 | |
2690 | if (!priv->xdp_prog) |
2691 | goto out; |
2692 | |
2693 | if (!xdp_mtu_valid(priv, mtu: new_mtu)) |
2694 | return -EINVAL; |
2695 | |
2696 | err = dpaa2_eth_set_rx_mfl(priv, mtu: new_mtu, has_xdp: true); |
2697 | if (err) |
2698 | return err; |
2699 | |
2700 | out: |
2701 | dev->mtu = new_mtu; |
2702 | return 0; |
2703 | } |
2704 | |
2705 | static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) |
2706 | { |
2707 | struct dpni_buffer_layout buf_layout = {0}; |
2708 | int err; |
2709 | |
2710 | err = dpni_get_buffer_layout(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2711 | qtype: DPNI_QUEUE_RX, layout: &buf_layout); |
2712 | if (err) { |
2713 | netdev_err(dev: priv->net_dev, format: "dpni_get_buffer_layout failed\n" ); |
2714 | return err; |
2715 | } |
2716 | |
2717 | /* Reserve extra headroom for XDP header size changes */ |
2718 | buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + |
2719 | (has_xdp ? XDP_PACKET_HEADROOM : 0); |
2720 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; |
2721 | err = dpni_set_buffer_layout(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
2722 | qtype: DPNI_QUEUE_RX, layout: &buf_layout); |
2723 | if (err) { |
2724 | netdev_err(dev: priv->net_dev, format: "dpni_set_buffer_layout failed\n" ); |
2725 | return err; |
2726 | } |
2727 | |
2728 | return 0; |
2729 | } |
2730 | |
2731 | static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) |
2732 | { |
2733 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
2734 | struct dpaa2_eth_channel *ch; |
2735 | struct bpf_prog *old; |
2736 | bool up, need_update; |
2737 | int i, err; |
2738 | |
2739 | if (prog && !xdp_mtu_valid(priv, mtu: dev->mtu)) |
2740 | return -EINVAL; |
2741 | |
2742 | if (prog) |
2743 | bpf_prog_add(prog, i: priv->num_channels); |
2744 | |
2745 | up = netif_running(dev); |
2746 | need_update = (!!priv->xdp_prog != !!prog); |
2747 | |
2748 | if (up) |
2749 | dev_close(dev); |
2750 | |
2751 | /* While in xdp mode, enforce a maximum Rx frame size based on MTU. |
2752 | * Also, when switching between xdp/non-xdp modes we need to reconfigure |
2753 | * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop, |
2754 | * so we are sure no old format buffers will be used from now on. |
2755 | */ |
2756 | if (need_update) { |
2757 | err = dpaa2_eth_set_rx_mfl(priv, mtu: dev->mtu, has_xdp: !!prog); |
2758 | if (err) |
2759 | goto out_err; |
2760 | err = dpaa2_eth_update_rx_buffer_headroom(priv, has_xdp: !!prog); |
2761 | if (err) |
2762 | goto out_err; |
2763 | } |
2764 | |
2765 | old = xchg(&priv->xdp_prog, prog); |
2766 | if (old) |
2767 | bpf_prog_put(prog: old); |
2768 | |
2769 | for (i = 0; i < priv->num_channels; i++) { |
2770 | ch = priv->channel[i]; |
2771 | old = xchg(&ch->xdp.prog, prog); |
2772 | if (old) |
2773 | bpf_prog_put(prog: old); |
2774 | } |
2775 | |
2776 | if (up) { |
2777 | err = dev_open(dev, NULL); |
2778 | if (err) |
2779 | return err; |
2780 | } |
2781 | |
2782 | return 0; |
2783 | |
2784 | out_err: |
2785 | if (prog) |
2786 | bpf_prog_sub(prog, i: priv->num_channels); |
2787 | if (up) |
2788 | dev_open(dev, NULL); |
2789 | |
2790 | return err; |
2791 | } |
2792 | |
2793 | static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
2794 | { |
2795 | switch (xdp->command) { |
2796 | case XDP_SETUP_PROG: |
2797 | return dpaa2_eth_setup_xdp(dev, prog: xdp->prog); |
2798 | case XDP_SETUP_XSK_POOL: |
2799 | return dpaa2_xsk_setup_pool(dev, pool: xdp->xsk.pool, qid: xdp->xsk.queue_id); |
2800 | default: |
2801 | return -EINVAL; |
2802 | } |
2803 | |
2804 | return 0; |
2805 | } |
2806 | |
2807 | static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev, |
2808 | struct xdp_frame *xdpf, |
2809 | struct dpaa2_fd *fd) |
2810 | { |
2811 | struct device *dev = net_dev->dev.parent; |
2812 | unsigned int needed_headroom; |
2813 | struct dpaa2_eth_swa *swa; |
2814 | void *buffer_start, *aligned_start; |
2815 | dma_addr_t addr; |
2816 | |
2817 | /* We require a minimum headroom to be able to transmit the frame. |
2818 | * Otherwise return an error and let the original net_device handle it |
2819 | */ |
2820 | needed_headroom = dpaa2_eth_needed_headroom(NULL); |
2821 | if (xdpf->headroom < needed_headroom) |
2822 | return -EINVAL; |
2823 | |
2824 | /* Setup the FD fields */ |
2825 | memset(fd, 0, sizeof(*fd)); |
2826 | |
2827 | /* Align FD address, if possible */ |
2828 | buffer_start = xdpf->data - needed_headroom; |
2829 | aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, |
2830 | DPAA2_ETH_TX_BUF_ALIGN); |
2831 | if (aligned_start >= xdpf->data - xdpf->headroom) |
2832 | buffer_start = aligned_start; |
2833 | |
2834 | swa = (struct dpaa2_eth_swa *)buffer_start; |
2835 | /* fill in necessary fields here */ |
2836 | swa->type = DPAA2_ETH_SWA_XDP; |
2837 | swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; |
2838 | swa->xdp.xdpf = xdpf; |
2839 | |
2840 | addr = dma_map_single(dev, buffer_start, |
2841 | swa->xdp.dma_size, |
2842 | DMA_BIDIRECTIONAL); |
2843 | if (unlikely(dma_mapping_error(dev, addr))) |
2844 | return -ENOMEM; |
2845 | |
2846 | dpaa2_fd_set_addr(fd, addr); |
2847 | dpaa2_fd_set_offset(fd, offset: xdpf->data - buffer_start); |
2848 | dpaa2_fd_set_len(fd, len: xdpf->len); |
2849 | dpaa2_fd_set_format(fd, format: dpaa2_fd_single); |
2850 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
2851 | |
2852 | return 0; |
2853 | } |
2854 | |
2855 | static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, |
2856 | struct xdp_frame **frames, u32 flags) |
2857 | { |
2858 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2859 | struct dpaa2_eth_xdp_fds *xdp_redirect_fds; |
2860 | struct rtnl_link_stats64 *percpu_stats; |
2861 | struct dpaa2_eth_fq *fq; |
2862 | struct dpaa2_fd *fds; |
2863 | int enqueued, i, err; |
2864 | |
2865 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
2866 | return -EINVAL; |
2867 | |
2868 | if (!netif_running(dev: net_dev)) |
2869 | return -ENETDOWN; |
2870 | |
2871 | fq = &priv->fq[smp_processor_id()]; |
2872 | xdp_redirect_fds = &fq->xdp_redirect_fds; |
2873 | fds = xdp_redirect_fds->fds; |
2874 | |
2875 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
2876 | |
2877 | /* Create a FD for each xdp_frame received in the list */
2878 | for (i = 0; i < n; i++) { |
2879 | err = dpaa2_eth_xdp_create_fd(net_dev, xdpf: frames[i], fd: &fds[i]); |
2880 | if (err) |
2881 | break; |
2882 | } |
2883 | xdp_redirect_fds->num = i; |
2884 | |
2885 | /* enqueue all the frame descriptors */ |
2886 | enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_fds: xdp_redirect_fds); |
2887 | |
2888 | /* update statistics */ |
2889 | percpu_stats->tx_packets += enqueued; |
2890 | for (i = 0; i < enqueued; i++) |
2891 | percpu_stats->tx_bytes += dpaa2_fd_get_len(fd: &fds[i]); |
2892 | |
2893 | return enqueued; |
2894 | } |
2895 | |
2896 | static int update_xps(struct dpaa2_eth_priv *priv) |
2897 | { |
2898 | struct net_device *net_dev = priv->net_dev; |
2899 | struct cpumask xps_mask; |
2900 | struct dpaa2_eth_fq *fq; |
2901 | int i, num_queues, netdev_queues; |
2902 | int err = 0; |
2903 | |
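/* One netdev Tx queue per {traffic class, flow} pair; with no traffic
 * classes configured, just one queue per flow
 */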
2904 | num_queues = dpaa2_eth_queue_count(priv); |
2905 | netdev_queues = (net_dev->num_tc ? : 1) * num_queues; |
2906 | |
2907 | /* The first <num_queues> entries in priv->fq array are Tx/Tx conf |
2908 | * queues, so only process those |
2909 | */ |
2910 | for (i = 0; i < netdev_queues; i++) { |
2911 | fq = &priv->fq[i % num_queues]; |
2912 | |
2913 | cpumask_clear(dstp: &xps_mask); |
2914 | cpumask_set_cpu(cpu: fq->target_cpu, dstp: &xps_mask); |
2915 | |
2916 | err = netif_set_xps_queue(dev: net_dev, mask: &xps_mask, index: i); |
2917 | if (err) { |
2918 | netdev_warn_once(net_dev, "Error setting XPS queue\n" ); |
2919 | break; |
2920 | } |
2921 | } |
2922 | |
2923 | return err; |
2924 | } |
2925 | |
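/* mqprio offload: map each traffic class onto its own group of
 * hardware queues. A hypothetical user space configuration could look
 * something like:
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 8@0 8@8 hw 1
 * with the queue counts matching dpaa2_eth_queue_count().
 */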
2926 | static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, |
2927 | struct tc_mqprio_qopt *mqprio) |
2928 | { |
2929 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2930 | u8 num_tc, num_queues; |
2931 | int i; |
2932 | |
2933 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
2934 | num_queues = dpaa2_eth_queue_count(priv); |
2935 | num_tc = mqprio->num_tc; |
2936 | |
2937 | if (num_tc == net_dev->num_tc) |
2938 | return 0; |
2939 | |
2940 | if (num_tc > dpaa2_eth_tc_count(priv)) { |
2941 | netdev_err(dev: net_dev, format: "Max %d traffic classes supported\n" , |
2942 | dpaa2_eth_tc_count(priv)); |
2943 | return -EOPNOTSUPP; |
2944 | } |
2945 | |
2946 | if (!num_tc) { |
2947 | netdev_reset_tc(dev: net_dev); |
2948 | netif_set_real_num_tx_queues(dev: net_dev, txq: num_queues); |
2949 | goto out; |
2950 | } |
2951 | |
2952 | netdev_set_num_tc(dev: net_dev, num_tc); |
2953 | netif_set_real_num_tx_queues(dev: net_dev, txq: num_tc * num_queues); |
2954 | |
2955 | for (i = 0; i < num_tc; i++) |
2956 | netdev_set_tc_queue(dev: net_dev, tc: i, count: num_queues, offset: i * num_queues); |
2957 | |
2958 | out: |
2959 | update_xps(priv); |
2960 | |
2961 | return 0; |
2962 | } |
2963 | |
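/* Convert a rate from bytes/s to Mbit/s: divide by 10^6 first to keep
 * the intermediate value within u64 range, then multiply by 8 bits
 * per byte
 */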
2964 | #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8) |
2965 | |
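/* Per-port Tx shaping through the TBF qdisc offload. A hypothetical
 * example from user space:
 *   tc qdisc add dev eth0 root tbf rate 1gbit burst 4k latency 1ms
 * The rate and burst size are programmed into the DPNI Tx shaper
 * (tx_cr_shaper) below.
 */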
2966 | static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p) |
2967 | { |
2968 | struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params; |
2969 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
2970 | struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 }; |
2971 | struct dpni_tx_shaping_cfg tx_er_shaper = { 0 }; |
2972 | int err; |
2973 | |
2974 | if (p->command == TC_TBF_STATS) |
2975 | return -EOPNOTSUPP; |
2976 | |
2977 | /* Only per port Tx shaping */ |
2978 | if (p->parent != TC_H_ROOT) |
2979 | return -EOPNOTSUPP; |
2980 | |
2981 | if (p->command == TC_TBF_REPLACE) { |
2982 | if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) { |
2983 | netdev_err(dev: net_dev, format: "burst size cannot be greater than %d\n" , |
2984 | DPAA2_ETH_MAX_BURST_SIZE); |
2985 | return -EINVAL; |
2986 | } |
2987 | |
2988 | tx_cr_shaper.max_burst_size = cfg->max_size; |
2989 | /* The TBF interface is in bytes/s, whereas DPAA2 expects the |
2990 | * rate in Mbits/s |
2991 | */ |
2992 | tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps); |
2993 | } |
2994 | |
2995 | err = dpni_set_tx_shaping(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, tx_cr_shaper: &tx_cr_shaper, |
2996 | tx_er_shaper: &tx_er_shaper, coupled: 0); |
2997 | if (err) { |
2998 | netdev_err(dev: net_dev, format: "dpni_set_tx_shaping() = %d\n" , err); |
2999 | return err; |
3000 | } |
3001 | |
3002 | return 0; |
3003 | } |
3004 | |
3005 | static int dpaa2_eth_setup_tc(struct net_device *net_dev, |
3006 | enum tc_setup_type type, void *type_data) |
3007 | { |
3008 | switch (type) { |
3009 | case TC_SETUP_QDISC_MQPRIO: |
3010 | return dpaa2_eth_setup_mqprio(net_dev, mqprio: type_data); |
3011 | case TC_SETUP_QDISC_TBF: |
3012 | return dpaa2_eth_setup_tbf(net_dev, p: type_data); |
3013 | default: |
3014 | return -EOPNOTSUPP; |
3015 | } |
3016 | } |
3017 | |
3018 | static const struct net_device_ops dpaa2_eth_ops = { |
3019 | .ndo_open = dpaa2_eth_open, |
3020 | .ndo_start_xmit = dpaa2_eth_tx, |
3021 | .ndo_stop = dpaa2_eth_stop, |
3022 | .ndo_set_mac_address = dpaa2_eth_set_addr, |
3023 | .ndo_get_stats64 = dpaa2_eth_get_stats, |
3024 | .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, |
3025 | .ndo_set_features = dpaa2_eth_set_features, |
3026 | .ndo_eth_ioctl = dpaa2_eth_ioctl, |
3027 | .ndo_change_mtu = dpaa2_eth_change_mtu, |
3028 | .ndo_bpf = dpaa2_eth_xdp, |
3029 | .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, |
3030 | .ndo_xsk_wakeup = dpaa2_xsk_wakeup, |
3031 | .ndo_setup_tc = dpaa2_eth_setup_tc, |
3032 | .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, |
3033 | .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid |
3034 | }; |
3035 | |
3036 | static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) |
3037 | { |
3038 | struct dpaa2_eth_channel *ch; |
3039 | |
3040 | ch = container_of(ctx, struct dpaa2_eth_channel, nctx); |
3041 | |
3042 | /* Update NAPI statistics */ |
3043 | ch->stats.cdan++; |
3044 | |
3045 | /* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed
3046 | * NAPI poll so that it can be rescheduled again.
3047 | */
3048 | if (!napi_if_scheduled_mark_missed(n: &ch->napi)) |
3049 | napi_schedule(n: &ch->napi); |
3050 | } |
3051 | |
3052 | /* Allocate and configure a DPCON object */ |
3053 | static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) |
3054 | { |
3055 | struct fsl_mc_device *dpcon; |
3056 | struct device *dev = priv->net_dev->dev.parent; |
3057 | int err; |
3058 | |
3059 | err = fsl_mc_object_allocate(to_fsl_mc_device(dev), |
3060 | pool_type: FSL_MC_POOL_DPCON, new_mc_adev: &dpcon); |
3061 | if (err) { |
3062 | if (err == -ENXIO) { |
3063 | dev_dbg(dev, "Waiting for DPCON\n" ); |
3064 | err = -EPROBE_DEFER; |
3065 | } else { |
3066 | dev_info(dev, "Not enough DPCONs, will go on as-is\n" ); |
3067 | } |
3068 | return ERR_PTR(error: err); |
3069 | } |
3070 | |
3071 | err = dpcon_open(mc_io: priv->mc_io, cmd_flags: 0, dpcon_id: dpcon->obj_desc.id, token: &dpcon->mc_handle); |
3072 | if (err) { |
3073 | dev_err(dev, "dpcon_open() failed\n" ); |
3074 | goto free; |
3075 | } |
3076 | |
3077 | err = dpcon_reset(mc_io: priv->mc_io, cmd_flags: 0, token: dpcon->mc_handle); |
3078 | if (err) { |
3079 | dev_err(dev, "dpcon_reset() failed\n" ); |
3080 | goto close; |
3081 | } |
3082 | |
3083 | err = dpcon_enable(mc_io: priv->mc_io, cmd_flags: 0, token: dpcon->mc_handle); |
3084 | if (err) { |
3085 | dev_err(dev, "dpcon_enable() failed\n" ); |
3086 | goto close; |
3087 | } |
3088 | |
3089 | return dpcon; |
3090 | |
3091 | close: |
3092 | dpcon_close(mc_io: priv->mc_io, cmd_flags: 0, token: dpcon->mc_handle); |
3093 | free: |
3094 | fsl_mc_object_free(mc_adev: dpcon); |
3095 | |
3096 | return ERR_PTR(error: err); |
3097 | } |
3098 | |
3099 | static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv, |
3100 | struct fsl_mc_device *dpcon) |
3101 | { |
3102 | dpcon_disable(mc_io: priv->mc_io, cmd_flags: 0, token: dpcon->mc_handle); |
3103 | dpcon_close(mc_io: priv->mc_io, cmd_flags: 0, token: dpcon->mc_handle); |
3104 | fsl_mc_object_free(mc_adev: dpcon); |
3105 | } |
3106 | |
3107 | static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv) |
3108 | { |
3109 | struct dpaa2_eth_channel *channel; |
3110 | struct dpcon_attr attr; |
3111 | struct device *dev = priv->net_dev->dev.parent; |
3112 | int err; |
3113 | |
3114 | channel = kzalloc(size: sizeof(*channel), GFP_KERNEL); |
3115 | if (!channel) |
3116 | return NULL; |
3117 | |
3118 | channel->dpcon = dpaa2_eth_setup_dpcon(priv); |
3119 | if (IS_ERR(ptr: channel->dpcon)) { |
3120 | err = PTR_ERR(ptr: channel->dpcon); |
3121 | goto err_setup; |
3122 | } |
3123 | |
3124 | err = dpcon_get_attributes(mc_io: priv->mc_io, cmd_flags: 0, token: channel->dpcon->mc_handle, |
3125 | attr: &attr); |
3126 | if (err) { |
3127 | dev_err(dev, "dpcon_get_attributes() failed\n" ); |
3128 | goto err_get_attr; |
3129 | } |
3130 | |
3131 | channel->dpcon_id = attr.id; |
3132 | channel->ch_id = attr.qbman_ch_id; |
3133 | channel->priv = priv; |
3134 | |
3135 | return channel; |
3136 | |
3137 | err_get_attr: |
3138 | dpaa2_eth_free_dpcon(priv, dpcon: channel->dpcon); |
3139 | err_setup: |
3140 | kfree(objp: channel); |
3141 | return ERR_PTR(error: err); |
3142 | } |
3143 | |
3144 | static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv, |
3145 | struct dpaa2_eth_channel *channel) |
3146 | { |
3147 | dpaa2_eth_free_dpcon(priv, dpcon: channel->dpcon); |
3148 | kfree(objp: channel); |
3149 | } |
3150 | |
3151 | /* DPIO setup: allocate and configure QBMan channels, setup core affinity |
3152 | * and register data availability notifications |
3153 | */ |
3154 | static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) |
3155 | { |
3156 | struct dpaa2_io_notification_ctx *nctx; |
3157 | struct dpaa2_eth_channel *channel; |
3158 | struct dpcon_notification_cfg dpcon_notif_cfg; |
3159 | struct device *dev = priv->net_dev->dev.parent; |
3160 | int i, err; |
3161 | |
3162 | /* We want the ability to spread ingress traffic (RX, TX conf) to as |
3163 | * many cores as possible, so we need one channel for each core |
3164 | * (unless there are fewer queues than cores, in which case the extra
3165 | * channels would be wasted). |
3166 | * Allocate one channel per core and register it to the core's |
3167 | * affine DPIO. If not enough channels are available for all cores |
3168 | * or if some cores don't have an affine DPIO, there will be no |
3169 | * ingress frame processing on those cores. |
3170 | */ |
3171 | cpumask_clear(dstp: &priv->dpio_cpumask); |
3172 | for_each_online_cpu(i) { |
3173 | /* Try to allocate a channel */ |
3174 | channel = dpaa2_eth_alloc_channel(priv); |
3175 | if (IS_ERR_OR_NULL(ptr: channel)) { |
3176 | err = PTR_ERR_OR_ZERO(ptr: channel); |
3177 | if (err == -EPROBE_DEFER) |
3178 | dev_dbg(dev, "waiting for affine channel\n" ); |
3179 | else |
3180 | dev_info(dev, |
3181 | "No affine channel for cpu %d and above\n" , i); |
3182 | goto err_alloc_ch; |
3183 | } |
3184 | |
3185 | priv->channel[priv->num_channels] = channel; |
3186 | |
3187 | nctx = &channel->nctx; |
3188 | nctx->is_cdan = 1; |
3189 | nctx->cb = dpaa2_eth_cdan_cb; |
3190 | nctx->id = channel->ch_id; |
3191 | nctx->desired_cpu = i; |
3192 | |
3193 | /* Register the new context */ |
3194 | channel->dpio = dpaa2_io_service_select(cpu: i); |
3195 | err = dpaa2_io_service_register(service: channel->dpio, ctx: nctx, dev); |
3196 | if (err) { |
3197 | dev_dbg(dev, "No affine DPIO for cpu %d\n" , i); |
3198 | /* If no affine DPIO for this core, there's probably |
3199 | * none available for next cores either. Signal we want |
3200 | * to retry later, in case the DPIO devices weren't |
3201 | * probed yet. |
3202 | */ |
3203 | err = -EPROBE_DEFER; |
3204 | goto err_service_reg; |
3205 | } |
3206 | |
3207 | /* Register DPCON notification with MC */ |
3208 | dpcon_notif_cfg.dpio_id = nctx->dpio_id; |
3209 | dpcon_notif_cfg.priority = 0; |
3210 | dpcon_notif_cfg.user_ctx = nctx->qman64; |
3211 | err = dpcon_set_notification(mc_io: priv->mc_io, cmd_flags: 0, |
3212 | token: channel->dpcon->mc_handle, |
3213 | cfg: &dpcon_notif_cfg); |
3214 | if (err) { |
3215 | dev_err(dev, "dpcon_set_notification failed()\n" ); |
3216 | goto err_set_cdan; |
3217 | } |
3218 | |
3219 | /* If we managed to allocate a channel and also found an affine |
3220 | * DPIO for this core, add it to the final mask |
3221 | */ |
3222 | cpumask_set_cpu(cpu: i, dstp: &priv->dpio_cpumask); |
3223 | priv->num_channels++; |
3224 | |
3225 | /* Stop if we already have enough channels to accommodate all |
3226 | * RX and TX conf queues |
3227 | */ |
3228 | if (priv->num_channels == priv->dpni_attrs.num_queues) |
3229 | break; |
3230 | } |
3231 | |
3232 | return 0; |
3233 | |
3234 | err_set_cdan: |
3235 | dpaa2_io_service_deregister(service: channel->dpio, ctx: nctx, dev); |
3236 | err_service_reg: |
3237 | dpaa2_eth_free_channel(priv, channel); |
3238 | err_alloc_ch: |
3239 | if (err == -EPROBE_DEFER) { |
3240 | for (i = 0; i < priv->num_channels; i++) { |
3241 | channel = priv->channel[i]; |
3242 | nctx = &channel->nctx; |
3243 | dpaa2_io_service_deregister(service: channel->dpio, ctx: nctx, dev); |
3244 | dpaa2_eth_free_channel(priv, channel); |
3245 | } |
3246 | priv->num_channels = 0; |
3247 | return err; |
3248 | } |
3249 | |
3250 | if (cpumask_empty(srcp: &priv->dpio_cpumask)) { |
3251 | dev_err(dev, "No cpu with an affine DPIO/DPCON\n" ); |
3252 | return -ENODEV; |
3253 | } |
3254 | |
3255 | dev_info(dev, "Cores %*pbl available for processing ingress traffic\n" , |
3256 | cpumask_pr_args(&priv->dpio_cpumask)); |
3257 | |
3258 | return 0; |
3259 | } |
3260 | |
3261 | static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv) |
3262 | { |
3263 | struct device *dev = priv->net_dev->dev.parent; |
3264 | struct dpaa2_eth_channel *ch; |
3265 | int i; |
3266 | |
3267 | /* deregister CDAN notifications and free channels */ |
3268 | for (i = 0; i < priv->num_channels; i++) { |
3269 | ch = priv->channel[i]; |
3270 | dpaa2_io_service_deregister(service: ch->dpio, ctx: &ch->nctx, dev); |
3271 | dpaa2_eth_free_channel(priv, channel: ch); |
3272 | } |
3273 | } |
3274 | |
3275 | static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv, |
3276 | int cpu) |
3277 | { |
3278 | struct device *dev = priv->net_dev->dev.parent; |
3279 | int i; |
3280 | |
3281 | for (i = 0; i < priv->num_channels; i++) |
3282 | if (priv->channel[i]->nctx.desired_cpu == cpu) |
3283 | return priv->channel[i]; |
3284 | |
3285 | /* We should never get here. Issue a warning and return |
3286 | * the first channel, because it's still better than nothing |
3287 | */ |
3288 | dev_warn(dev, "No affine channel found for cpu %d\n" , cpu); |
3289 | |
3290 | return priv->channel[0]; |
3291 | } |
3292 | |
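/* Assign each frame queue to one of the cores in dpio_cpumask, in a
 * round-robin fashion; Rx/Rx-err and Tx-conf queues are balanced
 * independently. For example, with dpio_cpumask = {0,1,2,3} and eight Rx
 * queues, Rx queues 0..7 are delivered on cores 0,1,2,3,0,1,2,3.
 */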
3293 | static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv) |
3294 | { |
3295 | struct device *dev = priv->net_dev->dev.parent; |
3296 | struct dpaa2_eth_fq *fq; |
3297 | int rx_cpu, txc_cpu; |
3298 | int i; |
3299 | |
3300 | /* For each FQ, pick one channel/CPU to deliver frames to. |
3301 | * This may well change at runtime, either through irqbalance or |
3302 | * through direct user intervention. |
3303 | */ |
3304 | rx_cpu = txc_cpu = cpumask_first(srcp: &priv->dpio_cpumask); |
3305 | |
3306 | for (i = 0; i < priv->num_fqs; i++) { |
3307 | fq = &priv->fq[i]; |
3308 | switch (fq->type) { |
3309 | case DPAA2_RX_FQ: |
3310 | case DPAA2_RX_ERR_FQ: |
3311 | fq->target_cpu = rx_cpu; |
3312 | rx_cpu = cpumask_next(n: rx_cpu, srcp: &priv->dpio_cpumask); |
3313 | if (rx_cpu >= nr_cpu_ids) |
3314 | rx_cpu = cpumask_first(srcp: &priv->dpio_cpumask); |
3315 | break; |
3316 | case DPAA2_TX_CONF_FQ: |
3317 | fq->target_cpu = txc_cpu; |
3318 | txc_cpu = cpumask_next(n: txc_cpu, srcp: &priv->dpio_cpumask); |
3319 | if (txc_cpu >= nr_cpu_ids) |
3320 | txc_cpu = cpumask_first(srcp: &priv->dpio_cpumask); |
3321 | break; |
3322 | default: |
3323 | dev_err(dev, "Unknown FQ type: %d\n" , fq->type); |
3324 | } |
3325 | fq->channel = dpaa2_eth_get_affine_channel(priv, cpu: fq->target_cpu); |
3326 | } |
3327 | |
3328 | update_xps(priv); |
3329 | } |
3330 | |
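/* Build the frame queue array: first one Tx confirmation FQ per Tx flow,
 * then one Rx FQ per (traffic class, flow) pair, and finally a single Rx
 * error FQ. With 8 queues and 2 traffic classes, for instance, the array
 * holds 8 Tx-conf FQs, 16 Rx FQs and 1 Rx-err FQ, in that order.
 */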
3331 | static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) |
3332 | { |
3333 | int i, j; |
3334 | |
3335 | /* We have one TxConf FQ per Tx flow. |
3336 | * The number of Tx and Rx queues is the same. |
3337 | * Tx queues come first in the fq array. |
3338 | */ |
3339 | for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { |
3340 | priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; |
3341 | priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; |
3342 | priv->fq[priv->num_fqs++].flowid = (u16)i; |
3343 | } |
3344 | |
3345 | for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { |
3346 | for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { |
3347 | priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; |
3348 | priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; |
3349 | priv->fq[priv->num_fqs].tc = (u8)j; |
3350 | priv->fq[priv->num_fqs++].flowid = (u16)i; |
3351 | } |
3352 | } |
3353 | |
3354 | /* We have exactly one Rx error queue per DPNI */ |
3355 | priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; |
3356 | priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; |
3357 | |
3358 | /* For each FQ, decide on which core to process incoming frames */ |
3359 | dpaa2_eth_set_fq_affinity(priv); |
3360 | } |
3361 | |
3362 | /* Allocate and configure a buffer pool */ |
3363 | struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv) |
3364 | { |
3365 | struct device *dev = priv->net_dev->dev.parent; |
3366 | struct fsl_mc_device *dpbp_dev; |
3367 | struct dpbp_attr dpbp_attrs; |
3368 | struct dpaa2_eth_bp *bp; |
3369 | int err; |
3370 | |
3371 | err = fsl_mc_object_allocate(to_fsl_mc_device(dev), pool_type: FSL_MC_POOL_DPBP, |
3372 | new_mc_adev: &dpbp_dev); |
3373 | if (err) { |
3374 | if (err == -ENXIO) |
3375 | err = -EPROBE_DEFER; |
3376 | else |
3377 | dev_err(dev, "DPBP device allocation failed\n" ); |
3378 | return ERR_PTR(error: err); |
3379 | } |
3380 | |
3381 | bp = kzalloc(size: sizeof(*bp), GFP_KERNEL); |
3382 | if (!bp) { |
3383 | err = -ENOMEM; |
3384 | goto err_alloc; |
3385 | } |
3386 | |
3387 | err = dpbp_open(mc_io: priv->mc_io, cmd_flags: 0, dpbp_id: dpbp_dev->obj_desc.id, |
3388 | token: &dpbp_dev->mc_handle); |
3389 | if (err) { |
3390 | dev_err(dev, "dpbp_open() failed\n" ); |
3391 | goto err_open; |
3392 | } |
3393 | |
3394 | err = dpbp_reset(mc_io: priv->mc_io, cmd_flags: 0, token: dpbp_dev->mc_handle); |
3395 | if (err) { |
3396 | dev_err(dev, "dpbp_reset() failed\n" ); |
3397 | goto err_reset; |
3398 | } |
3399 | |
3400 | err = dpbp_enable(mc_io: priv->mc_io, cmd_flags: 0, token: dpbp_dev->mc_handle); |
3401 | if (err) { |
3402 | dev_err(dev, "dpbp_enable() failed\n" ); |
3403 | goto err_enable; |
3404 | } |
3405 | |
3406 | err = dpbp_get_attributes(mc_io: priv->mc_io, cmd_flags: 0, token: dpbp_dev->mc_handle, |
3407 | attr: &dpbp_attrs); |
3408 | if (err) { |
3409 | dev_err(dev, "dpbp_get_attributes() failed\n" ); |
3410 | goto err_get_attr; |
3411 | } |
3412 | |
3413 | bp->dev = dpbp_dev; |
3414 | bp->bpid = dpbp_attrs.bpid; |
3415 | |
3416 | return bp; |
3417 | |
3418 | err_get_attr: |
3419 | dpbp_disable(mc_io: priv->mc_io, cmd_flags: 0, token: dpbp_dev->mc_handle); |
3420 | err_enable: |
3421 | err_reset: |
3422 | dpbp_close(mc_io: priv->mc_io, cmd_flags: 0, token: dpbp_dev->mc_handle); |
3423 | err_open: |
3424 | kfree(objp: bp); |
3425 | err_alloc: |
3426 | fsl_mc_object_free(mc_adev: dpbp_dev); |
3427 | |
3428 | return ERR_PTR(error: err); |
3429 | } |
3430 | |
3431 | static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv) |
3432 | { |
3433 | struct dpaa2_eth_bp *bp; |
3434 | int i; |
3435 | |
3436 | bp = dpaa2_eth_allocate_dpbp(priv); |
3437 | if (IS_ERR(ptr: bp)) |
3438 | return PTR_ERR(ptr: bp); |
3439 | |
3440 | priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp; |
3441 | priv->num_bps++; |
3442 | |
3443 | for (i = 0; i < priv->num_channels; i++) |
3444 | priv->channel[i]->bp = bp; |
3445 | |
3446 | return 0; |
3447 | } |
3448 | |
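/* Release a buffer pool: drain any remaining buffers, disable and close the
 * DPBP object, then compact the priv->bp array by moving the last in-use
 * entry into the freed slot.
 */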
3449 | void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp) |
3450 | { |
3451 | int idx_bp; |
3452 | |
3453 | /* Find the index at which this BP is stored */ |
3454 | for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++) |
3455 | if (priv->bp[idx_bp] == bp) |
3456 | break; |
3457 | |
3458 | /* Drain the pool and disable the associated MC object */ |
3459 | dpaa2_eth_drain_pool(priv, bpid: bp->bpid); |
3460 | dpbp_disable(mc_io: priv->mc_io, cmd_flags: 0, token: bp->dev->mc_handle); |
3461 | dpbp_close(mc_io: priv->mc_io, cmd_flags: 0, token: bp->dev->mc_handle); |
3462 | fsl_mc_object_free(mc_adev: bp->dev); |
3463 | kfree(objp: bp); |
3464 | |
3465 | /* Move the last in-use DPBP into this position */
3466 | priv->bp[idx_bp] = priv->bp[priv->num_bps - 1]; |
3467 | priv->num_bps--; |
3468 | } |
3469 | |
3470 | static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv) |
3471 | { |
3472 | int i; |
3473 | |
3474 | for (i = 0; i < priv->num_bps; i++) |
3475 | dpaa2_eth_free_dpbp(priv, bp: priv->bp[i]); |
3476 | } |
3477 | |
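/* Configure the hardware buffer layouts: software annotation area and
 * timestamp/frame status passing for Tx and Tx-confirm buffers; alignment,
 * headroom and parse results for Rx buffers. Also computes the Rx buffer
 * size and retrieves the minimum Tx data offset required by hardware.
 */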
3478 | static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) |
3479 | { |
3480 | struct device *dev = priv->net_dev->dev.parent; |
3481 | struct dpni_buffer_layout buf_layout = {0}; |
3482 | u16 rx_buf_align; |
3483 | int err; |
3484 | |
3485 | /* We need to check for WRIOP version 1.0.0; depending on the MC
3486 | * version, this number is not always reported correctly on rev1
3487 | * hardware, so we check for both alternatives here.
3488 | */
3489 | if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || |
3490 | priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) |
3491 | rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; |
3492 | else |
3493 | rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; |
3494 | |
3495 | /* We need to ensure that the buffer size seen by WRIOP is a multiple |
3496 | * of 64 or 256 bytes depending on the WRIOP version. |
3497 | */ |
3498 | priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); |
3499 | |
3500 | /* tx buffer */ |
3501 | buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; |
3502 | buf_layout.pass_timestamp = true; |
3503 | buf_layout.pass_frame_status = true; |
3504 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | |
3505 | DPNI_BUF_LAYOUT_OPT_TIMESTAMP | |
3506 | DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; |
3507 | err = dpni_set_buffer_layout(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3508 | qtype: DPNI_QUEUE_TX, layout: &buf_layout); |
3509 | if (err) { |
3510 | dev_err(dev, "dpni_set_buffer_layout(TX) failed\n" ); |
3511 | return err; |
3512 | } |
3513 | |
3514 | /* tx-confirm buffer */ |
3515 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP | |
3516 | DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; |
3517 | err = dpni_set_buffer_layout(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3518 | qtype: DPNI_QUEUE_TX_CONFIRM, layout: &buf_layout); |
3519 | if (err) { |
3520 | dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n" ); |
3521 | return err; |
3522 | } |
3523 | |
3524 | /* Now that we've set our tx buffer layout, retrieve the minimum |
3525 | * required tx data offset. |
3526 | */ |
3527 | err = dpni_get_tx_data_offset(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3528 | data_offset: &priv->tx_data_offset); |
3529 | if (err) { |
3530 | dev_err(dev, "dpni_get_tx_data_offset() failed\n" ); |
3531 | return err; |
3532 | } |
3533 | |
3534 | if ((priv->tx_data_offset % 64) != 0) |
3535 | dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n" , |
3536 | priv->tx_data_offset); |
3537 | |
3538 | /* rx buffer */ |
3539 | buf_layout.pass_frame_status = true; |
3540 | buf_layout.pass_parser_result = true; |
3541 | buf_layout.data_align = rx_buf_align; |
3542 | buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); |
3543 | buf_layout.private_data_size = 0; |
3544 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | |
3545 | DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | |
3546 | DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | |
3547 | DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | |
3548 | DPNI_BUF_LAYOUT_OPT_TIMESTAMP; |
3549 | err = dpni_set_buffer_layout(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3550 | qtype: DPNI_QUEUE_RX, layout: &buf_layout); |
3551 | if (err) { |
3552 | dev_err(dev, "dpni_set_buffer_layout(RX) failed\n" ); |
3553 | return err; |
3554 | } |
3555 | |
3556 | return 0; |
3557 | } |
3558 | |
3559 | #define DPNI_ENQUEUE_FQID_VER_MAJOR 7 |
3560 | #define DPNI_ENQUEUE_FQID_VER_MINOR 9 |
3561 | |
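/* Two Tx enqueue methods are supported, selected at probe time by
 * dpaa2_eth_set_enqueue_mode() below: queuing destination (QD) based
 * enqueue for DPNI versions older than 7.9, pushing one frame at a time,
 * and FQID-based enqueue otherwise, which can push multiple frames in one
 * call. Callers elsewhere in the driver go through the priv->enqueue hook,
 * roughly as (illustrative only):
 *
 *	err = priv->enqueue(priv, fq, &fd, prio, 1, &enqueued);
 */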
3562 | static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, |
3563 | struct dpaa2_eth_fq *fq, |
3564 | struct dpaa2_fd *fd, u8 prio, |
3565 | u32 num_frames __always_unused, |
3566 | int *frames_enqueued) |
3567 | { |
3568 | int err; |
3569 | |
3570 | err = dpaa2_io_service_enqueue_qd(d: fq->channel->dpio, |
3571 | qdid: priv->tx_qdid, prio, |
3572 | qdbin: fq->tx_qdbin, fd); |
3573 | if (!err && frames_enqueued) |
3574 | *frames_enqueued = 1; |
3575 | return err; |
3576 | } |
3577 | |
3578 | static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv, |
3579 | struct dpaa2_eth_fq *fq, |
3580 | struct dpaa2_fd *fd, |
3581 | u8 prio, u32 num_frames, |
3582 | int *frames_enqueued) |
3583 | { |
3584 | int err; |
3585 | |
3586 | err = dpaa2_io_service_enqueue_multiple_fq(d: fq->channel->dpio, |
3587 | fqid: fq->tx_fqid[prio], |
3588 | fd, number_of_frame: num_frames); |
3589 | |
3590 | if (err == 0) |
3591 | return -EBUSY; |
3592 | |
3593 | if (frames_enqueued) |
3594 | *frames_enqueued = err; |
3595 | return 0; |
3596 | } |
3597 | |
3598 | static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv) |
3599 | { |
3600 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, |
3601 | DPNI_ENQUEUE_FQID_VER_MINOR) < 0) |
3602 | priv->enqueue = dpaa2_eth_enqueue_qd; |
3603 | else |
3604 | priv->enqueue = dpaa2_eth_enqueue_fq_multiple; |
3605 | } |
3606 | |
3607 | static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv) |
3608 | { |
3609 | struct device *dev = priv->net_dev->dev.parent; |
3610 | struct dpni_link_cfg link_cfg = {0}; |
3611 | int err; |
3612 | |
3613 | /* Get the default link options so we don't override other flags */ |
3614 | err = dpni_get_link_cfg(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, cfg: &link_cfg); |
3615 | if (err) { |
3616 | dev_err(dev, "dpni_get_link_cfg() failed\n" ); |
3617 | return err; |
3618 | } |
3619 | |
3620 | /* By default, enable both Rx and Tx pause frames */ |
3621 | link_cfg.options |= DPNI_LINK_OPT_PAUSE; |
3622 | link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; |
3623 | err = dpni_set_link_cfg(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, cfg: &link_cfg); |
3624 | if (err) { |
3625 | dev_err(dev, "dpni_set_link_cfg() failed\n" ); |
3626 | return err; |
3627 | } |
3628 | |
3629 | priv->link_state.options = link_cfg.options; |
3630 | |
3631 | return 0; |
3632 | } |
3633 | |
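/* Refresh the cached Tx FQIDs; called from the endpoint-changed interrupt
 * handler, since the FQIDs may change when the DPNI gets connected to a
 * different object. On any error we fall back to QDID-based enqueue.
 */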
3634 | static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv) |
3635 | { |
3636 | struct dpni_queue_id qid = {0}; |
3637 | struct dpaa2_eth_fq *fq; |
3638 | struct dpni_queue queue; |
3639 | int i, j, err; |
3640 | |
3641 | /* We only use Tx FQIDs for FQID-based enqueue, so check |
3642 | * if DPNI version supports it before updating FQIDs |
3643 | */ |
3644 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, |
3645 | DPNI_ENQUEUE_FQID_VER_MINOR) < 0) |
3646 | return; |
3647 | |
3648 | for (i = 0; i < priv->num_fqs; i++) { |
3649 | fq = &priv->fq[i]; |
3650 | if (fq->type != DPAA2_TX_CONF_FQ) |
3651 | continue; |
3652 | for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { |
3653 | err = dpni_get_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3654 | qtype: DPNI_QUEUE_TX, tc: j, index: fq->flowid, |
3655 | queue: &queue, qid: &qid); |
3656 | if (err) |
3657 | goto out_err; |
3658 | |
3659 | fq->tx_fqid[j] = qid.fqid; |
3660 | if (fq->tx_fqid[j] == 0) |
3661 | goto out_err; |
3662 | } |
3663 | } |
3664 | |
3665 | priv->enqueue = dpaa2_eth_enqueue_fq_multiple; |
3666 | |
3667 | return; |
3668 | |
3669 | out_err: |
3670 | netdev_info(dev: priv->net_dev, |
3671 | format: "Error reading Tx FQID, fallback to QDID-based enqueue\n" ); |
3672 | priv->enqueue = dpaa2_eth_enqueue_qd; |
3673 | } |
3674 | |
3675 | /* Configure ingress classification based on VLAN PCP */ |
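/* A one-to-one mapping between the highest VLAN PCP values and the available
 * traffic classes is installed, with lower PCPs falling through to the
 * default TC 0. For example, with 4 traffic classes the QoS table ends up
 * with PCP 7 -> TC 3, PCP 6 -> TC 2, PCP 5 -> TC 1, PCP 4 -> TC 0, while
 * PCP 3..0 miss the table and land on the default TC 0 as well.
 */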
3676 | static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv) |
3677 | { |
3678 | struct device *dev = priv->net_dev->dev.parent; |
3679 | struct dpkg_profile_cfg kg_cfg = {0}; |
3680 | struct dpni_qos_tbl_cfg qos_cfg = {0}; |
3681 | struct dpni_rule_cfg key_params; |
3682 | void *dma_mem, *key, *mask; |
3683 | u8 key_size = 2; /* VLAN TCI field */ |
3684 | int i, pcp, err; |
3685 | |
3686 | /* VLAN-based classification only makes sense if we have multiple |
3687 | * traffic classes. |
3688 | * Also, we need to extract just the 3-bit PCP field from the VLAN |
3689 | * header, and we can only do that by using a mask.
3690 | */ |
3691 | if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) { |
3692 | dev_dbg(dev, "VLAN-based QoS classification not supported\n" ); |
3693 | return -EOPNOTSUPP; |
3694 | } |
3695 | |
3696 | dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
3697 | if (!dma_mem) |
3698 | return -ENOMEM; |
3699 | |
3700 | kg_cfg.num_extracts = 1; |
3701 | kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; |
3702 | kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; |
3703 | kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; |
3704 | kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; |
3705 | |
3706 | err = dpni_prepare_key_cfg(cfg: &kg_cfg, key_cfg_buf: dma_mem); |
3707 | if (err) { |
3708 | dev_err(dev, "dpni_prepare_key_cfg failed\n" ); |
3709 | goto out_free_tbl; |
3710 | } |
3711 | |
3712 | /* set QoS table */ |
3713 | qos_cfg.default_tc = 0; |
3714 | qos_cfg.discard_on_miss = 0; |
3715 | qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, |
3716 | DPAA2_CLASSIFIER_DMA_SIZE, |
3717 | DMA_TO_DEVICE); |
3718 | if (dma_mapping_error(dev, dma_addr: qos_cfg.key_cfg_iova)) { |
3719 | dev_err(dev, "QoS table DMA mapping failed\n" ); |
3720 | err = -ENOMEM; |
3721 | goto out_free_tbl; |
3722 | } |
3723 | |
3724 | err = dpni_set_qos_table(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, cfg: &qos_cfg); |
3725 | if (err) { |
3726 | dev_err(dev, "dpni_set_qos_table failed\n" ); |
3727 | goto out_unmap_tbl; |
3728 | } |
3729 | |
3730 | /* Add QoS table entries */ |
3731 | key = kzalloc(size: key_size * 2, GFP_KERNEL); |
3732 | if (!key) { |
3733 | err = -ENOMEM; |
3734 | goto out_unmap_tbl; |
3735 | } |
3736 | mask = key + key_size; |
3737 | *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK); |
3738 | |
3739 | key_params.key_iova = dma_map_single(dev, key, key_size * 2, |
3740 | DMA_TO_DEVICE); |
3741 | if (dma_mapping_error(dev, dma_addr: key_params.key_iova)) { |
3742 | dev_err(dev, "Qos table entry DMA mapping failed\n" ); |
3743 | err = -ENOMEM; |
3744 | goto out_free_key; |
3745 | } |
3746 | |
3747 | key_params.mask_iova = key_params.key_iova + key_size; |
3748 | key_params.key_size = key_size; |
3749 | |
3750 | /* We add rules for PCP-based distribution starting with the highest
3751 | * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3752 | * classes to accommodate all priority levels, the lowest ones end up
3753 | * on TC 0, which was configured as the default.
3754 | */
3755 | for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) { |
3756 | *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT); |
3757 | dma_sync_single_for_device(dev, addr: key_params.key_iova, |
3758 | size: key_size * 2, dir: DMA_TO_DEVICE); |
3759 | |
3760 | err = dpni_add_qos_entry(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3761 | cfg: &key_params, tc_id: i, index: i); |
3762 | if (err) { |
3763 | dev_err(dev, "dpni_add_qos_entry failed\n" ); |
3764 | dpni_clear_qos_table(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token); |
3765 | goto out_unmap_key; |
3766 | } |
3767 | } |
3768 | |
3769 | priv->vlan_cls_enabled = true; |
3770 | |
3771 | /* Table and key memory is not persistent, clean everything up after |
3772 | * configuration is finished |
3773 | */ |
3774 | out_unmap_key: |
3775 | dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE); |
3776 | out_free_key: |
3777 | kfree(objp: key); |
3778 | out_unmap_tbl: |
3779 | dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE, |
3780 | DMA_TO_DEVICE); |
3781 | out_free_tbl: |
3782 | kfree(objp: dma_mem); |
3783 | |
3784 | return err; |
3785 | } |
3786 | |
3787 | /* Configure the DPNI object this interface is associated with */ |
3788 | static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) |
3789 | { |
3790 | struct device *dev = &ls_dev->dev; |
3791 | struct dpaa2_eth_priv *priv; |
3792 | struct net_device *net_dev; |
3793 | int err; |
3794 | |
3795 | net_dev = dev_get_drvdata(dev); |
3796 | priv = netdev_priv(dev: net_dev); |
3797 | |
3798 | /* get a handle for the DPNI object */ |
3799 | err = dpni_open(mc_io: priv->mc_io, cmd_flags: 0, dpni_id: ls_dev->obj_desc.id, token: &priv->mc_token); |
3800 | if (err) { |
3801 | dev_err(dev, "dpni_open() failed\n" ); |
3802 | return err; |
3803 | } |
3804 | |
3805 | /* Check if we can work with this DPNI object */ |
3806 | err = dpni_get_api_version(mc_io: priv->mc_io, cmd_flags: 0, major_ver: &priv->dpni_ver_major, |
3807 | minor_ver: &priv->dpni_ver_minor); |
3808 | if (err) { |
3809 | dev_err(dev, "dpni_get_api_version() failed\n" ); |
3810 | goto close; |
3811 | } |
3812 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { |
3813 | dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n" , |
3814 | priv->dpni_ver_major, priv->dpni_ver_minor, |
3815 | DPNI_VER_MAJOR, DPNI_VER_MINOR); |
3816 | err = -EOPNOTSUPP; |
3817 | goto close; |
3818 | } |
3819 | |
3820 | ls_dev->mc_io = priv->mc_io; |
3821 | ls_dev->mc_handle = priv->mc_token; |
3822 | |
3823 | err = dpni_reset(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token); |
3824 | if (err) { |
3825 | dev_err(dev, "dpni_reset() failed\n" ); |
3826 | goto close; |
3827 | } |
3828 | |
3829 | err = dpni_get_attributes(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3830 | attr: &priv->dpni_attrs); |
3831 | if (err) { |
3832 | dev_err(dev, "dpni_get_attributes() failed (err=%d)\n" , err); |
3833 | goto close; |
3834 | } |
3835 | |
3836 | err = dpaa2_eth_set_buffer_layout(priv); |
3837 | if (err) |
3838 | goto close; |
3839 | |
3840 | dpaa2_eth_set_enqueue_mode(priv); |
3841 | |
3842 | /* Enable pause frame support */ |
3843 | if (dpaa2_eth_has_pause_support(priv)) { |
3844 | err = dpaa2_eth_set_pause(priv); |
3845 | if (err) |
3846 | goto close; |
3847 | } |
3848 | |
3849 | err = dpaa2_eth_set_vlan_qos(priv); |
3850 | if (err && err != -EOPNOTSUPP) |
3851 | goto close; |
3852 | |
3853 | priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv), |
3854 | size: sizeof(struct dpaa2_eth_cls_rule), |
3855 | GFP_KERNEL); |
3856 | if (!priv->cls_rules) { |
3857 | err = -ENOMEM; |
3858 | goto close; |
3859 | } |
3860 | |
3861 | return 0; |
3862 | |
3863 | close: |
3864 | dpni_close(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token); |
3865 | |
3866 | return err; |
3867 | } |
3868 | |
3869 | static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv) |
3870 | { |
3871 | int err; |
3872 | |
3873 | err = dpni_reset(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token); |
3874 | if (err) |
3875 | netdev_warn(dev: priv->net_dev, format: "dpni_reset() failed (err %d)\n" , |
3876 | err); |
3877 | |
3878 | dpni_close(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token); |
3879 | } |
3880 | |
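/* Configure one Rx frame queue: retrieve its FQID, point it at the affine
 * channel's DPCON so that frame availability generates CDANs, store the fq
 * pointer as user context, and register the xdp_rxq info (once per channel,
 * i.e. only for traffic class 0).
 */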
3881 | static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, |
3882 | struct dpaa2_eth_fq *fq) |
3883 | { |
3884 | struct device *dev = priv->net_dev->dev.parent; |
3885 | struct dpni_queue queue; |
3886 | struct dpni_queue_id qid; |
3887 | int err; |
3888 | |
3889 | err = dpni_get_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3890 | qtype: DPNI_QUEUE_RX, tc: fq->tc, index: fq->flowid, queue: &queue, qid: &qid); |
3891 | if (err) { |
3892 | dev_err(dev, "dpni_get_queue(RX) failed\n" ); |
3893 | return err; |
3894 | } |
3895 | |
3896 | fq->fqid = qid.fqid; |
3897 | |
3898 | queue.destination.id = fq->channel->dpcon_id; |
3899 | queue.destination.type = DPNI_DEST_DPCON; |
3900 | queue.destination.priority = 1; |
3901 | queue.user_context = (u64)(uintptr_t)fq; |
3902 | err = dpni_set_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3903 | qtype: DPNI_QUEUE_RX, tc: fq->tc, index: fq->flowid, |
3904 | DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, |
3905 | queue: &queue); |
3906 | if (err) { |
3907 | dev_err(dev, "dpni_set_queue(RX) failed\n" ); |
3908 | return err; |
3909 | } |
3910 | |
3911 | /* xdp_rxq setup, done only once per channel */
3913 | if (fq->tc > 0) |
3914 | return 0; |
3915 | |
3916 | err = xdp_rxq_info_reg(xdp_rxq: &fq->channel->xdp_rxq, dev: priv->net_dev, |
3917 | queue_index: fq->flowid, napi_id: 0); |
3918 | if (err) { |
3919 | dev_err(dev, "xdp_rxq_info_reg failed\n" ); |
3920 | return err; |
3921 | } |
3922 | |
3923 | err = xdp_rxq_info_reg_mem_model(xdp_rxq: &fq->channel->xdp_rxq, |
3924 | type: MEM_TYPE_PAGE_ORDER0, NULL); |
3925 | if (err) { |
3926 | dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n" ); |
3927 | return err; |
3928 | } |
3929 | |
3930 | return 0; |
3931 | } |
3932 | |
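/* Configure the Tx path for one flow: cache the Tx FQID of each traffic
 * class and the common queuing destination bin (qdbin), then set up the
 * corresponding Tx confirmation queue to notify the affine channel's DPCON.
 */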
3933 | static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv, |
3934 | struct dpaa2_eth_fq *fq) |
3935 | { |
3936 | struct device *dev = priv->net_dev->dev.parent; |
3937 | struct dpni_queue queue; |
3938 | struct dpni_queue_id qid; |
3939 | int i, err; |
3940 | |
3941 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
3942 | err = dpni_get_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3943 | qtype: DPNI_QUEUE_TX, tc: i, index: fq->flowid, |
3944 | queue: &queue, qid: &qid); |
3945 | if (err) { |
3946 | dev_err(dev, "dpni_get_queue(TX) failed\n" ); |
3947 | return err; |
3948 | } |
3949 | fq->tx_fqid[i] = qid.fqid; |
3950 | } |
3951 | |
3952 | /* All Tx queues belonging to the same flowid have the same qdbin */ |
3953 | fq->tx_qdbin = qid.qdbin; |
3954 | |
3955 | err = dpni_get_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3956 | qtype: DPNI_QUEUE_TX_CONFIRM, tc: 0, index: fq->flowid, |
3957 | queue: &queue, qid: &qid); |
3958 | if (err) { |
3959 | dev_err(dev, "dpni_get_queue(TX_CONF) failed\n" ); |
3960 | return err; |
3961 | } |
3962 | |
3963 | fq->fqid = qid.fqid; |
3964 | |
3965 | queue.destination.id = fq->channel->dpcon_id; |
3966 | queue.destination.type = DPNI_DEST_DPCON; |
3967 | queue.destination.priority = 0; |
3968 | queue.user_context = (u64)(uintptr_t)fq; |
3969 | err = dpni_set_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3970 | qtype: DPNI_QUEUE_TX_CONFIRM, tc: 0, index: fq->flowid, |
3971 | DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, |
3972 | queue: &queue); |
3973 | if (err) { |
3974 | dev_err(dev, "dpni_set_queue(TX_CONF) failed\n" ); |
3975 | return err; |
3976 | } |
3977 | |
3978 | return 0; |
3979 | } |
3980 | |
3981 | static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, |
3982 | struct dpaa2_eth_fq *fq) |
3983 | { |
3984 | struct device *dev = priv->net_dev->dev.parent; |
3985 | struct dpni_queue q = { { 0 } }; |
3986 | struct dpni_queue_id qid; |
3987 | u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; |
3988 | int err; |
3989 | |
3990 | err = dpni_get_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
3991 | qtype: DPNI_QUEUE_RX_ERR, tc: 0, index: 0, queue: &q, qid: &qid); |
3992 | if (err) { |
3993 | dev_err(dev, "dpni_get_queue() failed (%d)\n" , err); |
3994 | return err; |
3995 | } |
3996 | |
3997 | fq->fqid = qid.fqid; |
3998 | |
3999 | q.destination.id = fq->channel->dpcon_id; |
4000 | q.destination.type = DPNI_DEST_DPCON; |
4001 | q.destination.priority = 1; |
4002 | q.user_context = (u64)(uintptr_t)fq; |
4003 | err = dpni_set_queue(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4004 | qtype: DPNI_QUEUE_RX_ERR, tc: 0, index: 0, options: q_opt, queue: &q); |
4005 | if (err) { |
4006 | dev_err(dev, "dpni_set_queue() failed (%d)\n" , err); |
4007 | return err; |
4008 | } |
4009 | |
4010 | return 0; |
4011 | } |
4012 | |
4013 | /* Supported header fields for Rx hash distribution key */ |
4014 | static const struct dpaa2_eth_dist_fields dist_fields[] = { |
4015 | { |
4016 | /* L2 header */ |
4017 | .rxnfc_field = RXH_L2DA, |
4018 | .cls_prot = NET_PROT_ETH, |
4019 | .cls_field = NH_FLD_ETH_DA, |
4020 | .id = DPAA2_ETH_DIST_ETHDST, |
4021 | .size = 6, |
4022 | }, { |
4023 | .cls_prot = NET_PROT_ETH, |
4024 | .cls_field = NH_FLD_ETH_SA, |
4025 | .id = DPAA2_ETH_DIST_ETHSRC, |
4026 | .size = 6, |
4027 | }, { |
4028 | /* This is the last ethertype field parsed: |
4029 | * depending on frame format, it can be the MAC ethertype |
4030 | * or the VLAN etype. |
4031 | */ |
4032 | .cls_prot = NET_PROT_ETH, |
4033 | .cls_field = NH_FLD_ETH_TYPE, |
4034 | .id = DPAA2_ETH_DIST_ETHTYPE, |
4035 | .size = 2, |
4036 | }, { |
4037 | /* VLAN header */ |
4038 | .rxnfc_field = RXH_VLAN, |
4039 | .cls_prot = NET_PROT_VLAN, |
4040 | .cls_field = NH_FLD_VLAN_TCI, |
4041 | .id = DPAA2_ETH_DIST_VLAN, |
4042 | .size = 2, |
4043 | }, { |
4044 | /* IP header */ |
4045 | .rxnfc_field = RXH_IP_SRC, |
4046 | .cls_prot = NET_PROT_IP, |
4047 | .cls_field = NH_FLD_IP_SRC, |
4048 | .id = DPAA2_ETH_DIST_IPSRC, |
4049 | .size = 4, |
4050 | }, { |
4051 | .rxnfc_field = RXH_IP_DST, |
4052 | .cls_prot = NET_PROT_IP, |
4053 | .cls_field = NH_FLD_IP_DST, |
4054 | .id = DPAA2_ETH_DIST_IPDST, |
4055 | .size = 4, |
4056 | }, { |
4057 | .rxnfc_field = RXH_L3_PROTO, |
4058 | .cls_prot = NET_PROT_IP, |
4059 | .cls_field = NH_FLD_IP_PROTO, |
4060 | .id = DPAA2_ETH_DIST_IPPROTO, |
4061 | .size = 1, |
4062 | }, { |
4063 | /* Using UDP ports, this is functionally equivalent to raw |
4064 | * byte pairs from L4 header. |
4065 | */ |
4066 | .rxnfc_field = RXH_L4_B_0_1, |
4067 | .cls_prot = NET_PROT_UDP, |
4068 | .cls_field = NH_FLD_UDP_PORT_SRC, |
4069 | .id = DPAA2_ETH_DIST_L4SRC, |
4070 | .size = 2, |
4071 | }, { |
4072 | .rxnfc_field = RXH_L4_B_2_3, |
4073 | .cls_prot = NET_PROT_UDP, |
4074 | .cls_field = NH_FLD_UDP_PORT_DST, |
4075 | .id = DPAA2_ETH_DIST_L4DST, |
4076 | .size = 2, |
4077 | }, |
4078 | }; |
4079 | |
4080 | /* Configure the Rx hash key using the legacy API */ |
4081 | static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) |
4082 | { |
4083 | struct device *dev = priv->net_dev->dev.parent; |
4084 | struct dpni_rx_tc_dist_cfg dist_cfg; |
4085 | int i, err = 0; |
4086 | |
4087 | memset(&dist_cfg, 0, sizeof(dist_cfg)); |
4088 | |
4089 | dist_cfg.key_cfg_iova = key; |
4090 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
4091 | dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; |
4092 | |
4093 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
4094 | err = dpni_set_rx_tc_dist(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4095 | tc_id: i, cfg: &dist_cfg); |
4096 | if (err) { |
4097 | dev_err(dev, "dpni_set_rx_tc_dist failed\n" ); |
4098 | break; |
4099 | } |
4100 | } |
4101 | |
4102 | return err; |
4103 | } |
4104 | |
4105 | /* Configure the Rx hash key using the new API */ |
4106 | static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) |
4107 | { |
4108 | struct device *dev = priv->net_dev->dev.parent; |
4109 | struct dpni_rx_dist_cfg dist_cfg; |
4110 | int i, err = 0; |
4111 | |
4112 | memset(&dist_cfg, 0, sizeof(dist_cfg)); |
4113 | |
4114 | dist_cfg.key_cfg_iova = key; |
4115 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
4116 | dist_cfg.enable = 1; |
4117 | |
4118 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
4119 | dist_cfg.tc = i; |
4120 | err = dpni_set_rx_hash_dist(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4121 | cfg: &dist_cfg); |
4122 | if (err) { |
4123 | dev_err(dev, "dpni_set_rx_hash_dist failed\n" ); |
4124 | break; |
4125 | } |
4126 | |
4127 | /* If the flow steering / hashing key is shared between all |
4128 | * traffic classes, install it just once |
4129 | */ |
4130 | if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) |
4131 | break; |
4132 | } |
4133 | |
4134 | return err; |
4135 | } |
4136 | |
4137 | /* Configure the Rx flow classification key */ |
4138 | static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) |
4139 | { |
4140 | struct device *dev = priv->net_dev->dev.parent; |
4141 | struct dpni_rx_dist_cfg dist_cfg; |
4142 | int i, err = 0; |
4143 | |
4144 | memset(&dist_cfg, 0, sizeof(dist_cfg)); |
4145 | |
4146 | dist_cfg.key_cfg_iova = key; |
4147 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
4148 | dist_cfg.enable = 1; |
4149 | |
4150 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
4151 | dist_cfg.tc = i; |
4152 | err = dpni_set_rx_fs_dist(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4153 | cfg: &dist_cfg); |
4154 | if (err) { |
4155 | dev_err(dev, "dpni_set_rx_fs_dist failed\n" ); |
4156 | break; |
4157 | } |
4158 | |
4159 | /* If the flow steering / hashing key is shared between all |
4160 | * traffic classes, install it just once |
4161 | */ |
4162 | if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) |
4163 | break; |
4164 | } |
4165 | |
4166 | return err; |
4167 | } |
4168 | |
4169 | /* Size of the Rx flow classification key */ |
4170 | int dpaa2_eth_cls_key_size(u64 fields) |
4171 | { |
4172 | int i, size = 0; |
4173 | |
4174 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
4175 | if (!(fields & dist_fields[i].id)) |
4176 | continue; |
4177 | size += dist_fields[i].size; |
4178 | } |
4179 | |
4180 | return size; |
4181 | } |
4182 | |
4183 | /* Offset of header field in Rx classification key */ |
4184 | int dpaa2_eth_cls_fld_off(int prot, int field) |
4185 | { |
4186 | int i, off = 0; |
4187 | |
4188 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
4189 | if (dist_fields[i].cls_prot == prot && |
4190 | dist_fields[i].cls_field == field) |
4191 | return off; |
4192 | off += dist_fields[i].size; |
4193 | } |
4194 | |
4195 | WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n" ); |
4196 | return 0; |
4197 | } |
4198 | |
4199 | /* Prune unused fields from the classification rule. |
4200 | * Used when masking is not supported |
4201 | */ |
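/* For example, if the full key layout is [ETH_DA | ETH_SA | ETH_TYPE | ...]
 * but only ETH_TYPE and IP_PROTO are selected in @fields, the bytes of
 * those two fields are copied to the front of the key buffer, preserving
 * their relative order.
 */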
4202 | void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields) |
4203 | { |
4204 | int off = 0, new_off = 0; |
4205 | int i, size; |
4206 | |
4207 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
4208 | size = dist_fields[i].size; |
4209 | if (dist_fields[i].id & fields) { |
4210 | memcpy(key_mem + new_off, key_mem + off, size); |
4211 | new_off += size; |
4212 | } |
4213 | off += size; |
4214 | } |
4215 | } |
4216 | |
4217 | /* Set the Rx distribution (hash or flow classification) key;
4218 | * @flags is a combination of RXH_ bits
4219 | */
4220 | static int dpaa2_eth_set_dist_key(struct net_device *net_dev, |
4221 | enum dpaa2_eth_rx_dist type, u64 flags) |
4222 | { |
4223 | struct device *dev = net_dev->dev.parent; |
4224 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
4225 | struct dpkg_profile_cfg cls_cfg; |
4226 | u32 rx_hash_fields = 0; |
4227 | dma_addr_t key_iova; |
4228 | u8 *dma_mem; |
4229 | int i; |
4230 | int err = 0; |
4231 | |
4232 | memset(&cls_cfg, 0, sizeof(cls_cfg)); |
4233 | |
4234 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
4235 | struct dpkg_extract *key = |
4236 | &cls_cfg.extracts[cls_cfg.num_extracts]; |
4237 | |
4238 | /* For both Rx hashing and classification keys |
4239 | * we set only the selected fields. |
4240 | */ |
4241 | if (!(flags & dist_fields[i].id)) |
4242 | continue; |
4243 | if (type == DPAA2_ETH_RX_DIST_HASH) |
4244 | rx_hash_fields |= dist_fields[i].rxnfc_field; |
4245 | |
4246 | if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { |
4247 | dev_err(dev, "error adding key extraction rule, too many rules?\n" ); |
4248 | return -E2BIG; |
4249 | } |
4250 | |
4251 | key->type = DPKG_EXTRACT_FROM_HDR; |
4252 | key->extract.from_hdr.prot = dist_fields[i].cls_prot; |
4253 | key->extract.from_hdr.type = DPKG_FULL_FIELD; |
4254 | key->extract.from_hdr.field = dist_fields[i].cls_field; |
4255 | cls_cfg.num_extracts++; |
4256 | } |
4257 | |
4258 | dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
4259 | if (!dma_mem) |
4260 | return -ENOMEM; |
4261 | |
4262 | err = dpni_prepare_key_cfg(cfg: &cls_cfg, key_cfg_buf: dma_mem); |
4263 | if (err) { |
4264 | dev_err(dev, "dpni_prepare_key_cfg error %d\n" , err); |
4265 | goto free_key; |
4266 | } |
4267 | |
4268 | /* Prepare for setting the rx dist */ |
4269 | key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, |
4270 | DMA_TO_DEVICE); |
4271 | if (dma_mapping_error(dev, dma_addr: key_iova)) { |
4272 | dev_err(dev, "DMA mapping failed\n" ); |
4273 | err = -ENOMEM; |
4274 | goto free_key; |
4275 | } |
4276 | |
4277 | if (type == DPAA2_ETH_RX_DIST_HASH) { |
4278 | if (dpaa2_eth_has_legacy_dist(priv)) |
4279 | err = dpaa2_eth_config_legacy_hash_key(priv, key: key_iova); |
4280 | else |
4281 | err = dpaa2_eth_config_hash_key(priv, key: key_iova); |
4282 | } else { |
4283 | err = dpaa2_eth_config_cls_key(priv, key: key_iova); |
4284 | } |
4285 | |
4286 | dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, |
4287 | DMA_TO_DEVICE); |
4288 | if (!err && type == DPAA2_ETH_RX_DIST_HASH) |
4289 | priv->rx_hash_fields = rx_hash_fields; |
4290 | |
4291 | free_key: |
4292 | kfree(objp: dma_mem); |
4293 | return err; |
4294 | } |
4295 | |
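/* Translate the ethtool RXH_* flags into our internal DPAA2_ETH_DIST_*
 * field ids and program the Rx hash key accordingly.
 */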
4296 | int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) |
4297 | { |
4298 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
4299 | u64 key = 0; |
4300 | int i; |
4301 | |
4302 | if (!dpaa2_eth_hash_enabled(priv)) |
4303 | return -EOPNOTSUPP; |
4304 | |
4305 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) |
4306 | if (dist_fields[i].rxnfc_field & flags) |
4307 | key |= dist_fields[i].id; |
4308 | |
4309 | return dpaa2_eth_set_dist_key(net_dev, type: DPAA2_ETH_RX_DIST_HASH, flags: key); |
4310 | } |
4311 | |
4312 | int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags) |
4313 | { |
4314 | return dpaa2_eth_set_dist_key(net_dev, type: DPAA2_ETH_RX_DIST_CLS, flags); |
4315 | } |
4316 | |
4317 | static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv) |
4318 | { |
4319 | struct device *dev = priv->net_dev->dev.parent; |
4320 | int err; |
4321 | |
4322 | /* Check if we actually support Rx flow classification */ |
4323 | if (dpaa2_eth_has_legacy_dist(priv)) { |
4324 | dev_dbg(dev, "Rx cls not supported by current MC version\n" ); |
4325 | return -EOPNOTSUPP; |
4326 | } |
4327 | |
4328 | if (!dpaa2_eth_fs_enabled(priv)) { |
4329 | dev_dbg(dev, "Rx cls disabled in DPNI options\n" ); |
4330 | return -EOPNOTSUPP; |
4331 | } |
4332 | |
4333 | if (!dpaa2_eth_hash_enabled(priv)) { |
4334 | dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n" ); |
4335 | return -EOPNOTSUPP; |
4336 | } |
4337 | |
4338 | /* If there is no support for masking in the classification table, |
4339 | * we don't set a default key, as it will depend on the rules |
4340 | * added by the user at runtime. |
4341 | */ |
4342 | if (!dpaa2_eth_fs_mask_enabled(priv)) |
4343 | goto out; |
4344 | |
4345 | err = dpaa2_eth_set_cls(net_dev: priv->net_dev, DPAA2_ETH_DIST_ALL); |
4346 | if (err) |
4347 | return err; |
4348 | |
4349 | out: |
4350 | priv->rx_cls_enabled = 1; |
4351 | |
4352 | return 0; |
4353 | } |
4354 | |
4355 | /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, |
4356 | * frame queues and channels |
4357 | */ |
4358 | static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) |
4359 | { |
4360 | struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; |
4361 | struct net_device *net_dev = priv->net_dev; |
4362 | struct dpni_pools_cfg pools_params = { 0 }; |
4363 | struct device *dev = net_dev->dev.parent; |
4364 | struct dpni_error_cfg err_cfg; |
4365 | int err = 0; |
4366 | int i; |
4367 | |
4368 | pools_params.num_dpbp = 1; |
4369 | pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id; |
4370 | pools_params.pools[0].backup_pool = 0; |
4371 | pools_params.pools[0].buffer_size = priv->rx_buf_size; |
4372 | err = dpni_set_pools(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, cfg: &pools_params); |
4373 | if (err) { |
4374 | dev_err(dev, "dpni_set_pools() failed\n" ); |
4375 | return err; |
4376 | } |
4377 | |
4378 | /* have the interface implicitly distribute traffic based on |
4379 | * the default hash key |
4380 | */ |
4381 | err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); |
4382 | if (err && err != -EOPNOTSUPP) |
4383 | dev_err(dev, "Failed to configure hashing\n" ); |
4384 | |
4385 | /* Configure the flow classification key; it includes all |
4386 | * supported header fields and cannot be modified at runtime |
4387 | */ |
4388 | err = dpaa2_eth_set_default_cls(priv); |
4389 | if (err && err != -EOPNOTSUPP) |
4390 | dev_err(dev, "Failed to configure Rx classification key\n" ); |
4391 | |
4392 | /* Configure handling of error frames */ |
4393 | err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; |
4394 | err_cfg.set_frame_annotation = 1; |
4395 | err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; |
4396 | err = dpni_set_errors_behavior(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4397 | cfg: &err_cfg); |
4398 | if (err) { |
4399 | dev_err(dev, "dpni_set_errors_behavior failed\n" ); |
4400 | return err; |
4401 | } |
4402 | |
4403 | /* Configure Rx and Tx conf queues to generate CDANs */ |
4404 | for (i = 0; i < priv->num_fqs; i++) { |
4405 | switch (priv->fq[i].type) { |
4406 | case DPAA2_RX_FQ: |
4407 | err = dpaa2_eth_setup_rx_flow(priv, fq: &priv->fq[i]); |
4408 | break; |
4409 | case DPAA2_TX_CONF_FQ: |
4410 | err = dpaa2_eth_setup_tx_flow(priv, fq: &priv->fq[i]); |
4411 | break; |
4412 | case DPAA2_RX_ERR_FQ: |
4413 | err = setup_rx_err_flow(priv, fq: &priv->fq[i]); |
4414 | break; |
4415 | default: |
4416 | dev_err(dev, "Invalid FQ type %d\n" , priv->fq[i].type); |
4417 | return -EINVAL; |
4418 | } |
4419 | if (err) |
4420 | return err; |
4421 | } |
4422 | |
4423 | err = dpni_get_qdid(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4424 | qtype: DPNI_QUEUE_TX, qdid: &priv->tx_qdid); |
4425 | if (err) { |
4426 | dev_err(dev, "dpni_get_qdid() failed\n" ); |
4427 | return err; |
4428 | } |
4429 | |
4430 | return 0; |
4431 | } |
4432 | |
4433 | /* Allocate rings for storing incoming frame descriptors */ |
4434 | static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv) |
4435 | { |
4436 | struct net_device *net_dev = priv->net_dev; |
4437 | struct device *dev = net_dev->dev.parent; |
4438 | int i; |
4439 | |
4440 | for (i = 0; i < priv->num_channels; i++) { |
4441 | priv->channel[i]->store = |
4442 | dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); |
4443 | if (!priv->channel[i]->store) { |
4444 | netdev_err(dev: net_dev, format: "dpaa2_io_store_create() failed\n" ); |
4445 | goto err_ring; |
4446 | } |
4447 | } |
4448 | |
4449 | return 0; |
4450 | |
4451 | err_ring: |
4452 | for (i = 0; i < priv->num_channels; i++) { |
4453 | if (!priv->channel[i]->store) |
4454 | break; |
4455 | dpaa2_io_store_destroy(s: priv->channel[i]->store); |
4456 | } |
4457 | |
4458 | return -ENOMEM; |
4459 | } |
4460 | |
4461 | static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv) |
4462 | { |
4463 | int i; |
4464 | |
4465 | for (i = 0; i < priv->num_channels; i++) |
4466 | dpaa2_io_store_destroy(s: priv->channel[i]->store); |
4467 | } |
4468 | |
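/* Choose the interface MAC address with the following precedence: the
 * address reported by firmware for the physical port, then the address
 * already set on the DPNI object, and finally a randomly generated one,
 * which is also written back to the DPNI so it persists until reboot.
 */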
4469 | static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) |
4470 | { |
4471 | struct net_device *net_dev = priv->net_dev; |
4472 | struct device *dev = net_dev->dev.parent; |
4473 | u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; |
4474 | int err; |
4475 | |
4476 | /* Get firmware address, if any */ |
4477 | err = dpni_get_port_mac_addr(mc_io: priv->mc_io, cm_flags: 0, token: priv->mc_token, mac_addr); |
4478 | if (err) { |
4479 | dev_err(dev, "dpni_get_port_mac_addr() failed\n" ); |
4480 | return err; |
4481 | } |
4482 | |
4483 | /* Get the MAC address currently set on the DPNI object, if any */
4484 | err = dpni_get_primary_mac_addr(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4485 | mac_addr: dpni_mac_addr); |
4486 | if (err) { |
4487 | dev_err(dev, "dpni_get_primary_mac_addr() failed\n" ); |
4488 | return err; |
4489 | } |
4490 | |
4491 | /* First check if firmware has any address configured by bootloader */ |
4492 | if (!is_zero_ether_addr(addr: mac_addr)) { |
4493 | /* If the DPMAC addr != DPNI addr, update it */ |
4494 | if (!ether_addr_equal(addr1: mac_addr, addr2: dpni_mac_addr)) { |
4495 | err = dpni_set_primary_mac_addr(mc_io: priv->mc_io, cmd_flags: 0, |
4496 | token: priv->mc_token, |
4497 | mac_addr); |
4498 | if (err) { |
4499 | dev_err(dev, "dpni_set_primary_mac_addr() failed\n" ); |
4500 | return err; |
4501 | } |
4502 | } |
4503 | eth_hw_addr_set(dev: net_dev, addr: mac_addr); |
4504 | } else if (is_zero_ether_addr(addr: dpni_mac_addr)) { |
4505 | /* No MAC address configured, fill in net_dev->dev_addr |
4506 | * with a random one |
4507 | */ |
4508 | eth_hw_addr_random(dev: net_dev); |
4509 | dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n" ); |
4510 | |
4511 | err = dpni_set_primary_mac_addr(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4512 | mac_addr: net_dev->dev_addr); |
4513 | if (err) { |
4514 | dev_err(dev, "dpni_set_primary_mac_addr() failed\n" ); |
4515 | return err; |
4516 | } |
4517 | |
4518 | /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all |
4519 | * practical purposes, this will be our "permanent" mac address, |
4520 | * at least until the next reboot. This move will also permit |
4521 | * register_netdevice() to properly fill up net_dev->perm_addr. |
4522 | */ |
4523 | net_dev->addr_assign_type = NET_ADDR_PERM; |
4524 | } else { |
4525 | /* NET_ADDR_PERM is default, all we have to do is |
4526 | * fill in the device addr. |
4527 | */ |
4528 | eth_hw_addr_set(dev: net_dev, addr: dpni_mac_addr); |
4529 | } |
4530 | |
4531 | return 0; |
4532 | } |
4533 | |
4534 | static int dpaa2_eth_netdev_init(struct net_device *net_dev) |
4535 | { |
4536 | struct device *dev = net_dev->dev.parent; |
4537 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
4538 | u32 options = priv->dpni_attrs.options; |
4539 | u64 supported = 0, not_supported = 0; |
4540 | u8 bcast_addr[ETH_ALEN]; |
4541 | u8 num_queues; |
4542 | int err; |
4543 | |
4544 | net_dev->netdev_ops = &dpaa2_eth_ops; |
4545 | net_dev->ethtool_ops = &dpaa2_ethtool_ops; |
4546 | |
4547 | err = dpaa2_eth_set_mac_addr(priv); |
4548 | if (err) |
4549 | return err; |
4550 | |
4551 | /* Explicitly add the broadcast address to the MAC filtering table */ |
4552 | eth_broadcast_addr(addr: bcast_addr); |
4553 | err = dpni_add_mac_addr(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, mac_addr: bcast_addr); |
4554 | if (err) { |
4555 | dev_err(dev, "dpni_add_mac_addr() failed\n" ); |
4556 | return err; |
4557 | } |
4558 | |
4559 | /* Set MTU upper limit; lower limit is 68B (default value) */ |
4560 | net_dev->max_mtu = DPAA2_ETH_MAX_MTU; |
4561 | err = dpni_set_max_frame_length(mc_io: priv->mc_io, cmd_flags: 0, token: priv->mc_token, |
4562 | DPAA2_ETH_MFL); |
4563 | if (err) { |
4564 | dev_err(dev, "dpni_set_max_frame_length() failed\n" ); |
4565 | return err; |
4566 | } |
4567 | |
4568 | /* Set actual number of queues in the net device */ |
4569 | num_queues = dpaa2_eth_queue_count(priv); |
4570 | err = netif_set_real_num_tx_queues(dev: net_dev, txq: num_queues); |
4571 | if (err) { |
4572 | dev_err(dev, "netif_set_real_num_tx_queues() failed\n" ); |
4573 | return err; |
4574 | } |
4575 | err = netif_set_real_num_rx_queues(dev: net_dev, rxq: num_queues); |
4576 | if (err) { |
4577 | dev_err(dev, "netif_set_real_num_rx_queues() failed\n" ); |
4578 | return err; |
4579 | } |
4580 | |
4581 | dpaa2_eth_detect_features(priv); |
4582 | |
4583 | /* Capabilities listing */ |
4584 | supported |= IFF_LIVE_ADDR_CHANGE; |
4585 | |
4586 | if (options & DPNI_OPT_NO_MAC_FILTER) |
4587 | not_supported |= IFF_UNICAST_FLT; |
4588 | else |
4589 | supported |= IFF_UNICAST_FLT; |
4590 | |
4591 | net_dev->priv_flags |= supported; |
4592 | net_dev->priv_flags &= ~not_supported; |
4593 | |
4594 | /* Features */ |
4595 | net_dev->features = NETIF_F_RXCSUM | |
4596 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
4597 | NETIF_F_SG | NETIF_F_HIGHDMA | |
4598 | NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO; |
4599 | net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS; |
4600 | net_dev->hw_features = net_dev->features; |
4601 | net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | |
4602 | NETDEV_XDP_ACT_REDIRECT | |
4603 | NETDEV_XDP_ACT_NDO_XMIT; |
4604 | if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) && |
4605 | priv->dpni_attrs.num_queues <= 8) |
4606 | net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; |
4607 | |
4608 | if (priv->dpni_attrs.vlan_filter_entries) |
4609 | net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
4610 | |
4611 | return 0; |
4612 | } |
4613 | |
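/* kthread used as a fallback when the DPNI interrupt cannot be set up:
 * poll the link state periodically instead (see the dpaa2_eth_setup_irqs()
 * failure handling in the probe path).
 */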
4614 | static int dpaa2_eth_poll_link_state(void *arg) |
4615 | { |
4616 | struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; |
4617 | int err; |
4618 | |
4619 | while (!kthread_should_stop()) { |
4620 | err = dpaa2_eth_link_state_update(priv); |
4621 | if (unlikely(err)) |
4622 | return err; |
4623 | |
4624 | msleep(DPAA2_ETH_LINK_STATE_REFRESH); |
4625 | } |
4626 | |
4627 | return 0; |
4628 | } |
4629 | |
4630 | static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) |
4631 | { |
4632 | struct fsl_mc_device *dpni_dev, *dpmac_dev; |
4633 | struct dpaa2_mac *mac; |
4634 | int err; |
4635 | |
4636 | dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); |
4637 | dpmac_dev = fsl_mc_get_endpoint(mc_dev: dpni_dev, if_id: 0); |
4638 | |
4639 | if (PTR_ERR(ptr: dpmac_dev) == -EPROBE_DEFER) { |
4640 | netdev_dbg(priv->net_dev, "waiting for mac\n" ); |
4641 | return PTR_ERR(ptr: dpmac_dev); |
4642 | } |
4643 | |
4644 | if (IS_ERR(ptr: dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) |
4645 | return 0; |
4646 | |
4647 | mac = kzalloc(size: sizeof(struct dpaa2_mac), GFP_KERNEL); |
4648 | if (!mac) |
4649 | return -ENOMEM; |
4650 | |
4651 | mac->mc_dev = dpmac_dev; |
4652 | mac->mc_io = priv->mc_io; |
4653 | mac->net_dev = priv->net_dev; |
4654 | |
4655 | err = dpaa2_mac_open(mac); |
4656 | if (err) |
4657 | goto err_free_mac; |
4658 | |
4659 | if (dpaa2_mac_is_type_phy(mac)) { |
4660 | err = dpaa2_mac_connect(mac); |
4661 | if (err) { |
4662 | if (err == -EPROBE_DEFER) |
4663 | netdev_dbg(priv->net_dev, |
4664 | "could not connect to MAC\n" ); |
4665 | else |
4666 | netdev_err(dev: priv->net_dev, |
4667 | format: "Error connecting to the MAC endpoint: %pe" , |
4668 | ERR_PTR(error: err)); |
4669 | goto err_close_mac; |
4670 | } |
4671 | } |
4672 | |
4673 | mutex_lock(&priv->mac_lock); |
4674 | priv->mac = mac; |
4675 | mutex_unlock(lock: &priv->mac_lock); |
4676 | |
4677 | return 0; |
4678 | |
4679 | err_close_mac: |
4680 | dpaa2_mac_close(mac); |
4681 | err_free_mac: |
4682 | kfree(objp: mac); |
4683 | return err; |
4684 | } |
4685 | |
4686 | static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv) |
4687 | { |
4688 | struct dpaa2_mac *mac; |
4689 | |
4690 | mutex_lock(&priv->mac_lock); |
4691 | mac = priv->mac; |
4692 | priv->mac = NULL; |
4693 | mutex_unlock(lock: &priv->mac_lock); |
4694 | |
4695 | if (!mac) |
4696 | return; |
4697 | |
4698 | if (dpaa2_mac_is_type_phy(mac)) |
4699 | dpaa2_mac_disconnect(mac); |
4700 | |
4701 | dpaa2_mac_close(mac); |
4702 | kfree(objp: mac); |
4703 | } |
4704 | |
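/* Threaded handler for the DPNI IRQ: reacts to link state changes and to
 * endpoint (connect/disconnect) events, the latter triggering a MAC address
 * refresh, a Tx FQID update and a DPMAC connect or disconnect.
 */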
4705 | static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) |
4706 | { |
4707 | u32 status = ~0; |
4708 | struct device *dev = (struct device *)arg; |
4709 | struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); |
4710 | struct net_device *net_dev = dev_get_drvdata(dev); |
4711 | struct dpaa2_eth_priv *priv = netdev_priv(dev: net_dev); |
4712 | bool had_mac; |
4713 | int err; |
4714 | |
4715 | err = dpni_get_irq_status(mc_io: dpni_dev->mc_io, cmd_flags: 0, token: dpni_dev->mc_handle, |
4716 | DPNI_IRQ_INDEX, status: &status); |
4717 | if (unlikely(err)) { |
4718 | netdev_err(dev: net_dev, format: "Can't get irq status (err %d)\n" , err); |
4719 | return IRQ_HANDLED; |
4720 | } |
4721 | |
4722 | if (status & DPNI_IRQ_EVENT_LINK_CHANGED) |
4723 | dpaa2_eth_link_state_update(priv: netdev_priv(dev: net_dev)); |
4724 | |
4725 | if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) { |
4726 | dpaa2_eth_set_mac_addr(priv: netdev_priv(dev: net_dev)); |
4727 | dpaa2_eth_update_tx_fqids(priv); |
4728 | |
4729 | /* We can avoid locking because the "endpoint changed" IRQ |
4730 | * handler is the only one who changes priv->mac at runtime, |
4731 | * so we are not racing with anyone. |
4732 | */ |
4733 | had_mac = !!priv->mac; |
4734 | if (had_mac) |
4735 | dpaa2_eth_disconnect_mac(priv); |
4736 | else |
4737 | dpaa2_eth_connect_mac(priv); |
4738 | } |
4739 | |
4740 | return IRQ_HANDLED; |
4741 | } |
4742 | |
4743 | static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) |
4744 | { |
4745 | int err = 0; |
4746 | struct fsl_mc_device_irq *irq; |
4747 | |
4748 | err = fsl_mc_allocate_irqs(mc_dev: ls_dev); |
4749 | if (err) { |
4750 | dev_err(&ls_dev->dev, "MC irqs allocation failed\n" ); |
4751 | return err; |
4752 | } |
4753 | |
4754 | irq = ls_dev->irqs[0]; |
4755 | err = devm_request_threaded_irq(dev: &ls_dev->dev, irq: irq->virq, |
4756 | NULL, thread_fn: dpni_irq0_handler_thread, |
4757 | IRQF_NO_SUSPEND | IRQF_ONESHOT, |
4758 | devname: dev_name(dev: &ls_dev->dev), dev_id: &ls_dev->dev); |
4759 | if (err < 0) { |
4760 | dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n" , err); |
4761 | goto free_mc_irq; |
4762 | } |
4763 | |
4764 | err = dpni_set_irq_mask(mc_io: ls_dev->mc_io, cmd_flags: 0, token: ls_dev->mc_handle, |
4765 | DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED | |
4766 | DPNI_IRQ_EVENT_ENDPOINT_CHANGED); |
4767 | if (err < 0) { |
4768 | dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n" , err); |
4769 | goto free_irq; |
4770 | } |
4771 | |
4772 | err = dpni_set_irq_enable(mc_io: ls_dev->mc_io, cmd_flags: 0, token: ls_dev->mc_handle, |
4773 | DPNI_IRQ_INDEX, en: 1); |
4774 | if (err < 0) { |
4775 | dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n" , err); |
4776 | goto free_irq; |
4777 | } |
4778 | |
4779 | return 0; |
4780 | |
4781 | free_irq: |
4782 | devm_free_irq(dev: &ls_dev->dev, irq: irq->virq, dev_id: &ls_dev->dev); |
4783 | free_mc_irq: |
4784 | fsl_mc_free_irqs(mc_dev: ls_dev); |
4785 | |
4786 | return err; |
4787 | } |
4788 | |
4789 | static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv) |
4790 | { |
4791 | int i; |
4792 | struct dpaa2_eth_channel *ch; |
4793 | |
4794 | for (i = 0; i < priv->num_channels; i++) { |
4795 | ch = priv->channel[i]; |
4796 | /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ |
4797 | netif_napi_add(dev: priv->net_dev, napi: &ch->napi, poll: dpaa2_eth_poll); |
4798 | } |
4799 | } |
4800 | |
4801 | static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) |
4802 | { |
4803 | int i; |
4804 | struct dpaa2_eth_channel *ch; |
4805 | |
4806 | for (i = 0; i < priv->num_channels; i++) { |
4807 | ch = priv->channel[i]; |
4808 | netif_napi_del(napi: &ch->napi); |
4809 | } |
4810 | } |
4811 | |
4812 | static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) |
4813 | { |
4814 | struct device *dev; |
4815 | struct net_device *net_dev = NULL; |
4816 | struct dpaa2_eth_priv *priv = NULL; |
4817 | int err = 0; |
4818 | |
4819 | dev = &dpni_dev->dev; |
4820 | |
4821 | /* Net device */ |
4822 | net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES); |
4823 | if (!net_dev) { |
4824 | dev_err(dev, "alloc_etherdev_mq() failed\n" ); |
4825 | return -ENOMEM; |
4826 | } |
4827 | |
4828 | SET_NETDEV_DEV(net_dev, dev); |
4829 | dev_set_drvdata(dev, data: net_dev); |
4830 | |
4831 | priv = netdev_priv(dev: net_dev); |
4832 | priv->net_dev = net_dev; |
4833 | SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port); |
4834 | |
4835 | mutex_init(&priv->mac_lock); |
4836 | |
4837 | priv->iommu_domain = iommu_get_domain_for_dev(dev); |
4838 | |
4839 | priv->tx_tstamp_type = HWTSTAMP_TX_OFF; |
4840 | priv->rx_tstamp = false; |
4841 | |
	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
	if (!priv->dpaa2_ptp_wq) {
		err = -ENOMEM;
		goto err_wq_alloc;
	}

	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
	mutex_init(&priv->onestep_tstamp_lock);
	skb_queue_head_init(&priv->tx_skbs);

	priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO) {
			dev_dbg(dev, "waiting for MC portal\n");
			err = -EPROBE_DEFER;
		} else {
			dev_err(dev, "MC portal allocation failed\n");
		}
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = dpaa2_eth_setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = dpaa2_eth_setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	dpaa2_eth_setup_fqs(priv);

	err = dpaa2_eth_setup_default_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = dpaa2_eth_bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	dpaa2_eth_add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
	if (!priv->sgt_cache) {
		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
		err = -ENOMEM;
		goto err_alloc_sgt_cache;
	}

	priv->fd = alloc_percpu(*priv->fd);
	if (!priv->fd) {
		dev_err(dev, "alloc_percpu(fds) failed\n");
		err = -ENOMEM;
		goto err_alloc_fds;
	}

	err = dpaa2_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = dpaa2_eth_set_tx_csum(priv,
				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = dpaa2_eth_alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = dpaa2_eth_setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_dl_alloc(priv);
	if (err)
		goto err_dl_register;

	err = dpaa2_eth_dl_traps_register(priv);
	if (err)
		goto err_dl_trap_register;

	err = dpaa2_eth_dl_port_add(priv);
	if (err)
		goto err_dl_port_add;

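	/* Reserve Tx headroom for the software annotation area (SWA) plus
	 * Tx buffer alignment
	 */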
	net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dpaa2_eth_dl_register(priv);
	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_dl_port_del(priv);
err_dl_port_add:
	dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
	dpaa2_eth_dl_free(priv);
err_dl_register:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->fd);
err_alloc_fds:
	free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	dpaa2_eth_del_ch_napi(priv);
err_bind:
	dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
	dpaa2_eth_free_dpio(priv);
err_dpio_setup:
	dpaa2_eth_free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	destroy_workqueue(priv->dpaa2_ptp_wq);
err_wq_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

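/* Tear down in roughly the reverse order of probe: devlink and netdev
 * unregistration first, then link IRQs or the polling thread, datapath
 * resources, per-CPU state, MC objects and finally the net_device.
 */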
static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	dpaa2_eth_dl_unregister(priv);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif

	unregister_netdev(net_dev);

	dpaa2_eth_dl_port_del(priv);
	dpaa2_eth_dl_traps_unregister(priv);
	dpaa2_eth_dl_free(priv);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	dpaa2_eth_disconnect_mac(priv);
	dpaa2_eth_free_rings(priv);
	free_percpu(priv->fd);
	free_percpu(priv->sgt_cache);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	dpaa2_eth_del_ch_napi(priv);
	dpaa2_eth_free_dpbps(priv);
	dpaa2_eth_free_dpio(priv);
	dpaa2_eth_free_dpni(priv);
	if (priv->onestep_reg_base)
		iounmap(priv->onestep_reg_base);

	fsl_mc_portal_free(priv->mc_io);

	destroy_workqueue(priv->dpaa2_ptp_wq);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);
}

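/* Bind to any DPNI object exposed on the fsl-mc bus */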
static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

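/* Register the driver with the fsl-mc bus; debugfs support is set up
 * first and torn down again if registration fails.
 */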
static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);