// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2022 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)

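/* Driver-specific counters, reported after the hardware ones in the
 * "ethtool -S" output. The ordering here must match the order in which
 * dpaa2_eth_get_ethtool_stats() fills in the data array.
 */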
static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] tx tso frames",
	"[drv] tx tso bytes",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] tx converted sg frames",
	"[drv] tx converted sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = -EOPNOTSUPP;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv))
		err = phylink_ethtool_nway_reset(priv->mac->phylink);

	mutex_unlock(&priv->mac_lock);

	return err;
}

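/* When a MAC/PHY is attached, link settings are delegated to phylink;
 * otherwise the get path reports the MC firmware link state and the set
 * path returns -EOPNOTSUPP, since the link is fixed by the DPNI config.
 */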
static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		err = phylink_ethtool_ksettings_get(priv->mac->phylink,
						    link_settings);
		mutex_unlock(&priv->mac_lock);
		return err;
	}

	mutex_unlock(&priv->mac_lock);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = -EOPNOTSUPP;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv))
		err = phylink_ethtool_ksettings_set(priv->mac->phylink,
						    link_settings);

	mutex_unlock(&priv->mac_lock);

	return err;
}

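/* DPNI link options encode the pause configuration as follows (see the
 * dpaa2_eth_rx_pause_enabled()/dpaa2_eth_tx_pause_enabled() helpers and
 * the update logic in dpaa2_eth_set_pauseparam() below):
 *   Rx pause enabled <=> DPNI_LINK_OPT_PAUSE set
 *   Tx pause enabled <=> DPNI_LINK_OPT_PAUSE XOR DPNI_LINK_OPT_ASYM_PAUSE
 */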
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		mutex_unlock(&priv->mac_lock);
		return;
	}

	mutex_unlock(&priv->mac_lock);

	pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
	pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
	pause->autoneg = AUTONEG_DISABLE;
}

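/* Pause autonegotiation is not supported on MC-managed links, so only the
 * rx/tx knobs can be toggled from userspace, typically with something like:
 *
 *   ethtool -A eth0 rx on tx off
 */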
static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		err = phylink_ethtool_set_pauseparam(priv->mac->phylink,
						     pause);
		mutex_unlock(&priv->mac_lock);
		return err;
	}

	mutex_unlock(&priv->mac_lock);

	if (pause->autoneg)
		return -EOPNOTSUPP;

	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}

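/* The ETH_SS_STATS string set is laid out as: DPNI hardware counters,
 * driver extras, then MAC statistics. dpaa2_eth_get_sset_count() must
 * report the matching total.
 */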
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		dpaa2_mac_get_strings(p);
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS +
		       dpaa2_mac_get_sset_count();
	default:
		return -EOPNOTSUPP;
	}
}

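/* The data array is filled in the same order as the strings above: DPNI
 * statistics pages (pages 4 and 5 are skipped, and pages unsupported by
 * older firmware read back as zeroes), per-cpu driver extras summed across
 * CPUs, per-channel stats, instantaneous QBMan frame queue and buffer pool
 * counts, and finally the MAC counters.
 */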
/** Fill in hardware counters, as returned by MC.
 */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	union dpni_statistics dpni_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	struct dpaa2_eth_ch_stats *ch_stats;
	struct dpaa2_eth_drv_stats *extras;
	u32 buf_cnt, buf_cnt_total = 0;
	int j, k, err, num_cnt, i = 0;
	u32 fcnt, bcnt;

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	for (j = 0; j < priv->num_bps; j++) {
		err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
		if (err) {
			netdev_warn(net_dev, "Buffer count query error %d\n", err);
			return;
		}
		buf_cnt_total += buf_cnt;
	}
	*(data + i++) = buf_cnt_total;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_has_mac(priv))
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);

	mutex_unlock(&priv->mac_lock);
}

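/* The dpaa2_eth_prep_*_rule() helpers below each copy the relevant header
 * fields of an ethtool flow spec into the classification key and mask
 * buffers, at the offsets used by the hardware key composition, and record
 * which fields were used in the DPAA2_ETH_DIST_* bitmask.
 */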
static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

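/* For user-defined IPv4 rules, ethtool packs the first four bytes of the
 * L4 header into l4_4_bytes; they are interpreted here as big-endian source
 * (upper 16 bits) and destination (lower 16 bits) port numbers.
 */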
static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
				   struct ethtool_usrip4_spec *uip_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}

static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
				  struct ethtool_tcpip4_spec *l4_mask,
				  void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}

static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
				   struct ethtool_flow_ext *ext_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
				       struct ethtool_flow_ext *ext_mask,
				       void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

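/* Supported flow types: ether, usr-ip4, tcp4, udp4 and sctp4, optionally
 * with FLOW_EXT (VLAN TCI) and FLOW_MAC_EXT (destination MAC) extensions.
 * A typical rule insertion from userspace might look like:
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 4789 action 2 loc 0
 */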
static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
				   void *mask, u64 *fields)
{
	int err;

	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
					      key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
					      &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
					     key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
					     key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
					     &fs->m_u.sctp_ip4_spec, key, mask,
					     IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
						  mask, fields);
		if (err)
			return err;
	}

	return 0;
}

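/* A single allocation holds both the key and, at key_size offset, its mask;
 * the buffer is DMA-mapped so the MC firmware can read it. When the firmware
 * does not support FS masking, the key is trimmed down to only the fields
 * actually used and one key composition is shared by all steering rules.
 */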
static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
				 struct ethtool_rx_flow_spec *fs,
				 bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int i, err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
	}
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		if (add)
			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
						i, fs->location, &rule_cfg,
						&fs_act);
		else
			err = dpni_remove_fs_entry(priv->mc_io, 0,
						   priv->mc_token, i,
						   &rule_cfg);
		if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}

static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}

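/* Updating a classification rule is implemented as delete-then-add: any
 * rule already present at @location is removed first, and a NULL @new_fs
 * turns the call into a plain delete.
 */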
static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
				     struct ethtool_rx_flow_spec *new_fs,
				     unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) &&
		    !dpaa2_eth_num_cls_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}

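/* Typical userspace queries served here:
 *
 *   ethtool -n eth0                     # list steering rules
 *   ethtool -n eth0 rx-flow-hash udp4   # show RSS hash fields
 */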
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = 0;
		rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

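/* Exported so the DPAA2 PTP driver can publish its clock index, which is
 * then reported to userspace via "ethtool -T"; -1 means no PHC available.
 */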
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	if (!dpaa2_ptp)
		return ethtool_op_get_ts_info(dev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON) |
			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

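/* rx_copybreak is a threshold consumed by the Rx path (see dpaa2-eth.c);
 * frames below it are typically copied into a freshly allocated skb so the
 * original buffer can be recycled immediately. Example userspace usage:
 *
 *   ethtool --set-tunable eth0 rx-copybreak 512
 */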
static int dpaa2_eth_get_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = priv->rx_copybreak;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_eth_set_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 const void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		priv->rx_copybreak = *(u32 *)data;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

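/* Interrupt coalescing is a property of the QBMan software portals (DPIO),
 * not of individual queues: the same setting is applied to every channel,
 * and channel 0 is used as the reference when reading it back. Example:
 *
 *   ethtool -C eth0 rx-usecs 64 adaptive-rx on
 */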
static int dpaa2_eth_get_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio = priv->channel[0]->dpio;

	dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
	ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);

	return 0;
}

static int dpaa2_eth_set_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio;
	int prev_adaptive;
	u32 prev_rx_usecs;
	int i, j, err;

	/* Keep track of the previous value, just in case we fail */
	dpio = priv->channel[0]->dpio;
	dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
	prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);

	/* Setup new value for rx coalescing */
	for (i = 0; i < priv->num_channels; i++) {
		dpio = priv->channel[i]->dpio;

		dpaa2_io_set_adaptive_coalescing(dpio,
						 ic->use_adaptive_rx_coalesce);
		err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
		if (err)
			goto restore_rx_usecs;
	}

	return 0;

restore_rx_usecs:
	for (j = 0; j < i; j++) {
		dpio = priv->channel[j]->dpio;

		dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
		dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
	}

	return err;
}

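/* The channel counts are fixed by the DPNI configuration: one Rx and one Tx
 * queue pair per channel, plus "other" entries for the per-queue Tx
 * confirmation queues and the single Rx error queue. Example query:
 *
 *   ethtool -l eth0
 */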
static void dpaa2_eth_get_channels(struct net_device *net_dev,
				   struct ethtool_channels *channels)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int queue_count = dpaa2_eth_queue_count(priv);

	channels->max_rx = queue_count;
	channels->max_tx = queue_count;
	channels->rx_count = queue_count;
	channels->tx_count = queue_count;

	/* Tx confirmation and Rx error */
	channels->max_other = queue_count + 1;
	channels->max_combined = channels->max_rx +
				 channels->max_tx +
				 channels->max_other;
	/* Tx conf and Rx err */
	channels->other_count = queue_count + 1;
	channels->combined_count = channels->rx_count +
				   channels->tx_count +
				   channels->other_count;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
	.get_tunable = dpaa2_eth_get_tunable,
	.set_tunable = dpaa2_eth_set_tunable,
	.get_coalesce = dpaa2_eth_get_coalesce,
	.set_coalesce = dpaa2_eth_set_coalesce,
	.get_channels = dpaa2_eth_get_channels,
};