// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2022 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] tx tso frames",
	"[drv] tx tso bytes",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] tx converted sg frames",
	"[drv] tx converted sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)

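/* Report the DPNI interface version exposed by the MC firmware as the
 * device's firmware version string.
 */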
static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = -EOPNOTSUPP;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv))
		err = phylink_ethtool_nway_reset(priv->mac->phylink);

	mutex_unlock(&priv->mac_lock);

	return err;
}

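/* When a MAC/PHY is attached, link settings are handled through phylink;
 * otherwise they are derived from the link state last read from the MC
 * firmware.
 */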
static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		err = phylink_ethtool_ksettings_get(priv->mac->phylink,
						    link_settings);
		mutex_unlock(&priv->mac_lock);
		return err;
	}

	mutex_unlock(&priv->mac_lock);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = -EOPNOTSUPP;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv))
		err = phylink_ethtool_ksettings_set(priv->mac->phylink,
						    link_settings);

	mutex_unlock(&priv->mac_lock);

	return err;
}

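/* DPNI pause frame encoding: the PAUSE option enables Rx pause, while
 * Tx pause is enabled when exactly one of PAUSE and ASYM_PAUSE is set
 * (see the dpaa2_eth_rx_pause_enabled()/dpaa2_eth_tx_pause_enabled()
 * helpers). Both the get and set paths below rely on this encoding.
 */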
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		mutex_unlock(&priv->mac_lock);
		return;
	}

	mutex_unlock(&priv->mac_lock);

	pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
	pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
	pause->autoneg = AUTONEG_DISABLE;
}

static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		err = phylink_ethtool_set_pauseparam(priv->mac->phylink,
						     pause);
		mutex_unlock(&priv->mac_lock);
		return err;
	}

	mutex_unlock(&priv->mac_lock);

	if (pause->autoneg)
		return -EOPNOTSUPP;

	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg() failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}

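/* The string order here must match the counter order filled in by
 * dpaa2_eth_get_ethtool_stats(); these are the labels that
 * "ethtool -S" prints next to each value.
 */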
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++)
			ethtool_puts(&data, dpaa2_ethtool_stats[i]);
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++)
			ethtool_puts(&data, dpaa2_ethtool_extras[i]);
		dpaa2_mac_get_strings(&data);
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS +
		       dpaa2_mac_get_sset_count();
	default:
		return -EOPNOTSUPP;
	}
}

/* Fill in hardware counters, as returned by MC, followed by the
 * driver's per-cpu, per-channel and QBMan counters.
 */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	union dpni_statistics dpni_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	struct dpaa2_eth_ch_stats *ch_stats;
	struct dpaa2_eth_drv_stats *extras;
	u32 buf_cnt, buf_cnt_total = 0;
	int j, k, err, num_cnt, i = 0;
	u32 fcnt, bcnt;

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_statistics(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	for (j = 0; j < priv->num_bps; j++) {
		err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
		if (err) {
			netdev_warn(net_dev, "Buffer count query error %d\n", err);
			return;
		}
		buf_cnt_total += buf_cnt;
	}
	*(data + i++) = buf_cnt_total;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_has_mac(priv))
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);

	mutex_unlock(&priv->mac_lock);
}

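/* The dpaa2_eth_prep_*_rule() helpers below translate an ethtool flow
 * spec into the key/mask byte layout expected by the DPNI classifier,
 * recording in *fields which header fields the rule actually uses.
 */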
static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

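/* For user-defined IPv4 flows, ethtool packs the first four bytes of
 * the L4 header into l4_4_bytes: the source port lands in the upper 16
 * bits and the destination port in the lower 16.
 */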
static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
				   struct ethtool_usrip4_spec *uip_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}

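/* TCP, UDP and SCTP rules share the same key offsets for the port
 * fields, so the NET_PROT_UDP field IDs are used for all of them; the
 * actual protocol is matched through the IP proto field instead.
 */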
static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
				  struct ethtool_tcpip4_spec *l4_mask,
				  void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}

static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
				   struct ethtool_flow_ext *ext_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
				       struct ethtool_flow_ext *ext_mask,
				       void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

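/* Dispatch on the basic flow type (low byte of flow_type), then apply
 * the FLOW_EXT and FLOW_MAC_EXT extensions if present.
 */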
static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
				   void *mask, u64 *fields)
{
	int err;

	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
					      key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
					      &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
					     key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
					     key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
					     &fs->m_u.sctp_ip4_spec, key, mask,
					     IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
						  mask, fields);
		if (err)
			return err;
	}

	return 0;
}

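/* Add or remove a classification rule. The key and mask are laid out
 * back to back in a single buffer that is DMA-mapped once, so
 * rule_cfg.key_iova and rule_cfg.mask_iova point into the same mapping.
 * The rule is mirrored on all traffic classes, unless the DPNI shares
 * one FS table across them (DPNI_OPT_SHARED_FS).
 */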
static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
				 struct ethtool_rx_flow_spec *fs,
				 bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int i, err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
	}
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		if (add)
			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
						i, fs->location, &rule_cfg,
						&fs_act);
		else
			err = dpni_remove_fs_entry(priv->mc_io, 0,
						   priv->mc_token, i,
						   &rule_cfg);
		if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}

static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}

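/* Replace the rule at @location: any existing entry is deleted first,
 * so passing a NULL @new_fs implements rule deletion.
 */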
static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
				     struct ethtool_rx_flow_spec *new_fs,
				     unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) &&
		    !dpaa2_eth_num_cls_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

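/* PHC index of the DPAA2 1588 clock; stays -1 until the dpaa2-ptp
 * driver registers the clock and fills it in.
 */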
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct kernel_ethtool_ts_info *info)
{
	if (!dpaa2_ptp)
		return ethtool_op_get_ts_info(dev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON) |
			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

static int dpaa2_eth_get_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = priv->rx_copybreak;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_eth_set_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 const void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		priv->rx_copybreak = *(u32 *)data;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

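/* All DPIO portals share the same interrupt coalescing configuration:
 * channel 0 is read as representative on the get path, and the setting
 * is applied to every channel on the set path, rolling back the
 * channels already updated if one of them fails.
 */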
static int dpaa2_eth_get_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio = priv->channel[0]->dpio;

	dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
	ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);

	return 0;
}

static int dpaa2_eth_set_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio;
	int prev_adaptive;
	u32 prev_rx_usecs;
	int i, j, err;

	/* Keep track of the previous value, just in case we fail */
	dpio = priv->channel[0]->dpio;
	dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
	prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);

	/* Setup new value for rx coalescing */
	for (i = 0; i < priv->num_channels; i++) {
		dpio = priv->channel[i]->dpio;

		dpaa2_io_set_adaptive_coalescing(dpio,
						 ic->use_adaptive_rx_coalesce);
		err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
		if (err)
			goto restore_rx_usecs;
	}

	return 0;

restore_rx_usecs:
	for (j = 0; j < i; j++) {
		dpio = priv->channel[j]->dpio;

		dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
		dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
	}

	return err;
}

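/* The driver uses one queue pair per channel; Tx confirmation queues
 * plus the Rx error queue are reported as "other" channels.
 */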
static void dpaa2_eth_get_channels(struct net_device *net_dev,
				   struct ethtool_channels *channels)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int queue_count = dpaa2_eth_queue_count(priv);

	channels->max_rx = queue_count;
	channels->max_tx = queue_count;
	channels->rx_count = queue_count;
	channels->tx_count = queue_count;

	/* Tx confirmation and Rx error */
	channels->max_other = queue_count + 1;
	channels->max_combined = channels->max_rx +
				 channels->max_tx +
				 channels->max_other;
	/* Tx conf and Rx err */
	channels->other_count = queue_count + 1;
	channels->combined_count = channels->rx_count +
				   channels->tx_count +
				   channels->other_count;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
	.get_tunable = dpaa2_eth_get_tunable,
	.set_tunable = dpaa2_eth_set_tunable,
	.get_coalesce = dpaa2_eth_get_coalesce,
	.set_coalesce = dpaa2_eth_set_coalesce,
	.get_channels = dpaa2_eth_get_channels,
};