1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright (C) 2023 Intel Corporation */ |
3 | |
4 | #include "idpf.h" |
5 | |
6 | /** |
7 | * idpf_get_rxnfc - command to get RX flow classification rules |
8 | * @netdev: network interface device structure |
9 | * @cmd: ethtool rxnfc command |
10 | * @rule_locs: pointer to store rule locations |
11 | * |
12 | * Returns Success if the command is supported. |
13 | */ |
14 | static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, |
15 | u32 __always_unused *rule_locs) |
16 | { |
17 | struct idpf_vport *vport; |
18 | |
19 | idpf_vport_ctrl_lock(netdev); |
20 | vport = idpf_netdev_to_vport(netdev); |
21 | |
22 | switch (cmd->cmd) { |
23 | case ETHTOOL_GRXRINGS: |
24 | cmd->data = vport->num_rxq; |
25 | idpf_vport_ctrl_unlock(netdev); |
26 | |
27 | return 0; |
28 | default: |
29 | break; |
30 | } |
31 | |
32 | idpf_vport_ctrl_unlock(netdev); |
33 | |
34 | return -EOPNOTSUPP; |
35 | } |
36 | |
37 | /** |
38 | * idpf_get_rxfh_key_size - get the RSS hash key size |
39 | * @netdev: network interface device structure |
40 | * |
41 | * Returns the key size on success, error value on failure. |
42 | */ |
43 | static u32 idpf_get_rxfh_key_size(struct net_device *netdev) |
44 | { |
45 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
46 | struct idpf_vport_user_config_data *user_config; |
47 | |
48 | if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) |
49 | return -EOPNOTSUPP; |
50 | |
51 | user_config = &np->adapter->vport_config[np->vport_idx]->user_config; |
52 | |
53 | return user_config->rss_data.rss_key_size; |
54 | } |
55 | |
56 | /** |
57 | * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size |
58 | * @netdev: network interface device structure |
59 | * |
60 | * Returns the table size on success, error value on failure. |
61 | */ |
62 | static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) |
63 | { |
64 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
65 | struct idpf_vport_user_config_data *user_config; |
66 | |
67 | if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) |
68 | return -EOPNOTSUPP; |
69 | |
70 | user_config = &np->adapter->vport_config[np->vport_idx]->user_config; |
71 | |
72 | return user_config->rss_data.rss_lut_size; |
73 | } |
74 | |
75 | /** |
76 | * idpf_get_rxfh - get the rx flow hash indirection table |
77 | * @netdev: network interface device structure |
78 | * @rxfh: pointer to param struct (indir, key, hfunc) |
79 | * |
80 | * Reads the indirection table from the driver's cached RSS data. Returns 0 on success, negative on failure. |
81 | */ |
82 | static int idpf_get_rxfh(struct net_device *netdev, |
83 | struct ethtool_rxfh_param *rxfh) |
84 | { |
85 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
86 | struct idpf_rss_data *rss_data; |
87 | struct idpf_adapter *adapter; |
88 | int err = 0; |
89 | u16 i; |
90 | |
91 | idpf_vport_ctrl_lock(netdev); |
92 | |
93 | adapter = np->adapter; |
94 | |
95 | if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { |
96 | err = -EOPNOTSUPP; |
97 | goto unlock_mutex; |
98 | } |
99 | |
100 | rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data; |
101 | if (np->state != __IDPF_VPORT_UP) |
102 | goto unlock_mutex; |
103 | |
104 | rxfh->hfunc = ETH_RSS_HASH_TOP; |
105 | |
106 | if (rxfh->key) |
107 | memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size); |
108 | |
109 | if (rxfh->indir) { |
110 | for (i = 0; i < rss_data->rss_lut_size; i++) |
111 | rxfh->indir[i] = rss_data->rss_lut[i]; |
112 | } |
113 | |
114 | unlock_mutex: |
115 | idpf_vport_ctrl_unlock(netdev); |
116 | |
117 | return err; |
118 | } |
119 | |
120 | /** |
121 | * idpf_set_rxfh - set the rx flow hash indirection table |
122 | * @netdev: network interface device structure |
123 | * @rxfh: pointer to param struct (indir, key, hfunc) |
124 | * @extack: extended ACK from the Netlink message |
125 | * |
126 | * Returns -EINVAL if the table specifies an invalid queue id, otherwise |
127 | * returns 0 after programming the table. |
128 | */ |
129 | static int idpf_set_rxfh(struct net_device *netdev, |
130 | struct ethtool_rxfh_param *rxfh, |
131 | struct netlink_ext_ack *extack) |
132 | { |
133 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
134 | struct idpf_rss_data *rss_data; |
135 | struct idpf_adapter *adapter; |
136 | struct idpf_vport *vport; |
137 | int err = 0; |
138 | u16 lut; |
139 | |
140 | idpf_vport_ctrl_lock(netdev); |
141 | vport = idpf_netdev_to_vport(netdev); |
142 | |
143 | adapter = vport->adapter; |
144 | |
145 | if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { |
146 | err = -EOPNOTSUPP; |
147 | goto unlock_mutex; |
148 | } |
149 | |
150 | rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; |
151 | if (np->state != __IDPF_VPORT_UP) |
152 | goto unlock_mutex; |
153 | |
154 | if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && |
155 | rxfh->hfunc != ETH_RSS_HASH_TOP) { |
156 | err = -EOPNOTSUPP; |
157 | goto unlock_mutex; |
158 | } |
159 | |
160 | if (rxfh->key) |
161 | memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size); |
162 | |
163 | if (rxfh->indir) { |
164 | for (lut = 0; lut < rss_data->rss_lut_size; lut++) |
165 | rss_data->rss_lut[lut] = rxfh->indir[lut]; |
166 | } |
167 | |
168 | err = idpf_config_rss(vport); |
169 | |
170 | unlock_mutex: |
171 | idpf_vport_ctrl_unlock(netdev); |
172 | |
173 | return err; |
174 | } |
175 | |
176 | /** |
177 | * idpf_get_channels: get the number of channels supported by the device |
178 | * @netdev: network interface device structure |
179 | * @ch: channel information structure |
180 | * |
181 | * Report maximum of TX and RX. Report one extra channel to match our MailBox |
182 | * Queue. |
183 | */ |
184 | static void idpf_get_channels(struct net_device *netdev, |
185 | struct ethtool_channels *ch) |
186 | { |
187 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
188 | struct idpf_vport_config *vport_config; |
189 | u16 num_txq, num_rxq; |
190 | u16 combined; |
191 | |
192 | vport_config = np->adapter->vport_config[np->vport_idx]; |
193 | |
194 | num_txq = vport_config->user_config.num_req_tx_qs; |
195 | num_rxq = vport_config->user_config.num_req_rx_qs; |
196 | |
197 | combined = min(num_txq, num_rxq); |
198 | |
199 | /* Report maximum channels */ |
200 | ch->max_combined = min_t(u16, vport_config->max_q.max_txq, |
201 | vport_config->max_q.max_rxq); |
202 | ch->max_rx = vport_config->max_q.max_rxq; |
203 | ch->max_tx = vport_config->max_q.max_txq; |
204 | |
205 | ch->max_other = IDPF_MAX_MBXQ; |
206 | ch->other_count = IDPF_MAX_MBXQ; |
207 | |
208 | ch->combined_count = combined; |
209 | ch->rx_count = num_rxq - combined; |
210 | ch->tx_count = num_txq - combined; |
211 | } |
212 | |
213 | /** |
214 | * idpf_set_channels: set the new channel count |
215 | * @netdev: network interface device structure |
216 | * @ch: channel information structure |
217 | * |
218 | * Negotiate a new number of channels with CP. Returns 0 on success, negative |
219 | * on failure. |
220 | */ |
221 | static int idpf_set_channels(struct net_device *netdev, |
222 | struct ethtool_channels *ch) |
223 | { |
224 | struct idpf_vport_config *vport_config; |
225 | u16 combined, num_txq, num_rxq; |
226 | unsigned int num_req_tx_q; |
227 | unsigned int num_req_rx_q; |
228 | struct idpf_vport *vport; |
229 | struct device *dev; |
230 | int err = 0; |
231 | u16 idx; |
232 | |
233 | idpf_vport_ctrl_lock(netdev); |
234 | vport = idpf_netdev_to_vport(netdev); |
235 | |
236 | idx = vport->idx; |
237 | vport_config = vport->adapter->vport_config[idx]; |
238 | |
239 | num_txq = vport_config->user_config.num_req_tx_qs; |
240 | num_rxq = vport_config->user_config.num_req_rx_qs; |
241 | |
242 | combined = min(num_txq, num_rxq); |
243 | |
244 | /* these checks are for cases where user didn't specify a particular |
245 | * value on cmd line but we get non-zero value anyway via |
246 | * get_channels(); look at ethtool.c in ethtool repository (the user |
247 | * space part), particularly, do_schannels() routine |
248 | */ |
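/* For example, if four combined channels are currently configured and the
 * user only changes the Tx count, ethtool may still pass combined_count == 4
 * here even though no change to the combined count was requested.
 */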
249 | if (ch->combined_count == combined) |
250 | ch->combined_count = 0; |
251 | if (ch->combined_count && ch->rx_count == num_rxq - combined) |
252 | ch->rx_count = 0; |
253 | if (ch->combined_count && ch->tx_count == num_txq - combined) |
254 | ch->tx_count = 0; |
255 | |
256 | num_req_tx_q = ch->combined_count + ch->tx_count; |
257 | num_req_rx_q = ch->combined_count + ch->rx_count; |
258 | |
259 | dev = &vport->adapter->pdev->dev; |
260 | /* It's possible to specify number of queues that exceeds max. |
261 | * Stack checks max combined_count and max [tx|rx]_count but not the |
262 | * max combined_count + [tx|rx]_count. These checks should catch that. |
263 | */ |
264 | if (num_req_tx_q > vport_config->max_q.max_txq) { |
265 | dev_info(dev, "Maximum TX queues is %d\n", |
266 | vport_config->max_q.max_txq); |
267 | err = -EINVAL; |
268 | goto unlock_mutex; |
269 | } |
270 | if (num_req_rx_q > vport_config->max_q.max_rxq) { |
271 | dev_info(dev, "Maximum RX queues is %d\n", |
272 | vport_config->max_q.max_rxq); |
273 | err = -EINVAL; |
274 | goto unlock_mutex; |
275 | } |
276 | |
277 | if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq) |
278 | goto unlock_mutex; |
279 | |
280 | vport_config->user_config.num_req_tx_qs = num_req_tx_q; |
281 | vport_config->user_config.num_req_rx_qs = num_req_rx_q; |
282 | |
283 | err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE); |
284 | if (err) { |
285 | /* roll back queue change */ |
286 | vport_config->user_config.num_req_tx_qs = num_txq; |
287 | vport_config->user_config.num_req_rx_qs = num_rxq; |
288 | } |
289 | |
290 | unlock_mutex: |
291 | idpf_vport_ctrl_unlock(netdev); |
292 | |
293 | return err; |
294 | } |
295 | |
296 | /** |
297 | * idpf_get_ringparam - Get ring parameters |
298 | * @netdev: network interface device structure |
299 | * @ring: ethtool ringparam structure |
300 | * @kring: unused |
301 | * @ext_ack: unused |
302 | * |
303 | * Returns current ring parameters. TX and RX rings are reported separately, |
304 | * but the number of rings is not reported. |
305 | */ |
306 | static void idpf_get_ringparam(struct net_device *netdev, |
307 | struct ethtool_ringparam *ring, |
308 | struct kernel_ethtool_ringparam *kring, |
309 | struct netlink_ext_ack *ext_ack) |
310 | { |
311 | struct idpf_vport *vport; |
312 | |
313 | idpf_vport_ctrl_lock(netdev); |
314 | vport = idpf_netdev_to_vport(netdev); |
315 | |
316 | ring->rx_max_pending = IDPF_MAX_RXQ_DESC; |
317 | ring->tx_max_pending = IDPF_MAX_TXQ_DESC; |
318 | ring->rx_pending = vport->rxq_desc_count; |
319 | ring->tx_pending = vport->txq_desc_count; |
320 | |
321 | kring->tcp_data_split = idpf_vport_get_hsplit(vport); |
322 | |
323 | idpf_vport_ctrl_unlock(netdev); |
324 | } |
325 | |
326 | /** |
327 | * idpf_set_ringparam - Set ring parameters |
328 | * @netdev: network interface device structure |
329 | * @ring: ethtool ringparam structure |
330 | * @kring: unused |
331 | * @ext_ack: unused |
332 | * |
333 | * Sets ring parameters. TX and RX rings are controlled separately, but the |
334 | * number of rings is not specified, so all rings get the same settings. |
335 | */ |
336 | static int idpf_set_ringparam(struct net_device *netdev, |
337 | struct ethtool_ringparam *ring, |
338 | struct kernel_ethtool_ringparam *kring, |
339 | struct netlink_ext_ack *ext_ack) |
340 | { |
341 | struct idpf_vport_user_config_data *config_data; |
342 | u32 new_rx_count, new_tx_count; |
343 | struct idpf_vport *vport; |
344 | int i, err = 0; |
345 | u16 idx; |
346 | |
347 | idpf_vport_ctrl_lock(netdev); |
348 | vport = idpf_netdev_to_vport(netdev); |
349 | |
350 | idx = vport->idx; |
351 | |
352 | if (ring->tx_pending < IDPF_MIN_TXQ_DESC) { |
353 | netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n", |
354 | ring->tx_pending, |
355 | IDPF_MIN_TXQ_DESC); |
356 | err = -EINVAL; |
357 | goto unlock_mutex; |
358 | } |
359 | |
360 | if (ring->rx_pending < IDPF_MIN_RXQ_DESC) { |
361 | netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n", |
362 | ring->rx_pending, |
363 | IDPF_MIN_RXQ_DESC); |
364 | err = -EINVAL; |
365 | goto unlock_mutex; |
366 | } |
367 | |
368 | new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE); |
369 | if (new_rx_count != ring->rx_pending) |
370 | netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n", |
371 | new_rx_count); |
372 | |
373 | new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE); |
374 | if (new_tx_count != ring->tx_pending) |
375 | netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n", |
376 | new_tx_count); |
377 | |
378 | if (new_tx_count == vport->txq_desc_count && |
379 | new_rx_count == vport->rxq_desc_count) |
380 | goto unlock_mutex; |
381 | |
382 | if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) { |
383 | NL_SET_ERR_MSG_MOD(ext_ack, |
384 | "setting TCP data split is not supported"); |
385 | err = -EOPNOTSUPP; |
386 | |
387 | goto unlock_mutex; |
388 | } |
389 | |
390 | config_data = &vport->adapter->vport_config[idx]->user_config; |
391 | config_data->num_req_txq_desc = new_tx_count; |
392 | config_data->num_req_rxq_desc = new_rx_count; |
393 | |
394 | /* Since we adjusted the RX completion queue count, the RX buffer queue |
395 | * descriptor count needs to be adjusted as well |
396 | */ |
397 | for (i = 0; i < vport->num_bufqs_per_qgrp; i++) |
398 | vport->bufq_desc_count[i] = |
399 | IDPF_RX_BUFQ_DESC_COUNT(new_rx_count, |
400 | vport->num_bufqs_per_qgrp); |
401 | |
402 | err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE); |
403 | |
404 | unlock_mutex: |
405 | idpf_vport_ctrl_unlock(netdev); |
406 | |
407 | return err; |
408 | } |
409 | |
410 | /** |
411 | * struct idpf_stats - definition for an ethtool statistic |
412 | * @stat_string: statistic name to display in ethtool -S output |
413 | * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) |
414 | * @stat_offset: offsetof() the stat from a base pointer |
415 | * |
416 | * This structure defines a statistic to be added to the ethtool stats buffer. |
417 | * It defines a statistic as offset from a common base pointer. Stats should |
418 | * be defined in constant arrays using the IDPF_STAT macro, with every element |
419 | * of the array using the same _type for calculating the sizeof_stat and |
420 | * stat_offset. |
421 | * |
422 | * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or |
423 | * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from |
424 | * the idpf_add_ethtool_stat() helper function. |
425 | * |
426 | * The @stat_string is interpreted as a format string, allowing formatted |
427 | * values to be inserted while looping over multiple structures for a given |
428 | * statistics array. Thus, every statistic string in an array should have the |
429 | * same type and number of format specifiers, to be formatted by variadic |
430 | * arguments to the idpf_add_stat_string() helper function. |
431 | */ |
432 | struct idpf_stats { |
433 | char stat_string[ETH_GSTRING_LEN]; |
434 | int sizeof_stat; |
435 | int stat_offset; |
436 | }; |
437 | |
438 | /* Helper macro to define an idpf_stat structure with proper size and type. |
439 | * Use this when defining constant statistics arrays. Note that @_type expects |
440 | * only a type name and is used multiple times. |
441 | */ |
442 | #define IDPF_STAT(_type, _name, _stat) { \ |
443 | .stat_string = _name, \ |
444 | .sizeof_stat = sizeof_field(_type, _stat), \ |
445 | .stat_offset = offsetof(_type, _stat) \ |
446 | } |
447 | |
448 | /* Helper macro for defining some statistics related to queues */ |
449 | #define IDPF_QUEUE_STAT(_name, _stat) \ |
450 | IDPF_STAT(struct idpf_queue, _name, _stat) |
451 | |
452 | /* Stats associated with a Tx queue */ |
453 | static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { |
454 | IDPF_QUEUE_STAT("pkts" , q_stats.tx.packets), |
455 | IDPF_QUEUE_STAT("bytes" , q_stats.tx.bytes), |
456 | IDPF_QUEUE_STAT("lso_pkts" , q_stats.tx.lso_pkts), |
457 | }; |
458 | |
459 | /* Stats associated with an Rx queue */ |
460 | static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { |
461 | IDPF_QUEUE_STAT("pkts" , q_stats.rx.packets), |
462 | IDPF_QUEUE_STAT("bytes" , q_stats.rx.bytes), |
463 | IDPF_QUEUE_STAT("rx_gro_hw_pkts" , q_stats.rx.rsc_pkts), |
464 | }; |
465 | |
466 | #define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) |
467 | #define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats) |
468 | |
469 | #define IDPF_PORT_STAT(_name, _stat) \ |
470 | IDPF_STAT(struct idpf_vport, _name, _stat) |
471 | |
472 | static const struct idpf_stats idpf_gstrings_port_stats[] = { |
473 | IDPF_PORT_STAT("rx-csum_errors" , port_stats.rx_hw_csum_err), |
474 | IDPF_PORT_STAT("rx-hsplit" , port_stats.rx_hsplit), |
475 | IDPF_PORT_STAT("rx-hsplit_hbo" , port_stats.rx_hsplit_hbo), |
476 | IDPF_PORT_STAT("rx-bad_descs" , port_stats.rx_bad_descs), |
477 | IDPF_PORT_STAT("tx-skb_drops" , port_stats.tx_drops), |
478 | IDPF_PORT_STAT("tx-dma_map_errs" , port_stats.tx_dma_map_errs), |
479 | IDPF_PORT_STAT("tx-linearized_pkts" , port_stats.tx_linearize), |
480 | IDPF_PORT_STAT("tx-busy_events" , port_stats.tx_busy), |
481 | IDPF_PORT_STAT("rx-unicast_pkts" , port_stats.vport_stats.rx_unicast), |
482 | IDPF_PORT_STAT("rx-multicast_pkts" , port_stats.vport_stats.rx_multicast), |
483 | IDPF_PORT_STAT("rx-broadcast_pkts" , port_stats.vport_stats.rx_broadcast), |
484 | IDPF_PORT_STAT("rx-unknown_protocol" , port_stats.vport_stats.rx_unknown_protocol), |
485 | IDPF_PORT_STAT("tx-unicast_pkts" , port_stats.vport_stats.tx_unicast), |
486 | IDPF_PORT_STAT("tx-multicast_pkts" , port_stats.vport_stats.tx_multicast), |
487 | IDPF_PORT_STAT("tx-broadcast_pkts" , port_stats.vport_stats.tx_broadcast), |
488 | }; |
489 | |
490 | #define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats) |
491 | |
492 | /** |
493 | * __idpf_add_qstat_strings - copy stat strings into ethtool buffer |
494 | * @p: ethtool supplied buffer |
495 | * @stats: stat definitions array |
496 | * @size: size of the stats array |
497 | * @type: stat type |
498 | * @idx: stat index |
499 | * |
500 | * Format and copy the strings described by stats into the buffer pointed at |
501 | * by p. |
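* The resulting strings take the form "<type>_q-<idx>_<stat>", for example
* "tx_q-0_pkts".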
502 | */ |
503 | static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats, |
504 | const unsigned int size, const char *type, |
505 | unsigned int idx) |
506 | { |
507 | unsigned int i; |
508 | |
509 | for (i = 0; i < size; i++) |
510 | ethtool_sprintf(p, "%s_q-%u_%s", |
511 | type, idx, stats[i].stat_string); |
512 | } |
513 | |
514 | /** |
515 | * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer |
516 | * @p: ethtool supplied buffer |
517 | * @stats: stat definitions array |
518 | * @type: stat type |
519 | * @idx: stat idx |
520 | * |
521 | * Format and copy the strings described by the const static stats value into |
522 | * the buffer pointed at by p. |
523 | * |
524 | * The parameter @stats is evaluated twice, so parameters with side effects |
525 | * should be avoided. Additionally, stats must be an array such that |
526 | * ARRAY_SIZE can be called on it. |
527 | */ |
528 | #define idpf_add_qstat_strings(p, stats, type, idx) \ |
529 | __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx) |
530 | |
531 | /** |
532 | * idpf_add_stat_strings - Copy port stat strings into ethtool buffer |
533 | * @p: ethtool buffer |
534 | * @stats: struct to copy from |
535 | * @size: size of stats array to copy from |
536 | */ |
537 | static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats, |
538 | const unsigned int size) |
539 | { |
540 | unsigned int i; |
541 | |
542 | for (i = 0; i < size; i++) |
543 | ethtool_puts(p, stats[i].stat_string); |
544 | } |
545 | |
546 | /** |
547 | * idpf_get_stat_strings - Get stat strings |
548 | * @netdev: network interface device structure |
549 | * @data: buffer for string data |
550 | * |
551 | * Builds the statistics string table |
552 | */ |
553 | static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) |
554 | { |
555 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
556 | struct idpf_vport_config *vport_config; |
557 | unsigned int i; |
558 | |
559 | idpf_add_stat_strings(&data, idpf_gstrings_port_stats, |
560 | IDPF_PORT_STATS_LEN); |
561 | |
562 | vport_config = np->adapter->vport_config[np->vport_idx]; |
563 | /* It's critical that we always report a constant number of strings and |
564 | * that the strings are reported in the same order regardless of how |
565 | * many queues are actually in use. |
566 | */ |
567 | for (i = 0; i < vport_config->max_q.max_txq; i++) |
568 | idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats, |
569 | "tx" , i); |
570 | |
571 | for (i = 0; i < vport_config->max_q.max_rxq; i++) |
572 | idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, |
573 | "rx" , i); |
574 | |
575 | page_pool_ethtool_stats_get_strings(data); |
576 | } |
577 | |
578 | /** |
579 | * idpf_get_strings - Get string set |
580 | * @netdev: network interface device structure |
581 | * @sset: id of string set |
582 | * @data: buffer for string data |
583 | * |
584 | * Builds string tables for various string sets |
585 | */ |
586 | static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data) |
587 | { |
588 | switch (sset) { |
589 | case ETH_SS_STATS: |
590 | idpf_get_stat_strings(netdev, data); |
591 | break; |
592 | default: |
593 | break; |
594 | } |
595 | } |
596 | |
597 | /** |
598 | * idpf_get_sset_count - Get length of string set |
599 | * @netdev: network interface device structure |
600 | * @sset: id of string set |
601 | * |
602 | * Reports size of various string tables. |
603 | */ |
604 | static int idpf_get_sset_count(struct net_device *netdev, int sset) |
605 | { |
606 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
607 | struct idpf_vport_config *vport_config; |
608 | u16 max_txq, max_rxq; |
609 | unsigned int size; |
610 | |
611 | if (sset != ETH_SS_STATS) |
612 | return -EINVAL; |
613 | |
614 | vport_config = np->adapter->vport_config[np->vport_idx]; |
615 | /* This size reported back here *must* be constant throughout the |
616 | * lifecycle of the netdevice, i.e. we must report the maximum length |
617 | * even for queues that don't technically exist. This is due to the |
618 | * fact that this userspace API uses three separate ioctl calls to get |
619 | * stats data but has no way to communicate back to userspace when that |
620 | * size has changed, which can typically happen as a result of changing |
621 | * number of queues. If the number/order of stats change in the middle |
622 | * of this call chain it will lead to userspace crashing/accessing bad |
623 | * data through buffer under/overflow. |
624 | */ |
625 | max_txq = vport_config->max_q.max_txq; |
626 | max_rxq = vport_config->max_q.max_rxq; |
627 | |
628 | size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + |
629 | (IDPF_RX_QUEUE_STATS_LEN * max_rxq); |
630 | size += page_pool_ethtool_stats_get_count(); |
631 | |
632 | return size; |
633 | } |
634 | |
635 | /** |
636 | * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer |
637 | * @data: location to store the stat value |
638 | * @pstat: old stat pointer to copy from |
639 | * @stat: the stat definition |
640 | * |
641 | * Copies the stat data defined by the pointer and stat structure pair into |
642 | * the memory supplied as data. If the pointer is null, data will be zero'd. |
643 | */ |
644 | static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, |
645 | const struct idpf_stats *stat) |
646 | { |
647 | char *p; |
648 | |
649 | if (!pstat) { |
650 | /* Ensure that the ethtool data buffer is zero'd for any stats |
651 | * which don't have a valid pointer. |
652 | */ |
653 | *data = 0; |
654 | return; |
655 | } |
656 | |
657 | p = (char *)pstat + stat->stat_offset; |
658 | switch (stat->sizeof_stat) { |
659 | case sizeof(u64): |
660 | *data = *((u64 *)p); |
661 | break; |
662 | case sizeof(u32): |
663 | *data = *((u32 *)p); |
664 | break; |
665 | case sizeof(u16): |
666 | *data = *((u16 *)p); |
667 | break; |
668 | case sizeof(u8): |
669 | *data = *((u8 *)p); |
670 | break; |
671 | default: |
672 | WARN_ONCE(1, "unexpected stat size for %s", |
673 | stat->stat_string); |
674 | *data = 0; |
675 | } |
676 | } |
677 | |
678 | /** |
679 | * idpf_add_queue_stats - copy queue statistics into supplied buffer |
680 | * @data: ethtool stats buffer |
681 | * @q: the queue to copy |
682 | * |
683 | * Queue statistics must be copied while protected by u64_stats_fetch_begin, |
684 | * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats |
685 | * are defined in idpf_gstrings_queue_stats. If the queue pointer is null, |
686 | * zero out the queue stat values and update the data pointer. Otherwise |
687 | * safely copy the stats from the queue into the supplied buffer and update |
688 | * the data pointer when finished. |
689 | * |
690 | * This function expects to be called while under rcu_read_lock(). |
691 | */ |
692 | static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) |
693 | { |
694 | const struct idpf_stats *stats; |
695 | unsigned int start; |
696 | unsigned int size; |
697 | unsigned int i; |
698 | |
699 | if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { |
700 | size = IDPF_RX_QUEUE_STATS_LEN; |
701 | stats = idpf_gstrings_rx_queue_stats; |
702 | } else { |
703 | size = IDPF_TX_QUEUE_STATS_LEN; |
704 | stats = idpf_gstrings_tx_queue_stats; |
705 | } |
706 | |
707 | /* To avoid invalid statistics values, ensure that we keep retrying |
708 | * the copy until we get a consistent value according to |
709 | * u64_stats_fetch_retry. |
710 | */ |
711 | do { |
712 | start = u64_stats_fetch_begin(&q->stats_sync); |
713 | for (i = 0; i < size; i++) |
714 | idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); |
715 | } while (u64_stats_fetch_retry(&q->stats_sync, start)); |
716 | |
717 | /* Once we successfully copy the stats in, update the data pointer */ |
718 | *data += size; |
719 | } |
720 | |
721 | /** |
722 | * idpf_add_empty_queue_stats - Add stats for a non-existent queue |
723 | * @data: pointer to data buffer |
724 | * @qtype: type of data queue |
725 | * |
726 | * We must report a constant length of stats back to userspace regardless of |
727 | * how many queues are actually in use because stats collection happens over |
728 | * three separate ioctls and there's no way to notify userspace the size |
729 | * changed between those calls. This adds empty stats to the data buffer since |
730 | * we don't have a real queue to refer to for this stats slot. |
731 | */ |
732 | static void idpf_add_empty_queue_stats(u64 **data, u16 qtype) |
733 | { |
734 | unsigned int i; |
735 | int stats_len; |
736 | |
737 | if (qtype == VIRTCHNL2_QUEUE_TYPE_RX) |
738 | stats_len = IDPF_RX_QUEUE_STATS_LEN; |
739 | else |
740 | stats_len = IDPF_TX_QUEUE_STATS_LEN; |
741 | |
742 | for (i = 0; i < stats_len; i++) |
743 | (*data)[i] = 0; |
744 | *data += stats_len; |
745 | } |
746 | |
747 | /** |
748 | * idpf_add_port_stats - Copy port stats into ethtool buffer |
749 | * @vport: virtual port struct |
750 | * @data: ethtool buffer to copy into |
751 | */ |
752 | static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data) |
753 | { |
754 | unsigned int size = IDPF_PORT_STATS_LEN; |
755 | unsigned int start; |
756 | unsigned int i; |
757 | |
758 | do { |
759 | start = u64_stats_fetch_begin(&vport->port_stats.stats_sync); |
760 | for (i = 0; i < size; i++) |
761 | idpf_add_one_ethtool_stat(&(*data)[i], vport, |
762 | &idpf_gstrings_port_stats[i]); |
763 | } while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start)); |
764 | |
765 | *data += size; |
766 | } |
767 | |
768 | /** |
769 | * idpf_collect_queue_stats - accumulate various per queue stats |
770 | * into port level stats |
771 | * @vport: pointer to vport struct |
772 | **/ |
773 | static void idpf_collect_queue_stats(struct idpf_vport *vport) |
774 | { |
775 | struct idpf_port_stats *pstats = &vport->port_stats; |
776 | int i, j; |
777 | |
778 | /* zero out port stats since they're actually tracked in per |
779 | * queue stats; this is only for reporting |
780 | */ |
781 | u64_stats_update_begin(&pstats->stats_sync); |
782 | u64_stats_set(&pstats->rx_hw_csum_err, 0); |
783 | u64_stats_set(&pstats->rx_hsplit, 0); |
784 | u64_stats_set(&pstats->rx_hsplit_hbo, 0); |
785 | u64_stats_set(&pstats->rx_bad_descs, 0); |
786 | u64_stats_set(&pstats->tx_linearize, 0); |
787 | u64_stats_set(&pstats->tx_busy, 0); |
788 | u64_stats_set(&pstats->tx_drops, 0); |
789 | u64_stats_set(&pstats->tx_dma_map_errs, 0); |
790 | u64_stats_update_end(&pstats->stats_sync); |
791 | |
792 | for (i = 0; i < vport->num_rxq_grp; i++) { |
793 | struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; |
794 | u16 num_rxq; |
795 | |
796 | if (idpf_is_queue_model_split(vport->rxq_model)) |
797 | num_rxq = rxq_grp->splitq.num_rxq_sets; |
798 | else |
799 | num_rxq = rxq_grp->singleq.num_rxq; |
800 | |
801 | for (j = 0; j < num_rxq; j++) { |
802 | u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; |
803 | struct idpf_rx_queue_stats *stats; |
804 | struct idpf_queue *rxq; |
805 | unsigned int start; |
806 | |
807 | if (idpf_is_queue_model_split(vport->rxq_model)) |
808 | rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; |
809 | else |
810 | rxq = rxq_grp->singleq.rxqs[j]; |
811 | |
812 | if (!rxq) |
813 | continue; |
814 | |
815 | do { |
816 | start = u64_stats_fetch_begin(&rxq->stats_sync); |
817 | |
818 | stats = &rxq->q_stats.rx; |
819 | hw_csum_err = u64_stats_read(&stats->hw_csum_err); |
820 | hsplit = u64_stats_read(&stats->hsplit_pkts); |
821 | hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); |
822 | bad_descs = u64_stats_read(&stats->bad_descs); |
823 | } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); |
824 | |
825 | u64_stats_update_begin(&pstats->stats_sync); |
826 | u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err); |
827 | u64_stats_add(&pstats->rx_hsplit, hsplit); |
828 | u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo); |
829 | u64_stats_add(&pstats->rx_bad_descs, bad_descs); |
830 | u64_stats_update_end(&pstats->stats_sync); |
831 | } |
832 | } |
833 | |
834 | for (i = 0; i < vport->num_txq_grp; i++) { |
835 | struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; |
836 | |
837 | for (j = 0; j < txq_grp->num_txq; j++) { |
838 | u64 linearize, qbusy, skb_drops, dma_map_errs; |
839 | struct idpf_queue *txq = txq_grp->txqs[j]; |
840 | struct idpf_tx_queue_stats *stats; |
841 | unsigned int start; |
842 | |
843 | if (!txq) |
844 | continue; |
845 | |
846 | do { |
847 | start = u64_stats_fetch_begin(&txq->stats_sync); |
848 | |
849 | stats = &txq->q_stats.tx; |
850 | linearize = u64_stats_read(&stats->linearize); |
851 | qbusy = u64_stats_read(&stats->q_busy); |
852 | skb_drops = u64_stats_read(&stats->skb_drops); |
853 | dma_map_errs = u64_stats_read(&stats->dma_map_errs); |
854 | } while (u64_stats_fetch_retry(&txq->stats_sync, start)); |
855 | |
856 | u64_stats_update_begin(&pstats->stats_sync); |
857 | u64_stats_add(&pstats->tx_linearize, linearize); |
858 | u64_stats_add(&pstats->tx_busy, qbusy); |
859 | u64_stats_add(&pstats->tx_drops, skb_drops); |
860 | u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs); |
861 | u64_stats_update_end(&pstats->stats_sync); |
862 | } |
863 | } |
864 | } |
865 | |
866 | /** |
867 | * idpf_get_ethtool_stats - report device statistics |
868 | * @netdev: network interface device structure |
869 | * @stats: ethtool statistics structure |
870 | * @data: pointer to data buffer |
871 | * |
872 | * All statistics are added to the data buffer as an array of u64. |
873 | */ |
874 | static void idpf_get_ethtool_stats(struct net_device *netdev, |
875 | struct ethtool_stats __always_unused *stats, |
876 | u64 *data) |
877 | { |
878 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
879 | struct idpf_vport_config *vport_config; |
880 | struct page_pool_stats pp_stats = { }; |
881 | struct idpf_vport *vport; |
882 | unsigned int total = 0; |
883 | unsigned int i, j; |
884 | bool is_splitq; |
885 | u16 qtype; |
886 | |
887 | idpf_vport_ctrl_lock(netdev); |
888 | vport = idpf_netdev_to_vport(netdev); |
889 | |
890 | if (np->state != __IDPF_VPORT_UP) { |
891 | idpf_vport_ctrl_unlock(netdev); |
892 | |
893 | return; |
894 | } |
895 | |
896 | rcu_read_lock(); |
897 | |
898 | idpf_collect_queue_stats(vport); |
899 | idpf_add_port_stats(vport, &data); |
900 | |
901 | for (i = 0; i < vport->num_txq_grp; i++) { |
902 | struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; |
903 | |
904 | qtype = VIRTCHNL2_QUEUE_TYPE_TX; |
905 | |
906 | for (j = 0; j < txq_grp->num_txq; j++, total++) { |
907 | struct idpf_queue *txq = txq_grp->txqs[j]; |
908 | |
909 | if (!txq) |
910 | idpf_add_empty_queue_stats(&data, qtype); |
911 | else |
912 | idpf_add_queue_stats(&data, txq); |
913 | } |
914 | } |
915 | |
916 | vport_config = vport->adapter->vport_config[vport->idx]; |
917 | /* It is critical we provide a constant number of stats back to |
918 | * userspace regardless of how many queues are actually in use because |
919 | * there is no way to inform userspace the size has changed between |
920 | * ioctl calls. This will fill in any missing stats with zero. |
921 | */ |
922 | for (; total < vport_config->max_q.max_txq; total++) |
923 | idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX); |
924 | total = 0; |
925 | |
926 | is_splitq = idpf_is_queue_model_split(vport->rxq_model); |
927 | |
928 | for (i = 0; i < vport->num_rxq_grp; i++) { |
929 | struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; |
930 | u16 num_rxq; |
931 | |
932 | qtype = VIRTCHNL2_QUEUE_TYPE_RX; |
933 | |
934 | if (is_splitq) |
935 | num_rxq = rxq_grp->splitq.num_rxq_sets; |
936 | else |
937 | num_rxq = rxq_grp->singleq.num_rxq; |
938 | |
939 | for (j = 0; j < num_rxq; j++, total++) { |
940 | struct idpf_queue *rxq; |
941 | |
942 | if (is_splitq) |
943 | rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; |
944 | else |
945 | rxq = rxq_grp->singleq.rxqs[j]; |
946 | if (!rxq) |
947 | idpf_add_empty_queue_stats(&data, qtype); |
948 | else |
949 | idpf_add_queue_stats(&data, rxq); |
950 | |
951 | /* In splitq mode, don't get page pool stats here since |
952 | * the pools are attached to the buffer queues |
953 | */ |
954 | if (is_splitq) |
955 | continue; |
956 | |
957 | if (rxq) |
958 | page_pool_get_stats(rxq->pp, &pp_stats); |
959 | } |
960 | } |
961 | |
962 | for (i = 0; i < vport->num_rxq_grp; i++) { |
963 | for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { |
964 | struct idpf_queue *rxbufq = |
965 | &vport->rxq_grps[i].splitq.bufq_sets[j].bufq; |
966 | |
967 | page_pool_get_stats(rxbufq->pp, &pp_stats); |
968 | } |
969 | } |
970 | |
971 | for (; total < vport_config->max_q.max_rxq; total++) |
972 | idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); |
973 | |
974 | page_pool_ethtool_stats_get(data, &pp_stats); |
975 | |
976 | rcu_read_unlock(); |
977 | |
978 | idpf_vport_ctrl_unlock(netdev); |
979 | } |
980 | |
981 | /** |
982 | * idpf_find_rxq - find rxq from q index |
983 | * @vport: virtual port associated to queue |
984 | * @q_num: q index used to find queue |
985 | * |
986 | * returns pointer to rx queue |
987 | */ |
988 | static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num) |
989 | { |
990 | int q_grp, q_idx; |
991 | |
992 | if (!idpf_is_queue_model_split(vport->rxq_model)) |
993 | return vport->rxq_grps->singleq.rxqs[q_num]; |
994 | |
995 | q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; |
996 | q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; |
997 | |
998 | return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq; |
999 | } |
1000 | |
1001 | /** |
1002 | * idpf_find_txq - find txq from q index |
1003 | * @vport: virtual port associated to queue |
1004 | * @q_num: q index used to find queue |
1005 | * |
1006 | * returns pointer to tx queue |
1007 | */ |
1008 | static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num) |
1009 | { |
1010 | int q_grp; |
1011 | |
1012 | if (!idpf_is_queue_model_split(vport->txq_model)) |
1013 | return vport->txqs[q_num]; |
1014 | |
1015 | q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; |
1016 | |
1017 | return vport->txq_grps[q_grp].complq; |
1018 | } |
1019 | |
1020 | /** |
1021 | * __idpf_get_q_coalesce - get ITR values for specific queue |
1022 | * @ec: ethtool structure to fill with driver's coalesce settings |
1023 | * @q: queue of Rx or Tx |
1024 | */ |
1025 | static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, |
1026 | struct idpf_queue *q) |
1027 | { |
1028 | if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { |
1029 | ec->use_adaptive_rx_coalesce = |
1030 | IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode); |
1031 | ec->rx_coalesce_usecs = q->q_vector->rx_itr_value; |
1032 | } else { |
1033 | ec->use_adaptive_tx_coalesce = |
1034 | IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode); |
1035 | ec->tx_coalesce_usecs = q->q_vector->tx_itr_value; |
1036 | } |
1037 | } |
1038 | |
1039 | /** |
1040 | * idpf_get_q_coalesce - get ITR values for specific queue |
1041 | * @netdev: pointer to the netdev associated with this query |
1042 | * @ec: coalesce settings to program the device with |
1043 | * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index |
1044 | * |
1045 | * Return 0 on success, and negative on failure |
1046 | */ |
1047 | static int idpf_get_q_coalesce(struct net_device *netdev, |
1048 | struct ethtool_coalesce *ec, |
1049 | u32 q_num) |
1050 | { |
1051 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
1052 | struct idpf_vport *vport; |
1053 | int err = 0; |
1054 | |
1055 | idpf_vport_ctrl_lock(netdev); |
1056 | vport = idpf_netdev_to_vport(netdev); |
1057 | |
1058 | if (np->state != __IDPF_VPORT_UP) |
1059 | goto unlock_mutex; |
1060 | |
1061 | if (q_num >= vport->num_rxq && q_num >= vport->num_txq) { |
1062 | err = -EINVAL; |
1063 | goto unlock_mutex; |
1064 | } |
1065 | |
1066 | if (q_num < vport->num_rxq) |
1067 | __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num)); |
1068 | |
1069 | if (q_num < vport->num_txq) |
1070 | __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num)); |
1071 | |
1072 | unlock_mutex: |
1073 | idpf_vport_ctrl_unlock(netdev); |
1074 | |
1075 | return err; |
1076 | } |
1077 | |
1078 | /** |
1079 | * idpf_get_coalesce - get ITR values as requested by user |
1080 | * @netdev: pointer to the netdev associated with this query |
1081 | * @ec: coalesce settings to be filled |
1082 | * @kec: unused |
1083 | * @extack: unused |
1084 | * |
1085 | * Return 0 on success, and negative on failure |
1086 | */ |
1087 | static int idpf_get_coalesce(struct net_device *netdev, |
1088 | struct ethtool_coalesce *ec, |
1089 | struct kernel_ethtool_coalesce *kec, |
1090 | struct netlink_ext_ack *extack) |
1091 | { |
1092 | /* Return coalesce based on queue number zero */ |
1093 | return idpf_get_q_coalesce(netdev, ec, 0); |
1094 | } |
1095 | |
1096 | /** |
1097 | * idpf_get_per_q_coalesce - get ITR values as requested by user |
1098 | * @netdev: pointer to the netdev associated with this query |
1099 | * @q_num: queue for which the ITR values have to be retrieved |
1100 | * @ec: coalesce settings to be filled |
1101 | * |
1102 | * Return 0 on success, and negative on failure |
1103 | */ |
1105 | static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, |
1106 | struct ethtool_coalesce *ec) |
1107 | { |
1108 | return idpf_get_q_coalesce(netdev, ec, q_num); |
1109 | } |
1110 | |
1111 | /** |
1112 | * __idpf_set_q_coalesce - set ITR values for specific queue |
1113 | * @ec: ethtool structure from user to update ITR settings |
1114 | * @q: queue for which ITR values have to be set |
1115 | * @is_rxq: is queue type rx |
1116 | * |
1117 | * Returns 0 on success, negative otherwise. |
1118 | */ |
1119 | static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, |
1120 | struct idpf_queue *q, bool is_rxq) |
1121 | { |
1122 | u32 use_adaptive_coalesce, coalesce_usecs; |
1123 | struct idpf_q_vector *qv = q->q_vector; |
1124 | bool is_dim_ena = false; |
1125 | u16 itr_val; |
1126 | |
1127 | if (is_rxq) { |
1128 | is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); |
1129 | use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; |
1130 | coalesce_usecs = ec->rx_coalesce_usecs; |
1131 | itr_val = qv->rx_itr_value; |
1132 | } else { |
1133 | is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); |
1134 | use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; |
1135 | coalesce_usecs = ec->tx_coalesce_usecs; |
1136 | itr_val = qv->tx_itr_value; |
1137 | } |
1138 | if (coalesce_usecs != itr_val && use_adaptive_coalesce) { |
1139 | netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); |
1140 | |
1141 | return -EINVAL; |
1142 | } |
1143 | |
1144 | if (is_dim_ena && use_adaptive_coalesce) |
1145 | return 0; |
1146 | |
1147 | if (coalesce_usecs > IDPF_ITR_MAX) { |
1148 | netdev_err(q->vport->netdev, |
1149 | "Invalid value, %d-usecs range is 0-%d\n", |
1150 | coalesce_usecs, IDPF_ITR_MAX); |
1151 | |
1152 | return -EINVAL; |
1153 | } |
1154 | |
1155 | if (coalesce_usecs % 2) { |
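/* HW only supports even ITR values, so round an odd request down,
 * e.g. a requested 49 usecs is programmed as 48 usecs.
 */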
1156 | coalesce_usecs--; |
1157 | netdev_info(q->vport->netdev, |
1158 | "HW only supports even ITR values, ITR rounded to %d\n", |
1159 | coalesce_usecs); |
1160 | } |
1161 | |
1162 | if (is_rxq) { |
1163 | qv->rx_itr_value = coalesce_usecs; |
1164 | if (use_adaptive_coalesce) { |
1165 | qv->rx_intr_mode = IDPF_ITR_DYNAMIC; |
1166 | } else { |
1167 | qv->rx_intr_mode = !IDPF_ITR_DYNAMIC; |
1168 | idpf_vport_intr_write_itr(qv, qv->rx_itr_value, |
1169 | false); |
1170 | } |
1171 | } else { |
1172 | qv->tx_itr_value = coalesce_usecs; |
1173 | if (use_adaptive_coalesce) { |
1174 | qv->tx_intr_mode = IDPF_ITR_DYNAMIC; |
1175 | } else { |
1176 | qv->tx_intr_mode = !IDPF_ITR_DYNAMIC; |
1177 | idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true); |
1178 | } |
1179 | } |
1180 | |
1181 | /* Update of static/dynamic itr will be taken care when interrupt is |
1182 | * fired |
1183 | */ |
1184 | return 0; |
1185 | } |
1186 | |
1187 | /** |
1188 | * idpf_set_q_coalesce - set ITR values for specific queue |
1189 | * @vport: vport associated to the queue that need updating |
1190 | * @ec: coalesce settings to program the device with |
1191 | * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index |
1192 | * @is_rxq: is queue type rx |
1193 | * |
1194 | * Return 0 on success, and negative on failure |
1195 | */ |
1196 | static int idpf_set_q_coalesce(struct idpf_vport *vport, |
1197 | struct ethtool_coalesce *ec, |
1198 | int q_num, bool is_rxq) |
1199 | { |
1200 | struct idpf_queue *q; |
1201 | |
1202 | q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num); |
1203 | |
1204 | if (q && __idpf_set_q_coalesce(ec, q, is_rxq)) |
1205 | return -EINVAL; |
1206 | |
1207 | return 0; |
1208 | } |
1209 | |
1210 | /** |
1211 | * idpf_set_coalesce - set ITR values as requested by user |
1212 | * @netdev: pointer to the netdev associated with this query |
1213 | * @ec: coalesce settings to program the device with |
1214 | * @kec: unused |
1215 | * @extack: unused |
1216 | * |
1217 | * Return 0 on success, and negative on failure |
1218 | */ |
1219 | static int idpf_set_coalesce(struct net_device *netdev, |
1220 | struct ethtool_coalesce *ec, |
1221 | struct kernel_ethtool_coalesce *kec, |
1222 | struct netlink_ext_ack *extack) |
1223 | { |
1224 | struct idpf_netdev_priv *np = netdev_priv(netdev); |
1225 | struct idpf_vport *vport; |
1226 | int i, err = 0; |
1227 | |
1228 | idpf_vport_ctrl_lock(netdev); |
1229 | vport = idpf_netdev_to_vport(netdev); |
1230 | |
1231 | if (np->state != __IDPF_VPORT_UP) |
1232 | goto unlock_mutex; |
1233 | |
1234 | for (i = 0; i < vport->num_txq; i++) { |
1235 | err = idpf_set_q_coalesce(vport, ec, i, false); |
1236 | if (err) |
1237 | goto unlock_mutex; |
1238 | } |
1239 | |
1240 | for (i = 0; i < vport->num_rxq; i++) { |
1241 | err = idpf_set_q_coalesce(vport, ec, i, true); |
1242 | if (err) |
1243 | goto unlock_mutex; |
1244 | } |
1245 | |
1246 | unlock_mutex: |
1247 | idpf_vport_ctrl_unlock(netdev); |
1248 | |
1249 | return err; |
1250 | } |
1251 | |
1252 | /** |
1253 | * idpf_set_per_q_coalesce - set ITR values as requested by user |
1254 | * @netdev: pointer to the netdev associated with this query |
1255 | * @q_num: queue for which the ITR values have to be set |
1256 | * @ec: coalesce settings to program the device with |
1257 | * |
1258 | * Return 0 on success, and negative on failure |
1259 | */ |
1260 | static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num, |
1261 | struct ethtool_coalesce *ec) |
1262 | { |
1263 | struct idpf_vport *vport; |
1264 | int err; |
1265 | |
1266 | idpf_vport_ctrl_lock(netdev); |
1267 | vport = idpf_netdev_to_vport(netdev); |
1268 | |
1269 | err = idpf_set_q_coalesce(vport, ec, q_num, false); |
1270 | if (err) { |
1271 | idpf_vport_ctrl_unlock(netdev); |
1272 | |
1273 | return err; |
1274 | } |
1275 | |
1276 | err = idpf_set_q_coalesce(vport, ec, q_num, true); |
1277 | |
1278 | idpf_vport_ctrl_unlock(netdev); |
1279 | |
1280 | return err; |
1281 | } |
1282 | |
1283 | /** |
1284 | * idpf_get_msglevel - Get debug message level |
1285 | * @netdev: network interface device structure |
1286 | * |
1287 | * Returns current debug message level. |
1288 | */ |
1289 | static u32 idpf_get_msglevel(struct net_device *netdev) |
1290 | { |
1291 | struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); |
1292 | |
1293 | return adapter->msg_enable; |
1294 | } |
1295 | |
1296 | /** |
1297 | * idpf_set_msglevel - Set debug message level |
1298 | * @netdev: network interface device structure |
1299 | * @data: message level |
1300 | * |
1301 | * Set current debug message level. Higher values cause the driver to |
1302 | * be noisier. |
1303 | */ |
1304 | static void idpf_set_msglevel(struct net_device *netdev, u32 data) |
1305 | { |
1306 | struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); |
1307 | |
1308 | adapter->msg_enable = data; |
1309 | } |
1310 | |
1311 | /** |
1312 | * idpf_get_link_ksettings - Get Link Speed and Duplex settings |
1313 | * @netdev: network interface device structure |
1314 | * @cmd: ethtool command |
1315 | * |
1316 | * Reports speed/duplex settings. |
1317 | **/ |
1318 | static int idpf_get_link_ksettings(struct net_device *netdev, |
1319 | struct ethtool_link_ksettings *cmd) |
1320 | { |
1321 | struct idpf_vport *vport; |
1322 | |
1323 | idpf_vport_ctrl_lock(netdev); |
1324 | vport = idpf_netdev_to_vport(netdev); |
1325 | |
1326 | ethtool_link_ksettings_zero_link_mode(cmd, supported); |
1327 | cmd->base.autoneg = AUTONEG_DISABLE; |
1328 | cmd->base.port = PORT_NONE; |
1329 | if (vport->link_up) { |
1330 | cmd->base.duplex = DUPLEX_FULL; |
1331 | cmd->base.speed = vport->link_speed_mbps; |
1332 | } else { |
1333 | cmd->base.duplex = DUPLEX_UNKNOWN; |
1334 | cmd->base.speed = SPEED_UNKNOWN; |
1335 | } |
1336 | |
1337 | idpf_vport_ctrl_unlock(netdev); |
1338 | |
1339 | return 0; |
1340 | } |
1341 | |
1342 | static const struct ethtool_ops idpf_ethtool_ops = { |
1343 | .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
1344 | ETHTOOL_COALESCE_USE_ADAPTIVE, |
1345 | .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, |
1346 | .get_msglevel = idpf_get_msglevel, |
1347 | .set_msglevel = idpf_set_msglevel, |
1348 | .get_link = ethtool_op_get_link, |
1349 | .get_coalesce = idpf_get_coalesce, |
1350 | .set_coalesce = idpf_set_coalesce, |
1351 | .get_per_queue_coalesce = idpf_get_per_q_coalesce, |
1352 | .set_per_queue_coalesce = idpf_set_per_q_coalesce, |
1353 | .get_ethtool_stats = idpf_get_ethtool_stats, |
1354 | .get_strings = idpf_get_strings, |
1355 | .get_sset_count = idpf_get_sset_count, |
1356 | .get_channels = idpf_get_channels, |
1357 | .get_rxnfc = idpf_get_rxnfc, |
1358 | .get_rxfh_key_size = idpf_get_rxfh_key_size, |
1359 | .get_rxfh_indir_size = idpf_get_rxfh_indir_size, |
1360 | .get_rxfh = idpf_get_rxfh, |
1361 | .set_rxfh = idpf_set_rxfh, |
1362 | .set_channels = idpf_set_channels, |
1363 | .get_ringparam = idpf_get_ringparam, |
1364 | .set_ringparam = idpf_set_ringparam, |
1365 | .get_link_ksettings = idpf_get_link_ksettings, |
1366 | }; |
1367 | |
1368 | /** |
1369 | * idpf_set_ethtool_ops - Initialize ethtool ops struct |
1370 | * @netdev: network interface device structure |
1371 | * |
1372 | * Sets ethtool ops struct in our netdev so that ethtool can call |
1373 | * our functions. |
1374 | */ |
1375 | void idpf_set_ethtool_ops(struct net_device *netdev) |
1376 | { |
1377 | netdev->ethtool_ops = &idpf_ethtool_ops; |
1378 | } |
1379 | |