// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h .
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN	ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN	ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS		ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS		ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN	ARRAY_SIZE(gve_gstrings_priv_flags)

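/* The ETH_SS_STATS string buffer is laid out as the main stats, then
 * NUM_GVE_RX_CNTS strings per RX queue, then NUM_GVE_TX_CNTS strings per
 * TX queue, and finally the adminq stats. gve_get_ethtool_stats() must
 * fill values in this same order.
 */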
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	char *s = (char *)data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(s, *gve_gstrings_main_stats,
		       sizeof(gve_gstrings_main_stats));
		s += sizeof(gve_gstrings_main_stats);

		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_rx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < num_tx_queues; i++) {
			for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_tx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		memcpy(s, *gve_gstrings_adminq_stats,
		       sizeof(gve_gstrings_adminq_stats));
		s += sizeof(gve_gstrings_adminq_stats);
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(s, *gve_gstrings_priv_flags,
		       sizeof(gve_gstrings_priv_flags));
		s += sizeof(gve_gstrings_priv_flags);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

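/* Per-ring counters below are read under u64_stats_fetch_begin()/
 * u64_stats_fetch_retry() so that 64-bit values stay consistent even on
 * 32-bit hosts where a writer may be updating them concurrently.
 */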
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
		tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
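	/* Aggregate the per-ring counters into device-wide totals for the
	 * main stats.
	 */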
	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
	     ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_rx_hsplit_unsplit_pkt =
					rx->rx_hsplit_unsplit_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = rx_hsplit_pkt;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = rx_hsplit_unsplit_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

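	/* The stats report shared with the NIC holds the driver-written TX
	 * and RX stats first, followed by the NIC-written RX and then TX
	 * stats; only the NIC-written portions are consumed below. Each NIC
	 * entry carries its queue id, which is mapped to a start index
	 * rather than assuming any particular queue ordering.
	 */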
	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = tmp_rx_hsplit_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				stats_idx = rx_qid_to_stats_idx[ring];
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				stats_idx = tx_qid_to_stats_idx[ring];
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);
					data[i++] = value;
				}
			}
			/* XDP xsk counters */
			data[i++] = tx->xdp_xsk_wakeup;
			data[i++] = tx->xdp_xsk_done;
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
}

static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}
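
/* gve exposes independent RX and TX queue counts rather than combined
 * channels, so only the rx/tx counts can be changed, e.g. with
 * "ethtool -L <iface> rx 8 tx 8" (interface name illustrative).
 */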

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (priv->num_xdp_queues &&
	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
		return -EINVAL;
	}

	if (!netif_carrier_ok(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->rx_desc_cnt;
	cmd->tx_max_pending = priv->tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;

	if (!gve_header_split_supported(priv))
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
	else if (priv->header_split_enabled)
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	else
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

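/* Ring sizes are fixed by the device; the only ringparam that can be
 * changed here is the tcp-data-split (header split) setting.
 */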
static int gve_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (priv->tx_desc_cnt != cmd->tx_pending ||
	    priv->rx_desc_cnt != cmd->rx_pending) {
		dev_info(&priv->pdev->dev, "Modifying ring size is not supported.\n");
		return -EOPNOTSUPP;
	}

	return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

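/* rx-copybreak tunable: as is conventional for copybreak, received packets
 * up to this size are copied into a freshly allocated skb instead of the
 * ring buffer being flipped or reused.
 */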
static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero out gve stats when report-stats is turned off and delete the
	 * report-stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			num_tx_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
		       sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

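/* Link speed is reported by the device over the admin queue and cached in
 * priv->link_speed; it is only queried when no cached value is present.
 */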
static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (priv->link_speed == 0)
		err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;

	cmd->base.duplex = DUPLEX_FULL;

	return err;
}

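/* Interrupt coalescing is only implemented for the DQO descriptor format;
 * on GQI these callbacks return -EOPNOTSUPP.
 */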
static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.set_ringparam = gve_set_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings
};
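
/* Example userspace usage (interface name illustrative):
 *
 *   ethtool -S eth0                          -> gve_get_ethtool_stats()
 *   ethtool -L eth0 rx 8 tx 8                -> gve_set_channels()
 *   ethtool -C eth0 rx-usecs 20 tx-usecs 20  -> gve_set_coalesce() (DQO only)
 *   ethtool --set-priv-flags eth0 report-stats on -> gve_set_priv_flags()
 */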