// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

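/* Return the option that follows @option within @descriptor, or NULL if the
 * next option would run past the descriptor's total_length.
 */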
static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

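/* Check a single device option's length and required feature mask. On a
 * match, store a pointer to the option body in the corresponding output
 * argument (raw addressing instead switches the queue format directly); on a
 * mismatch, warn and leave the feature disabled.
 */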
static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_BUFFER_SIZES:
		if (option_length < sizeof(**dev_op_buffer_sizes) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Buffer Sizes",
				 (int)sizeof(**dev_op_buffer_sizes),
				 GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_buffer_sizes))
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Buffer Sizes");
		*dev_op_buffer_sizes = (void *)(option + 1);
		break;
	default:
		/* If we don't recognize the option just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames,
					dev_op_dqo_qpl, dev_op_buffer_sizes);
		dev_opt = next_opt;
	}

	return 0;
}

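/* Allocate the admin queue out of a DMA pool, reset the per-opcode counters
 * and hand the queue to the device: device revision 0 takes a page frame
 * number, later revisions take a base address/length pair plus the RUN bit in
 * the driver status register.
 */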
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq_pool = dma_pool_create("adminq_pool", dev,
					    GVE_ADMINQ_BUFFER_SIZE, 0, 0);
	if (unlikely(!priv->adminq_pool))
		return -ENOMEM;
	priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				      &priv->adminq_bus_addr);
	if (unlikely(!priv->adminq)) {
		dma_pool_destroy(priv->adminq_pool);
		return -ENOMEM;
	}

	priv->adminq_mask =
		(GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Setup Admin queue with the device */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
			    &priv->reg_bar0->adminq_pfn);
	} else {
		iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
			    &priv->reg_bar0->adminq_length);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		iowrite32be(priv->adminq_bus_addr >> 32,
			    &priv->reg_bar0->adminq_base_address_hi);
#endif
		iowrite32be(priv->adminq_bus_addr,
			    &priv->reg_bar0->adminq_base_address_lo);
		iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
	}
	gve_set_admin_queue_ok(priv);
	return 0;
}

void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
		while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
			/* If this is reached the device is unrecoverable and still
			 * holding memory. Continue looping to avoid memory corruption,
			 * but WARN so it is visible what is going on.
			 */
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	} else {
		iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
		while (!(ioread32be(&priv->reg_bar0->device_status)
				& GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
	dma_pool_destroy(priv->adminq_pool);
	gve_clear_admin_queue_ok(priv);
}

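/* Ring the admin queue doorbell with the latest producer count. */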
static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

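/* Poll the admin queue event counter until the device has consumed commands
 * up to @prod_cnt. Returns false if it has not caught up after
 * GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK polls of GVE_ADMINQ_SLEEP_LEN ms each.
 */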
static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

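/* Translate an admin queue status code reported by the device into a kernel
 * errno, counting anything other than PASSED or UNSET as a command failure.
 */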
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status;
		int err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// command queue so there should be enough space.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		// This is not a valid path
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1
 * then the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
		.ntfy_blk_msix_base_idx =
			cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

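/* Build a CREATE_TX_QUEUE command and issue it without ringing the doorbell,
 * so the caller can batch several queue creations behind a single kick.
 */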
static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		u16 comp_ring_size;
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
			comp_ring_size =
				priv->options_dqo_rda.tx_comp_ring_entries;
		} else {
			qpl_id = tx->dqo.qpl->id;
			comp_ring_size = priv->tx_desc_cnt;
		}
		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_tx_queue.tx_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(comp_ring_size);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

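/* Build a CREATE_RX_QUEUE command and issue it without ringing the doorbell;
 * ring addresses, buffer sizes and the page list id depend on whether the
 * queue format is GQI or DQO.
 */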
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus);
		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		u16 rx_buff_ring_entries;
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
			rx_buff_ring_entries =
				priv->options_dqo_rda.rx_buff_ring_entries;
		} else {
			qpl_id = rx->dqo.qpl->id;
			rx_buff_ring_entries = priv->rx_desc_cnt;
		}
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.rx_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd.create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd.create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(rx_buff_ring_entries);
		cmd.create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
		if (priv->header_split_enabled)
			cmd.create_rx_queue.header_buffer_size =
				cpu_to_be16(priv->header_buf_size);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

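/* Record the descriptor ring sizes advertised in the device descriptor. */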
static int gve_set_desc_cnt(struct gve_priv *priv,
			    struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	return 0;
}

static int
gve_set_desc_cnt_dqo(struct gve_priv *priv,
		     const struct gve_device_descriptor *descriptor,
		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);

	if (priv->queue_format == GVE_DQO_QPL_FORMAT)
		return 0;

	priv->options_dqo_rda.tx_comp_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
	priv->options_dqo_rda.rx_buff_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);

	return 0;
}

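/* Apply optional features advertised by the device: jumbo frame MTU,
 * DQO QPL pages-per-QPL overrides and configurable buffer sizes.
 */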
static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl,
					  const struct gve_device_option_buffer_sizes
					  *dev_op_buffer_sizes)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* Override pages for qpl for DQO-QPL */
	if (dev_op_dqo_qpl) {
		priv->tx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
		priv->rx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
		if (priv->tx_pages_per_qpl == 0)
			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
		if (priv->rx_pages_per_qpl == 0)
			priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
	}

	if (dev_op_buffer_sizes &&
	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
		priv->max_rx_buffer_size =
			be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
		priv->header_buf_size =
			be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
		dev_info(&priv->pdev->dev,
			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
			 priv->max_rx_buffer_size, priv->header_buf_size);
	}
}

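/* Query the device descriptor, pick a queue format from the advertised device
 * options (preferring DQO RDA, then DQO QPL, GQI RDA, GQI QPL) and record
 * device limits such as MTU, MAC address, ring sizes and pages per QPL.
 */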
int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				    &descriptor_bus);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
		cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
		cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length =
		cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames,
					 &dev_op_dqo_qpl,
					 &dev_op_buffer_sizes);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_dqo_qpl) {
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}
	if (gve_is_gqi(priv)) {
		err = gve_set_desc_cnt(priv, descriptor);
	} else {
		/* DQO supports LRO. */
		priv->dev->hw_features |= NETIF_F_LRO;
		err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
	}
	if (err)
		goto free_device_descriptor;

	priv->max_registered_pages =
		be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);

	if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
			priv->rx_data_slot_cnt);
		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
	}
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames, dev_op_dqo_qpl,
				      dev_op_buffer_sizes);

free_device_descriptor:
	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
	return err;
}

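/* Register a queue page list with the device by handing it a DMA-coherent
 * array of the list's page bus addresses.
 */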
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
		.page_size = cpu_to_be64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

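/* Ask the device to write the link speed into a DMA-coherent scratch buffer
 * and cache the result in priv->link_speed.
 */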
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

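/* Fetch the DQO packet type map from the device and copy the L3/L4 types into
 * the caller's lookup table.
 */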
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}
