1 | /* SPDX-License-Identifier: (GPL-2.0 OR MIT) |
2 | * Google virtual Ethernet (gve) driver |
3 | * |
4 | * Copyright (C) 2015-2021 Google, Inc. |
5 | */ |
6 | |
7 | #ifndef _GVE_H_ |
8 | #define _GVE_H_ |
9 | |
10 | #include <linux/dma-mapping.h> |
11 | #include <linux/dmapool.h> |
12 | #include <linux/ethtool_netlink.h> |
13 | #include <linux/netdevice.h> |
14 | #include <linux/pci.h> |
15 | #include <linux/u64_stats_sync.h> |
16 | #include <net/xdp.h> |
17 | |
18 | #include "gve_desc.h" |
19 | #include "gve_desc_dqo.h" |
20 | |
21 | #ifndef PCI_VENDOR_ID_GOOGLE |
22 | #define PCI_VENDOR_ID_GOOGLE 0x1ae0 |
23 | #endif |
24 | |
25 | #define PCI_DEV_ID_GVNIC 0x0042 |
26 | |
27 | #define GVE_REGISTER_BAR 0 |
28 | #define GVE_DOORBELL_BAR 2 |
29 | |
30 | /* Driver can alloc up to 2 segments for the header and 2 for the payload. */ |
31 | #define GVE_TX_MAX_IOVEC 4 |
32 | /* 1 for management, 1 for rx, 1 for tx */ |
33 | #define GVE_MIN_MSIX 3 |
34 | |
35 | /* Numbers of gve tx/rx stats in stats report. */ |
36 | #define GVE_TX_STATS_REPORT_NUM 6 |
37 | #define GVE_RX_STATS_REPORT_NUM 2 |
38 | |
39 | /* Interval to schedule a stats report update, 20000ms. */ |
40 | #define GVE_STATS_REPORT_TIMER_PERIOD 20000 |
41 | |
42 | /* Numbers of NIC tx/rx stats in stats report. */ |
43 | #define NIC_TX_STATS_REPORT_NUM 0 |
44 | #define NIC_RX_STATS_REPORT_NUM 4 |
45 | |
46 | #define GVE_ADMINQ_BUFFER_SIZE 4096 |
47 | |
48 | #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) |
49 | |
50 | /* PTYPEs are always 10 bits. */ |
51 | #define GVE_NUM_PTYPES 1024 |
52 | |
53 | #define GVE_DEFAULT_RX_BUFFER_SIZE 2048 |
54 | |
55 | #define GVE_MAX_RX_BUFFER_SIZE 4096 |
56 | |
57 | #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 |
58 | |
59 | #define GVE_XDP_ACTIONS 5 |
60 | |
61 | #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 |
62 | |
63 | #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128 |
64 | |
65 | #define DQO_QPL_DEFAULT_TX_PAGES 512 |
66 | #define DQO_QPL_DEFAULT_RX_PAGES 2048 |
67 | |
68 | /* Maximum TSO size supported on DQO */ |
69 | #define GVE_DQO_TX_MAX 0x3FFFF |
70 | |
71 | #define GVE_TX_BUF_SHIFT_DQO 11 |
72 | |
73 | /* 2K buffers for DQO-QPL */ |
74 | #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO) |
75 | #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO) |
76 | #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO)) |
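/* A worked example of the constants above, assuming 4 KiB pages: the bounce
 * buffers are 2 KiB (GVE_TX_BUF_SIZE_DQO = 2048), each QPL page holds two of
 * them (GVE_TX_BUFS_PER_PAGE_DQO = 2), and a maximum-size TSO packet of
 * GVE_DQO_TX_MAX = 0x3FFFF bytes needs DIV_ROUND_UP(0x3FFFF, 2048) = 128
 * buffers (GVE_MAX_TX_BUFS_PER_PKT).
 */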
77 | |
78 | /* If the number of free/recyclable buffers is less than this threshold, the |
79 |  * driver allocs and uses a non-qpl page on the receive path of DQO QPL to |
80 |  * free up buffers. |
81 |  * The value is set large enough to post at least 3 64K LRO packets via 2K buffers to the NIC. |
82 |  */ |
83 | #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96 |
84 | |
85 | /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ |
86 | struct gve_rx_desc_queue { |
87 | struct gve_rx_desc *desc_ring; /* the descriptor ring */ |
88 | dma_addr_t bus; /* the bus for the desc_ring */ |
89 | u8 seqno; /* the next expected seqno for this desc */ |
90 | }; |
91 | |
92 | /* The page info for a single slot in the RX data queue */ |
93 | struct gve_rx_slot_page_info { |
94 | struct page *page; |
95 | void *page_address; |
96 | u32 page_offset; /* offset to write to in page */ |
97 | int pagecnt_bias; /* expected pagecnt if only the driver has a ref */ |
98 | u16 pad; /* adjustment for rx padding */ |
99 | u8 can_flip; /* tracks if the networking stack is using the page */ |
100 | }; |
101 | |
102 | /* A list of pages registered with the device during setup and used by a queue |
103 | * as buffers |
104 | */ |
105 | struct gve_queue_page_list { |
106 | u32 id; /* unique id */ |
107 | u32 num_entries; |
108 | struct page **pages; /* list of num_entries pages */ |
109 | dma_addr_t *page_buses; /* the dma addrs of the pages */ |
110 | }; |
111 | |
112 | /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */ |
113 | struct gve_rx_data_queue { |
114 | union gve_rx_data_slot *data_ring; /* read by NIC */ |
115 | dma_addr_t data_bus; /* dma mapping of the slots */ |
116 | struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ |
117 | struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ |
118 | u8 raw_addressing; /* use raw_addressing? */ |
119 | }; |
120 | |
121 | struct gve_priv; |
122 | |
123 | /* RX buffer queue for posting buffers to HW. |
124 | * Each RX (completion) queue has a corresponding buffer queue. |
125 | */ |
126 | struct gve_rx_buf_queue_dqo { |
127 | struct gve_rx_desc_dqo *desc_ring; |
128 | dma_addr_t bus; |
129 | u32 head; /* Pointer to start cleaning buffers at. */ |
130 | u32 tail; /* Last posted buffer index + 1 */ |
131 | u32 mask; /* Mask for indices to the size of the ring */ |
132 | }; |
133 | |
134 | /* RX completion queue to receive packets from HW. */ |
135 | struct gve_rx_compl_queue_dqo { |
136 | struct gve_rx_compl_desc_dqo *desc_ring; |
137 | dma_addr_t bus; |
138 | |
139 | /* Number of slots which did not have a buffer posted yet. We should not |
140 | * post more buffers than the queue size to avoid HW overrunning the |
141 | * queue. |
142 | */ |
143 | int num_free_slots; |
144 | |
145 | /* HW uses a "generation bit" to notify SW of new descriptors. When a |
146 | * descriptor's generation bit is different from the current generation, |
147 | * that descriptor is ready to be consumed by SW. |
148 | */ |
149 | u8 cur_gen_bit; |
150 | |
151 | /* Pointer into desc_ring where the next completion descriptor will be |
152 | * received. |
153 | */ |
154 | u32 head; |
155 | u32 mask; /* Mask for indices to the size of the ring */ |
156 | }; |
157 | |
158 | struct gve_header_buf { |
159 | u8 *data; |
160 | dma_addr_t addr; |
161 | }; |
162 | |
163 | /* Stores state for tracking buffers posted to HW */ |
164 | struct gve_rx_buf_state_dqo { |
165 | /* The page posted to HW. */ |
166 | struct gve_rx_slot_page_info page_info; |
167 | |
168 | /* The DMA address corresponding to `page_info`. */ |
169 | dma_addr_t addr; |
170 | |
171 | /* Last offset into the page when it only had a single reference, at |
172 | * which point every other offset is free to be reused. |
173 | */ |
174 | u32 last_single_ref_offset; |
175 | |
176 | /* Linked list index to next element in the list, or -1 if none */ |
177 | s16 next; |
178 | }; |
179 | |
180 | /* `head` and `tail` are indices into an array, or -1 if empty. */ |
181 | struct gve_index_list { |
182 | s16 head; |
183 | s16 tail; |
184 | }; |
185 | |
186 | /* A single received packet split across multiple buffers may be |
187 | * reconstructed using the information in this structure. |
188 | */ |
189 | struct gve_rx_ctx { |
190 | /* head and tail of skb chain for the current packet or NULL if none */ |
191 | struct sk_buff *skb_head; |
192 | struct sk_buff *skb_tail; |
193 | u32 total_size; |
194 | u8 frag_cnt; |
195 | bool drop_pkt; |
196 | }; |
197 | |
198 | struct gve_rx_cnts { |
199 | u32 ok_pkt_bytes; |
200 | u16 ok_pkt_cnt; |
201 | u16 total_pkt_cnt; |
202 | u16 cont_pkt_cnt; |
203 | u16 desc_err_pkt_cnt; |
204 | }; |
205 | |
206 | /* Contains datapath state used to represent an RX queue. */ |
207 | struct gve_rx_ring { |
208 | struct gve_priv *gve; |
209 | union { |
210 | /* GQI fields */ |
211 | struct { |
212 | struct gve_rx_desc_queue desc; |
213 | struct gve_rx_data_queue data; |
214 | |
215 | /* threshold for posting new buffs and descs */ |
216 | u32 db_threshold; |
217 | u16 packet_buffer_size; |
218 | |
219 | u32 qpl_copy_pool_mask; |
220 | u32 qpl_copy_pool_head; |
221 | struct gve_rx_slot_page_info *qpl_copy_pool; |
222 | }; |
223 | |
224 | /* DQO fields. */ |
225 | struct { |
226 | struct gve_rx_buf_queue_dqo bufq; |
227 | struct gve_rx_compl_queue_dqo complq; |
228 | |
229 | struct gve_rx_buf_state_dqo *buf_states; |
230 | u16 num_buf_states; |
231 | |
232 | /* Linked list of gve_rx_buf_state_dqo. Index into |
233 | * buf_states, or -1 if empty. |
234 | */ |
235 | s16 free_buf_states; |
236 | |
237 | /* Linked list of gve_rx_buf_state_dqo. Indexes into |
238 | * buf_states, or -1 if empty. |
239 | * |
240 | * This list contains buf_states which are pointing to |
241 | * valid buffers. |
242 | * |
243 | * We use a FIFO here in order to increase the |
244 | * probability that buffers can be reused by increasing |
245 | * the time between usages. |
246 | */ |
247 | struct gve_index_list recycled_buf_states; |
248 | |
249 | /* Linked list of gve_rx_buf_state_dqo. Indexes into |
250 | * buf_states, or -1 if empty. |
251 | * |
252 | * This list contains buf_states which have buffers |
253 | * which cannot be reused yet. |
254 | */ |
255 | struct gve_index_list used_buf_states; |
256 | |
257 | /* qpl assigned to this queue */ |
258 | struct gve_queue_page_list *qpl; |
259 | |
260 | /* index into queue page list */ |
261 | u32 next_qpl_page_idx; |
262 | |
263 | /* track number of used buffers */ |
264 | u16 used_buf_states_cnt; |
265 | |
266 | /* Address info of the buffers for header-split */ |
267 | struct gve_header_buf hdr_bufs; |
268 | } dqo; |
269 | }; |
270 | |
271 | u64 rbytes; /* free-running bytes received */ |
272 | u64 rx_hsplit_bytes; /* free-running header bytes received */ |
273 | u64 rpackets; /* free-running packets received */ |
274 | u32 cnt; /* free-running total number of completed packets */ |
275 | u32 fill_cnt; /* free-running total number of descs and buffs posted */ |
276 | u32 mask; /* masks the cnt and fill_cnt to the size of the ring */ |
277 | u64 rx_hsplit_pkt; /* free-running packets with headers split */ |
278 | u64 rx_copybreak_pkt; /* free-running count of copybreak packets */ |
279 | u64 rx_copied_pkt; /* free-running total number of copied packets */ |
280 | u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ |
281 | u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */ |
282 | u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */ |
283 | /* free-running count of unsplit packets due to header buffer overflow or hdr_len being 0 */ |
284 | u64 rx_hsplit_unsplit_pkt; |
285 | u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */ |
286 | u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ |
287 | u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */ |
288 | u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */ |
289 | u64 xdp_tx_errors; |
290 | u64 xdp_redirect_errors; |
291 | u64 xdp_alloc_fails; |
292 | u64 xdp_actions[GVE_XDP_ACTIONS]; |
293 | u32 q_num; /* queue index */ |
294 | u32 ntfy_id; /* notification block index */ |
295 | struct gve_queue_resources *q_resources; /* head and tail pointer idx */ |
296 | dma_addr_t q_resources_bus; /* dma address for the queue resources */ |
297 | struct u64_stats_sync statss; /* sync stats for 32bit archs */ |
298 | |
299 | struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */ |
300 | |
301 | /* XDP stuff */ |
302 | struct xdp_rxq_info xdp_rxq; |
303 | struct xdp_rxq_info xsk_rxq; |
304 | struct xsk_buff_pool *xsk_pool; |
305 | struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */ |
306 | }; |
307 | |
308 | /* A TX desc ring entry */ |
309 | union gve_tx_desc { |
310 | struct gve_tx_pkt_desc pkt; /* first desc for a packet */ |
311 | struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */ |
312 | struct gve_tx_seg_desc seg; /* subsequent descs for a packet */ |
313 | }; |
314 | |
315 | /* Tracks the memory in the fifo occupied by a segment of a packet */ |
316 | struct gve_tx_iovec { |
317 | u32 iov_offset; /* offset into this segment */ |
318 | u32 iov_len; /* length */ |
319 | u32 iov_padding; /* padding associated with this segment */ |
320 | }; |
321 | |
322 | /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc |
323 | * ring entry but only used for a pkt_desc not a seg_desc |
324 | */ |
325 | struct gve_tx_buffer_state { |
326 | union { |
327 | struct sk_buff *skb; /* skb for this pkt */ |
328 | struct xdp_frame *xdp_frame; /* xdp_frame */ |
329 | }; |
330 | struct { |
331 | u16 size; /* size of xmitted xdp pkt */ |
332 | u8 is_xsk; /* xsk buff */ |
333 | } xdp; |
334 | union { |
335 | struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ |
336 | struct { |
337 | DEFINE_DMA_UNMAP_ADDR(dma); |
338 | DEFINE_DMA_UNMAP_LEN(len); |
339 | }; |
340 | }; |
341 | }; |
342 | |
343 | /* A TX buffer - each queue has one */ |
344 | struct gve_tx_fifo { |
345 | void *base; /* address of base of FIFO */ |
346 | u32 size; /* total size */ |
347 | atomic_t available; /* how much space is still available */ |
348 | u32 head; /* offset to write at */ |
349 | struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */ |
350 | }; |
351 | |
352 | /* TX descriptor for DQO format */ |
353 | union gve_tx_desc_dqo { |
354 | struct gve_tx_pkt_desc_dqo pkt; |
355 | struct gve_tx_tso_context_desc_dqo tso_ctx; |
356 | struct gve_tx_general_context_desc_dqo general_ctx; |
357 | }; |
358 | |
359 | enum gve_packet_state { |
360 | /* Packet is in free list, available to be allocated. |
361 | * This should always be zero since state is not explicitly initialized. |
362 | */ |
363 | GVE_PACKET_STATE_UNALLOCATED, |
364 | /* Packet is expecting a regular data completion or miss completion */ |
365 | GVE_PACKET_STATE_PENDING_DATA_COMPL, |
366 | /* Packet has received a miss completion and is expecting a |
367 | * re-injection completion. |
368 | */ |
369 | GVE_PACKET_STATE_PENDING_REINJECT_COMPL, |
370 | /* No valid completion received within the specified timeout. */ |
371 | GVE_PACKET_STATE_TIMED_OUT_COMPL, |
372 | }; |
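/* Rough lifecycle implied by the states above and by struct
 * gve_tx_pending_packet_dqo below: a posted packet moves from UNALLOCATED to
 * PENDING_DATA_COMPL; a regular data completion returns it to the free list,
 * while a miss completion moves it to PENDING_REINJECT_COMPL until the
 * re-injection completion arrives or timeout_jiffies expires, after which it
 * is treated as TIMED_OUT_COMPL and freed.
 */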
373 | |
374 | struct gve_tx_pending_packet_dqo { |
375 | struct sk_buff *skb; /* skb for this packet */ |
376 | |
377 | /* 0th element corresponds to the linear portion of `skb`, should be |
378 | * unmapped with `dma_unmap_single`. |
379 | * |
380 | * All others correspond to `skb`'s frags and should be unmapped with |
381 | * `dma_unmap_page`. |
382 | */ |
383 | union { |
384 | struct { |
385 | DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); |
386 | DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); |
387 | }; |
388 | s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT]; |
389 | }; |
390 | |
391 | u16 num_bufs; |
392 | |
393 | /* Linked list index to next element in the list, or -1 if none */ |
394 | s16 next; |
395 | |
396 | /* Linked list index to prev element in the list, or -1 if none. |
397 | * Used for tracking either outstanding miss completions or prematurely |
398 | * freed packets. |
399 | */ |
400 | s16 prev; |
401 | |
402 | /* Identifies the current state of the packet as defined in |
403 | * `enum gve_packet_state`. |
404 | */ |
405 | u8 state; |
406 | |
407 | /* If packet is an outstanding miss completion, then the packet is |
408 | * freed if the corresponding re-injection completion is not received |
409 | * before kernel jiffies exceeds timeout_jiffies. |
410 | */ |
411 | unsigned long timeout_jiffies; |
412 | }; |
413 | |
414 | /* Contains datapath state used to represent a TX queue. */ |
415 | struct gve_tx_ring { |
416 | /* Cacheline 0 -- Accessed & dirtied during transmit */ |
417 | union { |
418 | /* GQI fields */ |
419 | struct { |
420 | struct gve_tx_fifo tx_fifo; |
421 | u32 req; /* driver tracked head pointer */ |
422 | u32 done; /* driver tracked tail pointer */ |
423 | }; |
424 | |
425 | /* DQO fields. */ |
426 | struct { |
427 | /* Linked list of gve_tx_pending_packet_dqo. Index into |
428 | * pending_packets, or -1 if empty. |
429 | * |
430 | * This is a consumer list owned by the TX path. When it |
431 | * runs out, the producer list is stolen from the |
432 | * completion handling path |
433 | * (dqo_compl.free_pending_packets). |
434 | */ |
435 | s16 free_pending_packets; |
436 | |
437 | /* Cached value of `dqo_compl.hw_tx_head` */ |
438 | u32 head; |
439 | u32 tail; /* Last posted buffer index + 1 */ |
440 | |
441 | /* Index of the last descriptor with "report event" bit |
442 | * set. |
443 | */ |
444 | u32 last_re_idx; |
445 | |
446 | /* free running number of packet buf descriptors posted */ |
447 | u16 posted_packet_desc_cnt; |
448 | /* free running number of packet buf descriptors completed */ |
449 | u16 completed_packet_desc_cnt; |
450 | |
451 | /* QPL fields */ |
452 | struct { |
453 | /* Linked list of gve_tx_buf_dqo. Index into |
454 | * tx_qpl_buf_next, or -1 if empty. |
455 | * |
456 | * This is a consumer list owned by the TX path. When it |
457 | * runs out, the producer list is stolen from the |
458 | * completion handling path |
459 | * (dqo_compl.free_tx_qpl_buf_head). |
460 | */ |
461 | s16 free_tx_qpl_buf_head; |
462 | |
463 | /* Free running count of the number of QPL tx buffers |
464 | * allocated |
465 | */ |
466 | u32 alloc_tx_qpl_buf_cnt; |
467 | |
468 | /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */ |
469 | u32 free_tx_qpl_buf_cnt; |
470 | }; |
471 | } dqo_tx; |
472 | }; |
473 | |
474 | /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */ |
475 | union { |
476 | /* GQI fields */ |
477 | struct { |
478 | /* Spinlock for when cleanup is in progress */ |
479 | spinlock_t clean_lock; |
480 | /* Spinlock for XDP tx traffic */ |
481 | spinlock_t xdp_lock; |
482 | }; |
483 | |
484 | /* DQO fields. */ |
485 | struct { |
486 | u32 head; /* Last read on compl_desc */ |
487 | |
488 | /* Tracks the current gen bit of compl_q */ |
489 | u8 cur_gen_bit; |
490 | |
491 | /* Linked list of gve_tx_pending_packet_dqo. Index into |
492 | * pending_packets, or -1 if empty. |
493 | * |
494 | * This is the producer list, owned by the completion |
495 | * handling path. When the consumer list |
496 | * (dqo_tx.free_pending_packets) runs out, this list |
497 | * will be stolen. |
498 | */ |
499 | atomic_t free_pending_packets; |
500 | |
501 | /* Last TX ring index fetched by HW */ |
502 | atomic_t hw_tx_head; |
503 | |
504 | /* List to track pending packets which received a miss |
505 | * completion but not a corresponding reinjection. |
506 | */ |
507 | struct gve_index_list miss_completions; |
508 | |
509 | /* List to track pending packets that were completed |
510 | * before receiving a valid completion because they |
511 | * reached a specified timeout. |
512 | */ |
513 | struct gve_index_list timed_out_completions; |
514 | |
515 | /* QPL fields */ |
516 | struct { |
517 | /* Linked list of gve_tx_buf_dqo. Index into |
518 | * tx_qpl_buf_next, or -1 if empty. |
519 | * |
520 | * This is the producer list, owned by the completion |
521 | * handling path. When the consumer list |
522 | * (dqo_tx.free_tx_qpl_buf_head) runs out, this list |
523 | * will be stolen. |
524 | */ |
525 | atomic_t free_tx_qpl_buf_head; |
526 | |
527 | /* Free running count of the number of tx buffers |
528 | * freed |
529 | */ |
530 | atomic_t free_tx_qpl_buf_cnt; |
531 | }; |
532 | } dqo_compl; |
533 | } ____cacheline_aligned; |
534 | u64 pkt_done; /* free-running - total packets completed */ |
535 | u64 bytes_done; /* free-running - total bytes completed */ |
536 | u64 dropped_pkt; /* free-running - total packets dropped */ |
537 | u64 dma_mapping_error; /* count of dma mapping errors */ |
538 | |
539 | /* Cacheline 2 -- Read-mostly fields */ |
540 | union { |
541 | /* GQI fields */ |
542 | struct { |
543 | union gve_tx_desc *desc; |
544 | |
545 | /* Maps 1:1 to a desc */ |
546 | struct gve_tx_buffer_state *info; |
547 | }; |
548 | |
549 | /* DQO fields. */ |
550 | struct { |
551 | union gve_tx_desc_dqo *tx_ring; |
552 | struct gve_tx_compl_desc *compl_ring; |
553 | |
554 | struct gve_tx_pending_packet_dqo *pending_packets; |
555 | s16 num_pending_packets; |
556 | |
557 | u32 complq_mask; /* complq size is complq_mask + 1 */ |
558 | |
559 | /* QPL fields */ |
560 | struct { |
561 | /* qpl assigned to this queue */ |
562 | struct gve_queue_page_list *qpl; |
563 | |
564 | /* Each QPL page is divided into TX bounce buffers |
565 | * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is |
566 | * an array to manage linked lists of TX buffers. |
567 | * An entry j at index i implies that j'th buffer |
568 | * is next on the list after i |
569 | */ |
570 | s16 *tx_qpl_buf_next; |
571 | u32 num_tx_qpl_bufs; |
572 | }; |
573 | } dqo; |
574 | } ____cacheline_aligned; |
575 | struct netdev_queue *netdev_txq; |
576 | struct gve_queue_resources *q_resources; /* head and tail pointer idx */ |
577 | struct device *dev; |
578 | u32 mask; /* masks req and done down to queue size */ |
579 | u8 raw_addressing; /* use raw_addressing? */ |
580 | |
581 | /* Slow-path fields */ |
582 | u32 q_num ____cacheline_aligned; /* queue idx */ |
583 | u32 stop_queue; /* count of queue stops */ |
584 | u32 wake_queue; /* count of queue wakes */ |
585 | u32 queue_timeout; /* count of queue timeouts */ |
586 | u32 ntfy_id; /* notification block index */ |
587 | u32 last_kick_msec; /* Last time the queue was kicked */ |
588 | dma_addr_t bus; /* dma address of the descr ring */ |
589 | dma_addr_t q_resources_bus; /* dma address of the queue resources */ |
590 | dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ |
591 | struct u64_stats_sync statss; /* sync stats for 32bit archs */ |
592 | struct xsk_buff_pool *xsk_pool; |
593 | u32 xdp_xsk_wakeup; |
594 | u32 xdp_xsk_done; |
595 | u64 xdp_xsk_sent; |
596 | u64 xdp_xmit; |
597 | u64 xdp_xmit_errors; |
598 | } ____cacheline_aligned; |
599 | |
600 | /* Wraps the info for one irq including the napi struct and the queues |
601 | * associated with that irq. |
602 | */ |
603 | struct gve_notify_block { |
604 | __be32 *irq_db_index; /* pointer to idx into Bar2 */ |
605 | char name[IFNAMSIZ + 16]; /* name registered with the kernel */ |
606 | struct napi_struct napi; /* kernel napi struct for this block */ |
607 | struct gve_priv *priv; |
608 | struct gve_tx_ring *tx; /* tx rings on this block */ |
609 | struct gve_rx_ring *rx; /* rx rings on this block */ |
610 | }; |
611 | |
612 | /* Tracks allowed and current queue settings */ |
613 | struct gve_queue_config { |
614 | u16 max_queues; |
615 | u16 num_queues; /* current */ |
616 | }; |
617 | |
618 | /* Tracks the available and used qpl IDs */ |
619 | struct gve_qpl_config { |
620 | u32 qpl_map_size; /* map memory size */ |
621 | unsigned long *qpl_id_map; /* bitmap of used qpl ids */ |
622 | }; |
623 | |
624 | struct gve_options_dqo_rda { |
625 | u16 tx_comp_ring_entries; /* number of tx_comp descriptors */ |
626 | u16 rx_buff_ring_entries; /* number of rx_buff descriptors */ |
627 | }; |
628 | |
629 | struct gve_irq_db { |
630 | __be32 index; |
631 | } ____cacheline_aligned; |
632 | |
633 | struct gve_ptype { |
634 | u8 l3_type; /* `gve_l3_type` in gve_adminq.h */ |
635 | u8 l4_type; /* `gve_l4_type` in gve_adminq.h */ |
636 | }; |
637 | |
638 | struct gve_ptype_lut { |
639 | struct gve_ptype ptypes[GVE_NUM_PTYPES]; |
640 | }; |
641 | |
642 | /* Parameters for allocating queue page lists */ |
643 | struct gve_qpls_alloc_cfg { |
644 | struct gve_qpl_config *qpl_cfg; |
645 | struct gve_queue_config *tx_cfg; |
646 | struct gve_queue_config *rx_cfg; |
647 | |
648 | u16 num_xdp_queues; |
649 | bool raw_addressing; |
650 | bool is_gqi; |
651 | |
652 | /* Allocated resources are returned here */ |
653 | struct gve_queue_page_list *qpls; |
654 | }; |
655 | |
656 | /* Parameters for allocating resources for tx queues */ |
657 | struct gve_tx_alloc_rings_cfg { |
658 | struct gve_queue_config *qcfg; |
659 | |
660 | /* qpls and qpl_cfg must already be allocated */ |
661 | struct gve_queue_page_list *qpls; |
662 | struct gve_qpl_config *qpl_cfg; |
663 | |
664 | u16 ring_size; |
665 | u16 start_idx; |
666 | u16 num_rings; |
667 | bool raw_addressing; |
668 | |
669 | /* Allocated resources are returned here */ |
670 | struct gve_tx_ring *tx; |
671 | }; |
672 | |
673 | /* Parameters for allocating resources for rx queues */ |
674 | struct gve_rx_alloc_rings_cfg { |
675 | /* tx config is also needed to determine QPL ids */ |
676 | struct gve_queue_config *qcfg; |
677 | struct gve_queue_config *qcfg_tx; |
678 | |
679 | /* qpls and qpl_cfg must already be allocated */ |
680 | struct gve_queue_page_list *qpls; |
681 | struct gve_qpl_config *qpl_cfg; |
682 | |
683 | u16 ring_size; |
684 | u16 packet_buffer_size; |
685 | bool raw_addressing; |
686 | bool enable_header_split; |
687 | |
688 | /* Allocated resources are returned here */ |
689 | struct gve_rx_ring *rx; |
690 | }; |
691 | |
692 | /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value |
693 | * when the entire configure_device_resources command is zeroed out and the |
694 | * queue_format is not specified. |
695 | */ |
696 | enum gve_queue_format { |
697 | GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0, |
698 | GVE_GQI_RDA_FORMAT = 0x1, |
699 | GVE_GQI_QPL_FORMAT = 0x2, |
700 | GVE_DQO_RDA_FORMAT = 0x3, |
701 | GVE_DQO_QPL_FORMAT = 0x4, |
702 | }; |
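/* As the structures in this file suggest, the QPL formats bounce packet data
 * through pre-registered queue page lists, while the RDA ("raw addressing")
 * formats DMA directly to and from host buffers (see the raw_addressing
 * flags and gve_is_qpl() below). The DQO formats additionally pair each
 * descriptor ring with a completion ring (gve_rx_compl_queue_dqo and
 * dqo_compl in gve_tx_ring).
 */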
703 | |
704 | struct gve_priv { |
705 | struct net_device *dev; |
706 | struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ |
707 | struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */ |
708 | struct gve_queue_page_list *qpls; /* array of num qpls */ |
709 | struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */ |
710 | struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */ |
711 | dma_addr_t irq_db_indices_bus; |
712 | struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */ |
713 | char mgmt_msix_name[IFNAMSIZ + 16]; |
714 | u32 mgmt_msix_idx; |
715 | __be32 *counter_array; /* array of num_event_counters */ |
716 | dma_addr_t counter_array_bus; |
717 | |
718 | u16 num_event_counters; |
719 | u16 tx_desc_cnt; /* num desc per ring */ |
720 | u16 rx_desc_cnt; /* num desc per ring */ |
721 | u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */ |
722 | u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */ |
723 | u16 rx_data_slot_cnt; /* rx buffer length */ |
724 | u64 max_registered_pages; |
725 | u64 num_registered_pages; /* num pages registered with NIC */ |
726 | struct bpf_prog *xdp_prog; /* XDP BPF program */ |
727 | u32 rx_copybreak; /* copy packets smaller than this */ |
728 | u16 default_num_queues; /* default num queues to set up */ |
729 | |
730 | u16 num_xdp_queues; |
731 | struct gve_queue_config tx_cfg; |
732 | struct gve_queue_config rx_cfg; |
733 | struct gve_qpl_config qpl_cfg; /* map used QPL ids */ |
734 | u32 num_ntfy_blks; /* split between TX and RX so must be even */ |
735 | |
736 | struct gve_registers __iomem *reg_bar0; /* see gve_register.h */ |
737 | __be32 __iomem *db_bar2; /* "array" of doorbells */ |
738 | u32 msg_enable; /* level for netif* netdev print macros */ |
739 | struct pci_dev *pdev; |
740 | |
741 | /* metrics */ |
742 | u32 tx_timeo_cnt; |
743 | |
744 | /* Admin queue - see gve_adminq.h */ |
745 | union gve_adminq_command *adminq; |
746 | dma_addr_t adminq_bus_addr; |
747 | struct dma_pool *adminq_pool; |
748 | u32 adminq_mask; /* masks prod_cnt to adminq size */ |
749 | u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */ |
750 | u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */ |
751 | u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */ |
752 | /* free-running count of each AQ cmd executed */ |
753 | u32 adminq_describe_device_cnt; |
754 | u32 adminq_cfg_device_resources_cnt; |
755 | u32 adminq_register_page_list_cnt; |
756 | u32 adminq_unregister_page_list_cnt; |
757 | u32 adminq_create_tx_queue_cnt; |
758 | u32 adminq_create_rx_queue_cnt; |
759 | u32 adminq_destroy_tx_queue_cnt; |
760 | u32 adminq_destroy_rx_queue_cnt; |
761 | u32 adminq_dcfg_device_resources_cnt; |
762 | u32 adminq_set_driver_parameter_cnt; |
763 | u32 adminq_report_stats_cnt; |
764 | u32 adminq_report_link_speed_cnt; |
765 | u32 adminq_get_ptype_map_cnt; |
766 | u32 adminq_verify_driver_compatibility_cnt; |
767 | |
768 | /* Global stats */ |
769 | u32 interface_up_cnt; /* count of times interface turned up since last reset */ |
770 | u32 interface_down_cnt; /* count of times interface turned down since last reset */ |
771 | u32 reset_cnt; /* count of reset */ |
772 | u32 page_alloc_fail; /* count of page alloc fails */ |
773 | u32 dma_mapping_error; /* count of dma mapping errors */ |
774 | u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */ |
775 | u32 suspend_cnt; /* count of times suspended */ |
776 | u32 resume_cnt; /* count of times resumed */ |
777 | struct workqueue_struct *gve_wq; |
778 | struct work_struct service_task; |
779 | struct work_struct stats_report_task; |
780 | unsigned long service_task_flags; |
781 | unsigned long state_flags; |
782 | |
783 | struct gve_stats_report *stats_report; |
784 | u64 stats_report_len; |
785 | dma_addr_t stats_report_bus; /* dma address for the stats report */ |
786 | unsigned long ethtool_flags; |
787 | |
788 | unsigned long stats_report_timer_period; |
789 | struct timer_list stats_report_timer; |
790 | |
791 | /* Gvnic device link speed from hypervisor. */ |
792 | u64 link_speed; |
793 | bool up_before_suspend; /* True if dev was up before suspend */ |
794 | |
795 | struct gve_options_dqo_rda options_dqo_rda; |
796 | struct gve_ptype_lut *ptype_lut_dqo; |
797 | |
798 | /* Must be a power of two. */ |
799 | u16 data_buffer_size_dqo; |
800 | u16 max_rx_buffer_size; /* device limit */ |
801 | |
802 | enum gve_queue_format queue_format; |
803 | |
804 | /* Interrupt coalescing settings */ |
805 | u32 tx_coalesce_usecs; |
806 | u32 rx_coalesce_usecs; |
807 | |
808 | u16 header_buf_size; /* device configured, header-split supported if non-zero */ |
809 | bool header_split_enabled; /* True if the header split is enabled by the user */ |
810 | }; |
811 | |
812 | enum gve_service_task_flags_bit { |
813 | GVE_PRIV_FLAGS_DO_RESET = 1, |
814 | GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2, |
815 | GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3, |
816 | GVE_PRIV_FLAGS_DO_REPORT_STATS = 4, |
817 | }; |
818 | |
819 | enum gve_state_flags_bit { |
820 | GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1, |
821 | GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2, |
822 | GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3, |
823 | GVE_PRIV_FLAGS_NAPI_ENABLED = 4, |
824 | }; |
825 | |
826 | enum gve_ethtool_flags_bit { |
827 | GVE_PRIV_FLAGS_REPORT_STATS = 0, |
828 | }; |
829 | |
830 | static inline bool gve_get_do_reset(struct gve_priv *priv) |
831 | { |
832 | return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); |
833 | } |
834 | |
835 | static inline void gve_set_do_reset(struct gve_priv *priv) |
836 | { |
837 | set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); |
838 | } |
839 | |
840 | static inline void gve_clear_do_reset(struct gve_priv *priv) |
841 | { |
842 | clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); |
843 | } |
844 | |
845 | static inline bool gve_get_reset_in_progress(struct gve_priv *priv) |
846 | { |
847 | return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, |
848 | &priv->service_task_flags); |
849 | } |
850 | |
851 | static inline void gve_set_reset_in_progress(struct gve_priv *priv) |
852 | { |
853 | set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); |
854 | } |
855 | |
856 | static inline void gve_clear_reset_in_progress(struct gve_priv *priv) |
857 | { |
858 | clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); |
859 | } |
860 | |
861 | static inline bool gve_get_probe_in_progress(struct gve_priv *priv) |
862 | { |
863 | return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, |
864 | &priv->service_task_flags); |
865 | } |
866 | |
867 | static inline void gve_set_probe_in_progress(struct gve_priv *priv) |
868 | { |
869 | set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); |
870 | } |
871 | |
872 | static inline void gve_clear_probe_in_progress(struct gve_priv *priv) |
873 | { |
874 | clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); |
875 | } |
876 | |
877 | static inline bool gve_get_do_report_stats(struct gve_priv *priv) |
878 | { |
879 | return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, |
880 | &priv->service_task_flags); |
881 | } |
882 | |
883 | static inline void gve_set_do_report_stats(struct gve_priv *priv) |
884 | { |
885 | set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); |
886 | } |
887 | |
888 | static inline void gve_clear_do_report_stats(struct gve_priv *priv) |
889 | { |
890 | clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); |
891 | } |
892 | |
893 | static inline bool gve_get_admin_queue_ok(struct gve_priv *priv) |
894 | { |
895 | return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); |
896 | } |
897 | |
898 | static inline void gve_set_admin_queue_ok(struct gve_priv *priv) |
899 | { |
900 | set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); |
901 | } |
902 | |
903 | static inline void gve_clear_admin_queue_ok(struct gve_priv *priv) |
904 | { |
905 | clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); |
906 | } |
907 | |
908 | static inline bool gve_get_device_resources_ok(struct gve_priv *priv) |
909 | { |
910 | return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); |
911 | } |
912 | |
913 | static inline void gve_set_device_resources_ok(struct gve_priv *priv) |
914 | { |
915 | set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); |
916 | } |
917 | |
918 | static inline void gve_clear_device_resources_ok(struct gve_priv *priv) |
919 | { |
920 | clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); |
921 | } |
922 | |
923 | static inline bool gve_get_device_rings_ok(struct gve_priv *priv) |
924 | { |
925 | return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); |
926 | } |
927 | |
928 | static inline void gve_set_device_rings_ok(struct gve_priv *priv) |
929 | { |
930 | set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); |
931 | } |
932 | |
933 | static inline void gve_clear_device_rings_ok(struct gve_priv *priv) |
934 | { |
935 | clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); |
936 | } |
937 | |
938 | static inline bool gve_get_napi_enabled(struct gve_priv *priv) |
939 | { |
940 | return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); |
941 | } |
942 | |
943 | static inline void gve_set_napi_enabled(struct gve_priv *priv) |
944 | { |
945 | set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); |
946 | } |
947 | |
948 | static inline void gve_clear_napi_enabled(struct gve_priv *priv) |
949 | { |
950 | clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); |
951 | } |
952 | |
953 | static inline bool gve_get_report_stats(struct gve_priv *priv) |
954 | { |
955 | return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); |
956 | } |
957 | |
958 | static inline void gve_clear_report_stats(struct gve_priv *priv) |
959 | { |
960 | clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); |
961 | } |
962 | |
963 | /* Returns the address of the ntfy_blocks irq doorbell |
964 | */ |
965 | static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv, |
966 | struct gve_notify_block *block) |
967 | { |
968 | return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)]; |
969 | } |
970 | |
971 | /* Returns the index into ntfy_blocks of the given tx ring's block |
972 | */ |
973 | static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) |
974 | { |
975 | return queue_idx; |
976 | } |
977 | |
978 | /* Returns the index into ntfy_blocks of the given rx ring's block |
979 | */ |
980 | static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) |
981 | { |
982 | return (priv->num_ntfy_blks / 2) + queue_idx; |
983 | } |
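/* As the two helpers above imply, the notification blocks are split in half:
 * blocks [0, num_ntfy_blks / 2) service TX queues and blocks
 * [num_ntfy_blks / 2, num_ntfy_blks) service RX queues, which is why
 * num_ntfy_blks must be even.
 */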
984 | |
985 | static inline bool gve_is_qpl(struct gve_priv *priv) |
986 | { |
987 | return priv->queue_format == GVE_GQI_QPL_FORMAT || |
988 | priv->queue_format == GVE_DQO_QPL_FORMAT; |
989 | } |
990 | |
991 | /* Returns the number of tx queue page lists */ |
992 | static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg, |
993 | int num_xdp_queues, |
994 | bool is_qpl) |
995 | { |
996 | if (!is_qpl) |
997 | return 0; |
998 | return tx_cfg->num_queues + num_xdp_queues; |
999 | } |
1000 | |
1001 | /* Returns the number of XDP tx queue page lists |
1002 | */ |
1003 | static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) |
1004 | { |
1005 | if (priv->queue_format != GVE_GQI_QPL_FORMAT) |
1006 | return 0; |
1007 | |
1008 | return priv->num_xdp_queues; |
1009 | } |
1010 | |
1011 | /* Returns the number of rx queue page lists */ |
1012 | static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg, |
1013 | bool is_qpl) |
1014 | { |
1015 | if (!is_qpl) |
1016 | return 0; |
1017 | return rx_cfg->num_queues; |
1018 | } |
1019 | |
1020 | static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid) |
1021 | { |
1022 | return tx_qid; |
1023 | } |
1024 | |
1025 | static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid) |
1026 | { |
1027 | return priv->tx_cfg.max_queues + rx_qid; |
1028 | } |
1029 | |
1030 | /* Returns the index into priv->qpls where a certain rx queue's QPL resides */ |
1031 | static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid) |
1032 | { |
1033 | return tx_cfg->max_queues + rx_qid; |
1034 | } |
1035 | |
1036 | static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv) |
1037 | { |
1038 | return gve_tx_qpl_id(priv, 0); |
1039 | } |
1040 | |
1041 | /* Returns the index into priv->qpls where the first rx queue's QPL resides */ |
1042 | static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg) |
1043 | { |
1044 | return gve_get_rx_qpl_id(tx_cfg, 0); |
1045 | } |
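/* QPL id layout implied by the helpers above: ids [0, tx_cfg.max_queues) are
 * TX QPLs, indexed by TX queue id (including any XDP TX queues), and ids
 * starting at tx_cfg.max_queues are RX QPLs, indexed by RX queue id.
 */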
1046 | |
1047 | /* Returns a pointer to the next available tx qpl in the list of qpls */ |
1048 | static inline |
1049 | struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg, |
1050 | int tx_qid) |
1051 | { |
1052 | /* QPL already in use */ |
1053 | if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map)) |
1054 | return NULL; |
1055 | set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map); |
1056 | return &cfg->qpls[tx_qid]; |
1057 | } |
1058 | |
1059 | /* Returns a pointer to the next available rx qpl in the list of qpls */ |
1060 | static inline |
1061 | struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg, |
1062 | int rx_qid) |
1063 | { |
1064 | int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid); |
1065 | /* QPL already in use */ |
1066 | if (test_bit(id, cfg->qpl_cfg->qpl_id_map)) |
1067 | return NULL; |
1068 | set_bit(id, cfg->qpl_cfg->qpl_id_map); |
1069 | return &cfg->qpls[id]; |
1070 | } |
1071 | |
1072 | /* Unassigns the qpl with the given id */ |
1073 | static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id) |
1074 | { |
1075 | clear_bit(id, qpl_cfg->qpl_id_map); |
1076 | } |
1077 | |
1078 | /* Returns the correct dma direction for tx and rx qpls */ |
1079 | static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, |
1080 | int id) |
1081 | { |
1082 | if (id < gve_rx_start_qpl_id(&priv->tx_cfg)) |
1083 | return DMA_TO_DEVICE; |
1084 | else |
1085 | return DMA_FROM_DEVICE; |
1086 | } |
1087 | |
1088 | static inline bool gve_is_gqi(struct gve_priv *priv) |
1089 | { |
1090 | return priv->queue_format == GVE_GQI_RDA_FORMAT || |
1091 | priv->queue_format == GVE_GQI_QPL_FORMAT; |
1092 | } |
1093 | |
1094 | static inline u32 gve_num_tx_queues(struct gve_priv *priv) |
1095 | { |
1096 | return priv->tx_cfg.num_queues + priv->num_xdp_queues; |
1097 | } |
1098 | |
1099 | static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id) |
1100 | { |
1101 | return priv->tx_cfg.num_queues + queue_id; |
1102 | } |
1103 | |
1104 | static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv) |
1105 | { |
1106 | return gve_xdp_tx_queue_id(priv, 0); |
1107 | } |
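/* XDP TX queues are appended after the regular TX queues: XDP queue i maps to
 * TX ring index tx_cfg.num_queues + i, and gve_num_tx_queues() counts both.
 */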
1108 | |
1109 | /* gqi napi handler defined in gve_main.c */ |
1110 | int gve_napi_poll(struct napi_struct *napi, int budget); |
1111 | |
1112 | /* buffers */ |
1113 | int gve_alloc_page(struct gve_priv *priv, struct device *dev, |
1114 | struct page **page, dma_addr_t *dma, |
1115 | enum dma_data_direction, gfp_t gfp_flags); |
1116 | void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, |
1117 | enum dma_data_direction); |
1118 | /* tx handling */ |
1119 | netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); |
1120 | int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, |
1121 | u32 flags); |
1122 | int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, |
1123 | void *data, int len, void *frame_p); |
1124 | void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid); |
1125 | bool gve_tx_poll(struct gve_notify_block *block, int budget); |
1126 | bool gve_xdp_poll(struct gve_notify_block *block, int budget); |
1127 | int gve_tx_alloc_rings_gqi(struct gve_priv *priv, |
1128 | struct gve_tx_alloc_rings_cfg *cfg); |
1129 | void gve_tx_free_rings_gqi(struct gve_priv *priv, |
1130 | struct gve_tx_alloc_rings_cfg *cfg); |
1131 | void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx); |
1132 | void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx); |
1133 | u32 gve_tx_load_event_counter(struct gve_priv *priv, |
1134 | struct gve_tx_ring *tx); |
1135 | bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); |
1136 | /* rx handling */ |
1137 | void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx); |
1138 | int gve_rx_poll(struct gve_notify_block *block, int budget); |
1139 | bool gve_rx_work_pending(struct gve_rx_ring *rx); |
1140 | int gve_rx_alloc_rings(struct gve_priv *priv); |
1141 | int gve_rx_alloc_rings_gqi(struct gve_priv *priv, |
1142 | struct gve_rx_alloc_rings_cfg *cfg); |
1143 | void gve_rx_free_rings_gqi(struct gve_priv *priv, |
1144 | struct gve_rx_alloc_rings_cfg *cfg); |
1145 | void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx); |
1146 | void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx); |
1147 | u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit); |
1148 | bool gve_header_split_supported(const struct gve_priv *priv); |
1149 | int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split); |
1150 | /* Reset */ |
1151 | void gve_schedule_reset(struct gve_priv *priv); |
1152 | int gve_reset(struct gve_priv *priv, bool attempt_teardown); |
1153 | int gve_adjust_queues(struct gve_priv *priv, |
1154 | struct gve_queue_config new_rx_config, |
1155 | struct gve_queue_config new_tx_config); |
1156 | /* report stats handling */ |
1157 | void gve_handle_report_stats(struct gve_priv *priv); |
1158 | /* exported by ethtool.c */ |
1159 | extern const struct ethtool_ops gve_ethtool_ops; |
1160 | /* needed by ethtool */ |
1161 | extern char gve_driver_name[]; |
1162 | extern const char gve_version_str[]; |
1163 | #endif /* _GVE_H_ */ |
1164 |