/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

/* GVE DQO Descriptor formats */

#ifndef _GVE_DESC_DQO_H_
#define _GVE_DESC_DQO_H_

#include <linux/build_bug.h>

#define GVE_TX_MAX_HDR_SIZE_DQO 255
#define GVE_TX_MIN_TSO_MSS_DQO 88

#ifndef __LITTLE_ENDIAN_BITFIELD
#error "Only little endian supported"
#endif

/* Basic TX descriptor (DTYPE 0x0c) */
struct gve_tx_pkt_desc_dqo {
	__le64 buf_addr;

	/* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
	u8 dtype: 5;

	/* Denotes the last descriptor of a packet. */
	u8 end_of_packet: 1;
	u8 checksum_offload_enable: 1;

	/* If set, will generate a descriptor completion for this descriptor. */
	u8 report_event: 1;
	u8 reserved0;
	__le16 reserved1;

	/* The TX completion associated with this packet will contain this tag.
	 */
	__le16 compl_tag;
	u16 buf_size: 14;
	u16 reserved2: 2;
} __packed;
static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16);

#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc
#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)
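
/* Illustrative sketch (not part of the upstream header): how a driver might
 * fill one basic TX packet descriptor for a single mapped buffer. Assumes the
 * usual kernel type and byteorder helpers (cpu_to_le64() etc.) are available,
 * as they are in the .c files that include this header; the helper name and
 * parameters are hypothetical.
 */
static inline void gve_example_fill_pkt_desc(struct gve_tx_pkt_desc_dqo *desc,
					     u64 dma_addr, u16 len,
					     u16 compl_tag, bool is_eop)
{
	/* len must not exceed GVE_TX_MAX_BUF_SIZE_DQO (16K - 1). */
	*desc = (struct gve_tx_pkt_desc_dqo) {
		.buf_addr = cpu_to_le64(dma_addr),
		.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
		.end_of_packet = is_eop,
		.checksum_offload_enable = 1,
		.compl_tag = cpu_to_le16(compl_tag),
		.buf_size = len,
	};
}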

/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */
#define GVE_TX_MAX_DATA_DESCS 10

/* Min gap between tail and head to avoid cacheline overlap */
#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4

/* "report_event" on TX packet descriptors may only be set on the last
 * descriptor of a TX packet, and such descriptors must be spaced at least
 * this many descriptors apart.
 */
#define GVE_TX_MIN_RE_INTERVAL 32
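
/* Illustrative sketch (not part of the upstream header): a driver might track
 * how many descriptors it has queued since the last report_event and only set
 * the bit on a packet's final descriptor once that count reaches
 * GVE_TX_MIN_RE_INTERVAL. Helper name and parameters are hypothetical.
 */
static inline bool gve_example_should_report_event(u32 descs_since_last_re,
						   bool is_eop)
{
	return is_eop && descs_since_last_re >= GVE_TX_MIN_RE_INTERVAL;
}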

struct gve_tx_context_cmd_dtype {
	u8 dtype: 5;
	u8 tso: 1;
	u8 reserved1: 2;

	u8 reserved2;
};

static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2);

/* TX Native TSO Context DTYPE (0x05)
 *
 * "flex" fields allow the driver to send additional packet context to HW.
 */
struct gve_tx_tso_context_desc_dqo {
	/* The L4 payload bytes that should be segmented. */
	u32 tso_total_len: 24;
	u32 flex10: 8;

	/* Max segment size in TSO excluding headers. */
	u16 mss: 14;
	u16 reserved: 2;

	u8 header_len; /* Header length to use for TSO offload */
	u8 flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	u8 flex0;
	u8 flex5;
	u8 flex6;
	u8 flex7;
	u8 flex8;
	u8 flex9;
} __packed;
static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16);

#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5
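
/* Illustrative sketch (not part of the upstream header): filling a TSO context
 * descriptor. tso_total_len is the L4 payload to be segmented (packet length
 * minus header_len), mss should be at least GVE_TX_MIN_TSO_MSS_DQO, and
 * header_len may not exceed GVE_TX_MAX_HDR_SIZE_DQO. Helper name and
 * parameters are hypothetical.
 */
static inline void
gve_example_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
			      u32 payload_len, u16 mss, u8 header_len)
{
	*desc = (struct gve_tx_tso_context_desc_dqo) {
		.tso_total_len = payload_len,
		.mss = mss,
		.header_len = header_len,
		.cmd_dtype = {
			.dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
			.tso = 1,
		},
	};
}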

/* General context descriptor for sending metadata. */
struct gve_tx_general_context_desc_dqo {
	u8 flex4;
	u8 flex5;
	u8 flex6;
	u8 flex7;
	u8 flex8;
	u8 flex9;
	u8 flex10;
	u8 flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	u16 reserved;
	u8 flex0;
	u8 flex1;
	u8 flex2;
	u8 flex3;
} __packed;
static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16);

#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4

/* Logical structure of metadata which is packed into context descriptor flex
 * fields.
 */
struct gve_tx_metadata_dqo {
	union {
		struct {
			u8 version;

			/* If `skb->l4_hash` is set, this value should be
			 * derived from `skb->hash`.
			 *
			 * A zero value means no l4_hash was associated with the
			 * skb.
			 */
			u16 path_hash: 15;

			/* Should be set to 1 if the flow associated with the
			 * skb had a rehash from the TCP stack.
			 */
			u16 rehash_event: 1;
		} __packed;
		u8 bytes[12];
	};
} __packed;
static_assert(sizeof(struct gve_tx_metadata_dqo) == 12);

#define GVE_TX_METADATA_VERSION_DQO 0
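
/* Illustrative sketch (not part of the upstream header): the 12 metadata bytes
 * above are carried in the flex fields of the general context descriptor. A
 * driver might pack them as below, with path_hash derived from skb->hash when
 * skb->l4_hash is set (15 bits, zero meaning "no hash"). Helper name is
 * hypothetical.
 */
static inline void
gve_example_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
				  const struct gve_tx_metadata_dqo *metadata)
{
	*desc = (struct gve_tx_general_context_desc_dqo) {
		.flex0 = metadata->bytes[0],
		.flex1 = metadata->bytes[1],
		.flex2 = metadata->bytes[2],
		.flex3 = metadata->bytes[3],
		.flex4 = metadata->bytes[4],
		.flex5 = metadata->bytes[5],
		.flex6 = metadata->bytes[6],
		.flex7 = metadata->bytes[7],
		.flex8 = metadata->bytes[8],
		.flex9 = metadata->bytes[9],
		.flex10 = metadata->bytes[10],
		.flex11 = metadata->bytes[11],
		.cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
	};
}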

/* TX completion descriptor */
struct gve_tx_compl_desc {
	/* For types 0-4 this is the TX queue ID associated with this
	 * completion.
	 */
	u16 id: 11;

	/* See: GVE_COMPL_TYPE_DQO* */
	u16 type: 3;
	u16 reserved0: 1;

	/* Flipped by HW to notify the descriptor is populated. */
	u16 generation: 1;
	union {
		/* For descriptor completions, this is the last index fetched
		 * by HW + 1.
		 */
		__le16 tx_head;

		/* For packet completions, this is the completion tag set on the
		 * TX packet descriptors.
		 */
		__le16 completion_tag;
	};
	__le32 reserved1;
} __packed;
static_assert(sizeof(struct gve_tx_compl_desc) == 8);

#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */

/* The most significant bit in the completion tag can change the completion
 * type from packet completion to miss path completion.
 */
#define GVE_ALT_MISS_COMPL_BIT BIT(15)
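
/* Illustrative sketch (not part of the upstream header): once the generation
 * bit matches the value the driver currently expects, a completion can be
 * classified. A packet completion whose tag has GVE_ALT_MISS_COMPL_BIT set is
 * treated like a miss path completion. Helper name is hypothetical; BIT() and
 * le16_to_cpu() come from the usual kernel headers.
 */
static inline bool
gve_example_tx_compl_is_miss(const struct gve_tx_compl_desc *desc)
{
	u16 tag = le16_to_cpu(desc->completion_tag);

	return desc->type == GVE_COMPL_TYPE_DQO_MISS ||
	       (desc->type == GVE_COMPL_TYPE_DQO_PKT &&
		(tag & GVE_ALT_MISS_COMPL_BIT));
}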

/* Descriptor to post buffers to HW on buffer queue. */
struct gve_rx_desc_dqo {
	__le16 buf_id; /* ID returned in Rx completion descriptor */
	__le16 reserved0;
	__le32 reserved1;
	__le64 buf_addr; /* DMA address of the buffer */
	__le64 header_buf_addr;
	__le64 reserved2;
} __packed;
static_assert(sizeof(struct gve_rx_desc_dqo) == 32);
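
/* Illustrative sketch (not part of the upstream header): posting one buffer
 * (plus a small header buffer when header split is in use) on the buffer
 * queue. buf_id is echoed back in the matching RX completion descriptor.
 * Helper name and parameters are hypothetical.
 */
static inline void gve_example_post_rx_buf(struct gve_rx_desc_dqo *desc,
					   u16 buf_id, u64 buf_dma,
					   u64 hdr_buf_dma)
{
	*desc = (struct gve_rx_desc_dqo) {
		.buf_id = cpu_to_le16(buf_id),
		.buf_addr = cpu_to_le64(buf_dma),
		.header_buf_addr = cpu_to_le64(hdr_buf_dma),
	};
}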

/* Descriptor for HW to notify SW of new packets received on RX queue. */
struct gve_rx_compl_desc_dqo {
	/* Must be 1 */
	u8 rxdid: 4;
	u8 reserved0: 4;

	/* Packet originated from this system rather than the network. */
	u8 loopback: 1;
	/* Set when IPv6 packet contains a destination options header or routing
	 * header.
	 */
	u8 ipv6_ex_add: 1;
	/* Invalid packet was received. */
	u8 rx_error: 1;
	u8 reserved1: 5;

	u16 packet_type: 10;
	u16 ip_hdr_err: 1;
	u16 udp_len_err: 1;
	u16 raw_cs_invalid: 1;
	u16 reserved2: 3;

	u16 packet_len: 14;
	/* Flipped by HW to notify the descriptor is populated. */
	u16 generation: 1;
	/* Should be zero. */
	u16 buffer_queue_id: 1;

	u16 header_len: 10;
	u16 rsc: 1;
	u16 split_header: 1;
	u16 reserved3: 4;

	u8 descriptor_done: 1;
	u8 end_of_packet: 1;
	u8 header_buffer_overflow: 1;
	u8 l3_l4_processed: 1;
	u8 csum_ip_err: 1;
	u8 csum_l4_err: 1;
	u8 csum_external_ip_err: 1;
	u8 csum_external_udp_err: 1;

	u8 status_error1;

	__le16 reserved5;
	__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */

	union {
		/* Packet checksum. */
		__le16 raw_cs;
		/* Segment length for RSC packets. */
		__le16 rsc_seg_len;
	};
	__le32 hash;
	__le32 reserved6;
	__le64 reserved7;
} __packed;

static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
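
/* Illustrative sketch (not part of the upstream header): consuming an RX
 * completion. The descriptor is only valid once its generation bit matches
 * the value the driver currently expects for the ring; rx_error marks packets
 * that should be dropped. Helper names are hypothetical.
 */
static inline bool
gve_example_rx_compl_is_valid(const struct gve_rx_compl_desc_dqo *desc,
			      u8 cur_gen_bit)
{
	return desc->generation == cur_gen_bit;
}

static inline int
gve_example_rx_compl_len(const struct gve_rx_compl_desc_dqo *desc)
{
	if (desc->rx_error)
		return -1;	/* HW flagged an invalid packet: drop it. */
	return desc->packet_len;
}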

/* Ringing the doorbell too often can hurt performance.
 *
 * HW requires this value to be at least 8.
 */
#define GVE_RX_BUF_THRESH_DQO 32
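
/* Illustrative sketch (not part of the upstream header): rather than ringing
 * the buffer queue doorbell for every posted buffer, a driver can batch posts
 * and only ring once at least GVE_RX_BUF_THRESH_DQO buffers are pending.
 * Helper name and parameter are hypothetical.
 */
static inline bool gve_example_should_ring_rx_doorbell(u32 bufs_posted_since_db)
{
	return bufs_posted_since_db >= GVE_RX_BUF_THRESH_DQO;
}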

#endif /* _GVE_DESC_DQO_H_ */