1 | /* SPDX-License-Identifier: (GPL-2.0 OR MIT) |
2 | * Google virtual Ethernet (gve) driver |
3 | * |
4 | * Copyright (C) 2015-2021 Google, Inc. |
5 | */ |
6 | |
7 | #ifndef _GVE_ADMINQ_H |
8 | #define _GVE_ADMINQ_H |
9 | |
10 | #include <linux/build_bug.h> |
11 | |
/* Admin queue opcodes (driver -> device commands) */
enum gve_adminq_opcodes {
	GVE_ADMINQ_DESCRIBE_DEVICE = 0x1,
	GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES = 0x2,
	GVE_ADMINQ_REGISTER_PAGE_LIST = 0x3,
	GVE_ADMINQ_UNREGISTER_PAGE_LIST = 0x4,
	GVE_ADMINQ_CREATE_TX_QUEUE = 0x5,
	GVE_ADMINQ_CREATE_RX_QUEUE = 0x6,
	GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7,
	GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
	GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
	/* 0xA is not used */
	GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
	GVE_ADMINQ_REPORT_STATS = 0xC,
	GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
	GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
	GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
};
29 | |
/* Admin queue status codes written back by the device into the command's
 * status field. 0x0 means the command has not been processed yet; error
 * codes occupy the top of the 32-bit range.
 */
enum gve_adminq_statuses {
	GVE_ADMINQ_COMMAND_UNSET = 0x0,	/* not yet completed by the device */
	GVE_ADMINQ_COMMAND_PASSED = 0x1,
	GVE_ADMINQ_COMMAND_ERROR_ABORTED = 0xFFFFFFF0,
	GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS = 0xFFFFFFF1,
	GVE_ADMINQ_COMMAND_ERROR_CANCELLED = 0xFFFFFFF2,
	GVE_ADMINQ_COMMAND_ERROR_DATALOSS = 0xFFFFFFF3,
	GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED = 0xFFFFFFF4,
	GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION = 0xFFFFFFF5,
	GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR = 0xFFFFFFF6,
	GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT = 0xFFFFFFF7,
	GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND = 0xFFFFFFF8,
	GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE = 0xFFFFFFF9,
	GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED = 0xFFFFFFFA,
	GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED = 0xFFFFFFFB,
	GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED = 0xFFFFFFFC,
	GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE = 0xFFFFFFFD,
	GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED = 0xFFFFFFFE,
	GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR = 0xFFFFFFFF,
};
51 | |
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1

/* All AdminQ command structs should be naturally packed. The static_assert
 * calls make sure this is the case at compile time.
 */

/* GVE_ADMINQ_DESCRIBE_DEVICE command: tells the device where to DMA its
 * device descriptor (struct gve_device_descriptor below).
 */
struct gve_adminq_describe_device {
	__be64 device_descriptor_addr;	/* DMA address of the descriptor buffer */
	__be32 device_descriptor_version; /* GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION */
	__be32 available_length;	/* size of the buffer at device_descriptor_addr */
};

static_assert(sizeof(struct gve_adminq_describe_device) == 16);
65 | |
/* Descriptor the device writes in response to GVE_ADMINQ_DESCRIBE_DEVICE.
 * May be followed by num_device_options struct gve_device_option entries;
 * total_length covers the descriptor plus all options.
 */
struct gve_device_descriptor {
	__be64 max_registered_pages;
	__be16 reserved1;
	__be16 tx_queue_entries;	/* default TX ring size */
	__be16 rx_queue_entries;	/* default RX ring size */
	__be16 default_num_queues;
	__be16 mtu;
	__be16 counters;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
	u8 mac[ETH_ALEN];		/* device MAC address */
	__be16 num_device_options;
	__be16 total_length;
	u8 reserved2[6];
};

static_assert(sizeof(struct gve_device_descriptor) == 40);
83 | |
/* Header of one device option appended to the device descriptor; an
 * option-specific payload of option_length bytes follows this header.
 */
struct gve_device_option {
	__be16 option_id;		/* one of enum gve_dev_opt_id */
	__be16 option_length;		/* length of the option payload */
	__be32 required_features_mask;	/* features the driver must support to use this option */
};

static_assert(sizeof(struct gve_device_option) == 8);
91 | |
/* Payload for GVE_DEV_OPT_ID_GQI_RDA. */
struct gve_device_option_gqi_rda {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);

/* Payload for GVE_DEV_OPT_ID_GQI_QPL. */
struct gve_device_option_gqi_qpl {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);

/* Payload for GVE_DEV_OPT_ID_DQO_RDA. */
struct gve_device_option_dqo_rda {
	__be32 supported_features_mask;
	__be16 tx_comp_ring_entries;	/* TX completion ring size */
	__be16 rx_buff_ring_entries;	/* RX buffer ring size */
};

static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);

/* Payload for GVE_DEV_OPT_ID_DQO_QPL. */
struct gve_device_option_dqo_qpl {
	__be32 supported_features_mask;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
};

static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8);
119 | |
/* Payload for GVE_DEV_OPT_ID_JUMBO_FRAMES. */
struct gve_device_option_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;			/* largest MTU the device supports */
	u8 padding[2];
};

static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
127 | |
128 | struct gve_device_option_buffer_sizes { |
129 | /* GVE_SUP_BUFFER_SIZES_MASK bit should be set */ |
130 | __be32 supported_features_mask; |
131 | __be16 packet_buffer_size; |
132 | __be16 ; |
133 | }; |
134 | |
135 | static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8); |
136 | |
/* Terminology:
 *
 * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
 * mapped and read/updated by the device.
 *
 * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
 * the device for read/write and data is copied from/to SKBs.
 */
/* Device option IDs (struct gve_device_option.option_id). Values are not
 * contiguous: 0x5 and 0x6 are not defined here, and 0x9 is skipped.
 */
enum gve_dev_opt_id {
	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
	GVE_DEV_OPT_ID_DQO_QPL = 0x7,
	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
	GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
154 | |
/* Expected required_features_mask for each device option; all are currently
 * zero, i.e. no option requires any driver feature.
 */
enum gve_dev_opt_req_feat_mask {
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
};
164 | |
/* Bits checked in an option's supported_features_mask. */
enum gve_sup_feature_mask {
	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
	GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
169 | |
/* The raw-addressing option carries no payload. */
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0

#define GVE_VERSION_STR_LEN 128

/* Bit positions within gve_driver_info.driver_capability_flags.
 * NOTE(review): "capbility" is a long-standing typo in the tag name; it is
 * part of the interface and must not be renamed here.
 */
enum gve_driver_capbility {
	gve_driver_capability_gqi_qpl = 0,
	gve_driver_capability_gqi_rda = 1,
	gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
	gve_driver_capability_dqo_rda = 3,
	gve_driver_capability_alt_miss_compl = 4,
	gve_driver_capability_flexible_buffer_size = 5,
};
182 | |
/* Map a capability index to its bit within the matching 64-bit
 * driver_capability_flags word (CAP1 covers bits 0-63, CAP2 64-127, ...).
 * The argument is fully parenthesized so expression arguments expand safely.
 */
#define GVE_CAP1(a) BIT((int)(a))
#define GVE_CAP2(a) BIT(((int)(a)) - 64)
#define GVE_CAP3(a) BIT(((int)(a)) - 128)
#define GVE_CAP4(a) BIT(((int)(a)) - 192)
187 | |
/* Capabilities this driver advertises to the device; note dqo_qpl is
 * deliberately absent from FLAGS1 (marked reserved in the enum above).
 */
#define GVE_DRIVER_CAPABILITY_FLAGS1 \
	(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
	 GVE_CAP1(gve_driver_capability_gqi_rda) | \
	 GVE_CAP1(gve_driver_capability_dqo_rda) | \
	 GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
	 GVE_CAP1(gve_driver_capability_flexible_buffer_size))

#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
198 | |
/* Driver/OS identification blob handed to the device via the
 * GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY command.
 */
struct gve_driver_info {
	u8 os_type;	/* 0x01 = Linux */
	u8 driver_major;
	u8 driver_minor;
	u8 driver_sub;
	__be32 os_version_major;
	__be32 os_version_minor;
	__be32 os_version_sub;
	__be64 driver_capability_flags[4];	/* GVE_DRIVER_CAPABILITY_FLAGS1..4 */
	u8 os_version_str1[GVE_VERSION_STR_LEN];
	u8 os_version_str2[GVE_VERSION_STR_LEN];
};

/* GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY command: points the device at a
 * DMA-mapped struct gve_driver_info.
 */
struct gve_adminq_verify_driver_compatibility {
	__be64 driver_info_len;
	__be64 driver_info_addr;
};

static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);
218 | |
/* GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES command. */
struct gve_adminq_configure_device_resources {
	__be64 counter_array;		/* DMA address of the counter array */
	__be64 irq_db_addr;		/* DMA address of the irq doorbell array */
	__be32 num_counters;
	__be32 num_irq_dbs;
	__be32 irq_db_stride;		/* bytes between consecutive irq doorbells */
	__be32 ntfy_blk_msix_base_idx;
	u8 queue_format;
	u8 padding[7];
};

static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
231 | |
/* GVE_ADMINQ_REGISTER_PAGE_LIST command: registers a queue page list (QPL). */
struct gve_adminq_register_page_list {
	__be32 page_list_id;
	__be32 num_pages;
	__be64 page_address_list_addr;	/* DMA address of the array of page addresses */
	__be64 page_size;
};

static_assert(sizeof(struct gve_adminq_register_page_list) == 24);

/* GVE_ADMINQ_UNREGISTER_PAGE_LIST command. */
struct gve_adminq_unregister_page_list {
	__be32 page_list_id;
};

static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);

/* Sentinel queue_page_list_id meaning raw DMA addressing (no QPL). */
#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
248 | |
/* GVE_ADMINQ_CREATE_TX_QUEUE command. */
struct gve_adminq_create_tx_queue {
	__be32 queue_id;
	__be32 reserved;
	__be64 queue_resources_addr;	/* DMA address of struct gve_queue_resources */
	__be64 tx_ring_addr;
	__be32 queue_page_list_id;	/* GVE_RAW_ADDRESSING_QPL_ID if no QPL */
	__be32 ntfy_id;
	__be64 tx_comp_ring_addr;
	__be16 tx_ring_size;
	__be16 tx_comp_ring_size;
	u8 padding[4];
};

static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
263 | |
264 | struct gve_adminq_create_rx_queue { |
265 | __be32 queue_id; |
266 | __be32 index; |
267 | __be32 reserved; |
268 | __be32 ntfy_id; |
269 | __be64 queue_resources_addr; |
270 | __be64 rx_desc_ring_addr; |
271 | __be64 rx_data_ring_addr; |
272 | __be32 queue_page_list_id; |
273 | __be16 rx_ring_size; |
274 | __be16 packet_buffer_size; |
275 | __be16 rx_buff_ring_size; |
276 | u8 enable_rsc; |
277 | u8 padding1; |
278 | __be16 ; |
279 | u8 padding2[2]; |
280 | }; |
281 | |
282 | static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56); |
283 | |
/* Queue resources that are shared with the device */
struct gve_queue_resources {
	union {
		struct {
			__be32 db_index;	/* Device -> Guest */
			__be32 counter_index;	/* Device -> Guest */
		};
		u8 reserved[64];	/* pads the struct to a fixed 64 bytes */
	};
};

static_assert(sizeof(struct gve_queue_resources) == 64);
296 | |
/* GVE_ADMINQ_DESTROY_TX_QUEUE command. */
struct gve_adminq_destroy_tx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);

/* GVE_ADMINQ_DESTROY_RX_QUEUE command. */
struct gve_adminq_destroy_rx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);
308 | |
/* GVE Set Driver Parameter Types */
enum gve_set_driver_param_types {
	GVE_SET_PARAM_MTU = 0x1,
};

/* GVE_ADMINQ_SET_DRIVER_PARAMETER command. */
struct gve_adminq_set_driver_parameter {
	__be32 parameter_type;	/* one of enum gve_set_driver_param_types */
	u8 reserved[4];
	__be64 parameter_value;
};

static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
321 | |
/* GVE_ADMINQ_REPORT_STATS command: points the device at a DMA-mapped
 * struct gve_stats_report buffer.
 */
struct gve_adminq_report_stats {
	__be64 stats_report_len;
	__be64 stats_report_addr;
	__be64 interval;	/* reporting interval */
};

static_assert(sizeof(struct gve_adminq_report_stats) == 24);

/* GVE_ADMINQ_REPORT_LINK_SPEED command: device writes the link speed to
 * the given DMA address.
 */
struct gve_adminq_report_link_speed {
	__be64 link_speed_address;
};

static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
335 | |
/* One stat entry in the stats report. */
struct stats {
	__be32 stat_name;	/* one of enum gve_stat_names */
	__be32 queue_id;
	__be64 value;
};

static_assert(sizeof(struct stats) == 16);

/* Stats report buffer layout shared with the device. */
struct gve_stats_report {
	__be64 written_count;	/* number of valid entries in stats[] */
	struct stats stats[];	/* flexible array of reported stats */
};

static_assert(sizeof(struct gve_stats_report) == 8);
350 | |
/* Stat identifiers used in struct stats.stat_name. Values below 65 are
 * reported by the driver, 65 and above by the NIC.
 */
enum gve_stat_names {
	// stats from gve
	TX_WAKE_CNT = 1,
	TX_STOP_CNT = 2,
	TX_FRAMES_SENT = 3,
	TX_BYTES_SENT = 4,
	TX_LAST_COMPLETION_PROCESSED = 5,
	RX_NEXT_EXPECTED_SEQUENCE = 6,
	RX_BUFFERS_POSTED = 7,
	TX_TIMEOUT_CNT = 8,
	// stats from NIC
	RX_QUEUE_DROP_CNT = 65,
	RX_NO_BUFFERS_POSTED = 66,
	RX_DROPS_PACKET_OVER_MRU = 67,
	RX_DROPS_INVALID_CHECKSUM = 68,
};
367 | |
/* L3 protocol classification used in the ptype map. */
enum gve_l3_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L3_TYPE_UNKNOWN = 0,
	GVE_L3_TYPE_OTHER,
	GVE_L3_TYPE_IPV4,
	GVE_L3_TYPE_IPV6,
};

/* L4 protocol classification used in the ptype map. */
enum gve_l4_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L4_TYPE_UNKNOWN = 0,
	GVE_L4_TYPE_OTHER,
	GVE_L4_TYPE_TCP,
	GVE_L4_TYPE_UDP,
	GVE_L4_TYPE_ICMP,
	GVE_L4_TYPE_SCTP,
};
385 | |
/* These are control path types for PTYPE which are the same as the data path
 * types.
 */
struct gve_ptype_entry {
	u8 l3_type;	/* enum gve_l3_type */
	u8 l4_type;	/* enum gve_l4_type */
};

/* Full ptype lookup table written by the device. */
struct gve_ptype_map {
	struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
};

/* GVE_ADMINQ_GET_PTYPE_MAP command: device fills a DMA-mapped
 * struct gve_ptype_map.
 */
struct gve_adminq_get_ptype_map {
	__be64 ptype_map_len;
	__be64 ptype_map_addr;
};
402 | |
/* One admin queue slot: a common opcode/status header followed by the
 * opcode-specific command body, padded to a fixed 64 bytes.
 */
union gve_adminq_command {
	struct {
		__be32 opcode;	/* enum gve_adminq_opcodes */
		__be32 status;	/* enum gve_adminq_statuses, written by the device */
		union {
			struct gve_adminq_configure_device_resources
						configure_device_resources;
			struct gve_adminq_create_tx_queue create_tx_queue;
			struct gve_adminq_create_rx_queue create_rx_queue;
			struct gve_adminq_destroy_tx_queue destroy_tx_queue;
			struct gve_adminq_destroy_rx_queue destroy_rx_queue;
			struct gve_adminq_describe_device describe_device;
			struct gve_adminq_register_page_list reg_page_list;
			struct gve_adminq_unregister_page_list unreg_page_list;
			struct gve_adminq_set_driver_parameter set_driver_param;
			struct gve_adminq_report_stats report_stats;
			struct gve_adminq_report_link_speed report_link_speed;
			struct gve_adminq_get_ptype_map get_ptype_map;
			struct gve_adminq_verify_driver_compatibility
						verify_driver_compatibility;
		};
	};
	u8 reserved[64];	/* pads every command to the fixed slot size */
};

static_assert(sizeof(union gve_adminq_command) == 64);
429 | |
/* Admin queue lifecycle. */
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
/* Command wrappers; each returns 0 on success or a negative errno. */
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);

struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut);
458 | |
459 | #endif /* _GVE_ADMINQ_H */ |
460 | |