/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_ALLOCATE_RESOURCE_RANGE = 22,
	GDMA_DESTROY_RESOURCE_RANGE = 24,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
	GDMA_CREATE_PD = 29,
	GDMA_DESTROY_PD = 30,
	GDMA_CREATE_MR = 31,
	GDMA_DESTROY_MR = 32,
	GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE 27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
	GDMA_EQE_HWC_SOC_RECONFIG = 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
	GDMA_DEVICE_MANA_IB = 3,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
}; /* HW DATA */
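
/* Illustrative sketch (not part of this header): a doorbell value is built
 * by filling the queue-type-specific view and writing the 64-bit as_uint64
 * to the mapped doorbell page. The local names below are hypothetical and
 * the exact tail encoding depends on the queue type:
 *
 *	union gdma_doorbell_entry e = {};
 *
 *	e.eq.id = eq_id;
 *	e.eq.tail_ptr = tail;
 *	e.eq.arm = 1;
 *	writeq(e.as_uint64, doorbell_addr);
 */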

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
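
/* Illustrative usage sketch (req/resp are hypothetical locals):
 *
 *	struct gdma_generate_test_event_req req = {};
 *	struct gdma_general_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *			     sizeof(req), sizeof(resp));
 */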

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID UINT_MAX
#define INVALID_DOORBELL UINT_MAX
#define INVALID_MEM_KEY UINT_MAX
#define INVALID_QUEUE_ID UINT_MAX
#define INVALID_PCI_MSIX_INDEX UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so it can't overflow, and the driver uses
 * the owner bits mechanism to detect whether the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
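
/* Illustrative sketch of the head/tail bookkeeping described above; the
 * local names are hypothetical:
 *
 *	// SQ/RQ producer: advance the head in Basic Units after posting.
 *	wq->head += ALIGN(wqe_size, GDMA_WQE_BU_SIZE) / GDMA_WQE_BU_SIZE;
 *
 *	// SQ/RQ consumer: free the BUs once the matching CQE is processed.
 *	wq->tail += wqe_info.wqe_size_in_bu;
 *
 *	// EQ/CQ: the head advances by one per processed entry.
 *	cq->head++;
 */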

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};
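
/* Illustrative sketch of filling a spec for an EQ; the callback, context
 * and size values are hypothetical:
 *
 *	struct gdma_queue_spec spec = {};
 *	struct gdma_queue *eq;
 *	int err;
 *
 *	spec.type = GDMA_EQ;
 *	spec.monitor_avl_buf = false;
 *	spec.queue_size = eq_size;
 *	spec.eq.callback = my_eq_handler;
 *	spec.eq.context = my_context;
 *	spec.eq.log2_throttle_limit = log2_throttle;
 *
 *	err = mana_gd_create_mana_eq(gd, &spec, &eq);
 */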

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_irq_context *irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;

	/* Azure RDMA adapter */
	struct gdma_dev mana_ib;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
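
/* Illustrative CQ polling sketch; the completion array size and the
 * handler are hypothetical:
 *
 *	struct gdma_comp comp[8];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comp, ARRAY_SIZE(comp));
 *	for (i = 0; i < n; i++)
 *		handle_comp(&comp[i]);
 *
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);
 */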

struct gdma_wqe {
	u32 reserved :24;
	u32 last_vbytes :8;

	union {
		u32 flags;

		struct {
			u32 num_sge :8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl :1;
			u32 reserved1 :4;
			u32 client_data_unit :14;
			u32 reserved2 :2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
		sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
		sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
		sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
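
/* With the 16-byte struct gdma_sge, these evaluate to
 * (512 - 16 - 8) / 16 = 30 TX SGL entries and (256 - 16) / 16 = 15 RX SGL
 * entries per WQE.
 */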

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
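
/* Simplified sketch of how the owner bits on the CQE at cq->head can be
 * interpreted (num_cqe is the hypothetical number of entries in the CQ):
 *
 *	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
 *	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *
 *	if (cqe->cqe_info.owner_bits == old_bits)
 *		return 0;	// no new entry yet
 *	if (cqe->cqe_info.owner_bits != new_bits)
 *		return -1;	// overflow detected
 */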

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doorbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
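
/* Sizing sketch: page_addr_list is a flexible array, so the request length
 * must cover the trailing page addresses (req is a hypothetical pointer to
 * a buffer sized for num_pages entries):
 *
 *	req_msg_size = struct_size(req, page_addr_list, num_pages);
 *	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
 *			     req_msg_size, sizeof(resp));
 */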

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destroy_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);
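
/* Illustrative sketch of posting a single-SGE work request; dma_addr, len
 * and the queue/device pointers are hypothetical:
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_wqe_request wqe_req = {};
 *	struct gdma_sge sge = {};
 *	int err;
 *
 *	sge.address = dma_addr;
 *	sge.mem_key = gd->gpa_mkey;
 *	sge.size = len;
 *
 *	wqe_req.sgl = &sge;
 *	wqe_req.num_sge = 1;
 *
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */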

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
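
/* Illustrative sketch of a request/response exchange over the HWC; gc is a
 * hypothetical context pointer and error handling is abbreviated:
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *				   sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */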

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */