1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ |
2 | /* Copyright (c) 2021, Microsoft Corporation. */ |
3 | |
4 | #ifndef _HW_CHANNEL_H |
5 | #define _HW_CHANNEL_H |
6 | |
/* log2 of the event-coalescing/throttling factor used for the error EQ */
#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4

/* Maximum HWC request/response message sizes in bytes (4 KiB each) */
#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000
#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000

/* A VF bootstraps with a single-entry queue */
#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1

/* Type codes for HWC initialization data items; presumably carried in
 * hwc_init_type_data.type with the payload in .value — confirm against
 * the EQE handling code.
 */
#define HWC_INIT_DATA_CQID 1
#define HWC_INIT_DATA_RQID 2
#define HWC_INIT_DATA_SQID 3
#define HWC_INIT_DATA_QUEUE_DEPTH 4
#define HWC_INIT_DATA_MAX_REQUEST 5
#define HWC_INIT_DATA_MAX_RESPONSE 6
#define HWC_INIT_DATA_MAX_NUM_CQS 7
#define HWC_INIT_DATA_PDID 8
#define HWC_INIT_DATA_GPA_MKEY 9
#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
#define HWC_INIT_DATA_PF_DEST_CQ_ID 11

/* Type code for a runtime HWC configuration item: the HWC timeout value */
#define HWC_DATA_CFG_HWC_TIMEOUT 1

/* How long to wait for an in-flight-message slot before giving up (ms) */
#define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000
29 | |
30 | /* Structures labeled with "HW DATA" are exchanged with the hardware. All of |
31 | * them are naturally aligned and hence don't need __packed. |
32 | */ |
33 | |
/* Identifies the EQ and its doorbell during HWC initialization.
 * Accessed either as a raw u32 or via the bitfield view below.
 */
union hwc_init_eq_id_db {
	u32 as_uint32;

	struct {
		u32 eq_id	: 16; /* event queue id */
		u32 doorbell	: 16; /* doorbell page/index for that EQ */
	};
}; /* HW DATA */
42 | |
/* A single HWC init data item: an 8-bit type tag (one of the
 * HWC_INIT_DATA_* codes) plus a 24-bit value payload, overlaid on a u32.
 */
union hwc_init_type_data {
	u32 as_uint32;

	struct {
		u32 value	: 24; /* payload interpreted per 'type' */
		u32 type	:  8; /* HWC_INIT_DATA_* discriminator */
	};
}; /* HW DATA */
51 | |
/* Out-of-band metadata delivered by hardware with a received HWC message.
 * Bitfield layout is the hardware contract — do not reorder or repack.
 */
struct hwc_rx_oob {
	u32 type	: 6;
	u32 eom		: 1;  /* end of message */
	u32 som		: 1;  /* start of message */
	u32 vendor_err	: 8;  /* vendor-specific error code */
	u32 reserved1	: 16;

	u32 src_virt_wq	: 24; /* source virtual work queue id */
	u32 src_vfid	: 8;  /* source virtual function id */

	u32 reserved2;

	union {
		u32 wqe_addr_low;  /* low 32 bits of the WQE address... */
		u32 wqe_offset;    /* ...or its offset; presumably mode-dependent */
	};

	u32 wqe_addr_high; /* high 32 bits of the WQE address */

	u32 client_data_unit	: 14;
	u32 reserved3		: 18;

	u32 tx_oob_data_size; /* size of the sender's TX OOB data */

	u32 chunk_offset	: 21;
	u32 reserved4		: 11;
}; /* HW DATA */
79 | |
/* Out-of-band metadata attached to an HWC message being transmitted.
 * Bitfield layout is the hardware contract — do not reorder or repack.
 */
struct hwc_tx_oob {
	u32 reserved1;

	u32 reserved2;

	u32 vrq_id	: 24; /* destination virtual receive queue id */
	u32 dest_vfid	: 8;  /* destination virtual function id */

	u32 vrcq_id	: 24; /* virtual receive completion queue id */
	u32 reserved3	: 8;

	u32 vscq_id	: 24; /* virtual send completion queue id */
	u32 loopback	: 1;
	u32 lso_override: 1;
	u32 dest_pf	: 1;  /* destination is the PF (not a VF) */
	u32 reserved4	: 5;

	u32 vsq_id	: 24; /* virtual send queue id */
	u32 reserved5	: 8;
}; /* HW DATA */
100 | |
/* One in-flight HWC work request: the message buffer plus the GDMA WQE
 * descriptors needed to post it.
 */
struct hwc_work_request {
	void *buf_va;       /* kernel virtual address of the message buffer */
	void *buf_sge_addr; /* buffer address as seen by the SGE — TODO confirm
			     * whether this is a DMA address
			     */
	u32 buf_len;        /* total capacity of the buffer */
	u32 msg_size;       /* size of the message currently in the buffer */

	struct gdma_wqe_request wqe_req; /* WQE posting parameters */
	struct hwc_tx_oob tx_oob;        /* TX out-of-band data for this WQE */

	struct gdma_sge sge; /* single scatter-gather entry for the buffer */
};
112 | |
/* hwc_dma_buf represents the array of in-flight WQEs.
 * mem_info, also known as the GDMA mapped memory, is partitioned and used
 * by the in-flight WQEs.
 * The number of WQEs is determined by the number of in-flight messages.
 */
struct hwc_dma_buf {
	struct gdma_mem_info mem_info; /* the DMA region shared by all reqs */

	u32 gpa_mkey; /* memory key for the GPA-mapped region */

	u32 num_reqs; /* number of entries in reqs[] */
	struct hwc_work_request reqs[] __counted_by(num_reqs);
};
126 | |
/* Completion callbacks invoked by the HWC CQ processing code.  Note that
 * the TX handler also receives the RX OOB structure — presumably the CQE
 * is reported in the same format for both directions; confirm against the
 * CQ handler implementation.
 */
typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
				    const struct hwc_rx_oob *rx_oob);

typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id,
				    const struct hwc_rx_oob *rx_oob);
132 | |
/* The HWC completion queue and its associated event queue, together with
 * the callbacks dispatched for RX and TX completions.
 */
struct hwc_cq {
	struct hw_channel_context *hwc; /* owning channel context */

	struct gdma_queue *gdma_cq;  /* underlying GDMA completion queue */
	struct gdma_queue *gdma_eq;  /* event queue the CQ is armed on */
	struct gdma_comp *comp_buf;  /* scratch array for polled completions */
	u16 queue_depth;             /* number of CQ entries */

	hwc_rx_event_handler_t *rx_event_handler;
	void *rx_event_ctx;          /* passed as 'ctx' to rx_event_handler */

	hwc_tx_event_handler_t *tx_event_handler;
	void *tx_event_ctx;          /* passed as 'ctx' to tx_event_handler */
};
147 | |
/* An HWC work queue (used for both the RX and TX directions) and the
 * message buffers posted on it.
 */
struct hwc_wq {
	struct hw_channel_context *hwc; /* owning channel context */

	struct gdma_queue *gdma_wq;  /* underlying GDMA work queue */
	struct hwc_dma_buf *msg_buf; /* per-WQE message buffers */
	u16 queue_depth;             /* number of WQEs the queue can hold */

	struct hwc_cq *hwc_cq;       /* CQ that reports this WQ's completions */
};
157 | |
/* Per-request state for a caller blocked in mana_hwc_send_request():
 * where to copy the response and how the request completed.
 */
struct hwc_caller_ctx {
	struct completion comp_event; /* signalled when the response arrives */
	void *output_buf;             /* caller's response buffer */
	u32 output_buflen;            /* capacity of output_buf */

	u32 error; /* Linux error code */
	u32 status_code;              /* status reported by the hardware */
};
166 | |
/* Top-level state of the hardware channel: the queues, the limits
 * negotiated during init, and the in-flight-message accounting.
 */
struct hw_channel_context {
	struct gdma_dev *gdma_dev; /* GDMA device the channel runs on */
	struct device *dev;        /* device used for logging/DMA */

	u16 num_inflight_msg;      /* max messages outstanding at once */
	u32 max_req_msg_size;      /* max request size currently in effect */

	/* Limits reported by the hardware during channel initialization */
	u16 hwc_init_q_depth_max;
	u32 hwc_init_max_req_msg_size;
	u32 hwc_init_max_resp_msg_size;

	struct completion hwc_init_eqe_comp; /* signalled on the init EQE */

	struct hwc_wq *rxq;        /* receive work queue */
	struct hwc_wq *txq;        /* transmit work queue */
	struct hwc_cq *cq;         /* shared completion queue */

	/* Throttle senders to num_inflight_msg and hand out message slots */
	struct semaphore sema;
	struct gdma_resource inflight_msg_res;

	/* PF destination queue ids used when a VF targets the PF */
	u32 pf_dest_vrq_id;
	u32 pf_dest_vrcq_id;
	u32 hwc_timeout;           /* request timeout; units set by the
				    * HWC_DATA_CFG_HWC_TIMEOUT config item —
				    * TODO confirm (likely ms)
				    */

	struct hwc_caller_ctx *caller_ctx; /* per-slot caller contexts */
};
193 | |
/* Create/destroy the hardware channel for a GDMA context. */
int mana_hwc_create_channel(struct gdma_context *gc);
void mana_hwc_destroy_channel(struct gdma_context *gc);

/* Send a request over the HWC and wait for the response to be copied into
 * 'resp' (at most 'resp_len' bytes).  Returns 0 on success or a negative
 * errno — confirm exact error contract against the implementation.
 */
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp);
199 | |
200 | #endif /* _HW_CHANNEL_H */ |
201 | |