// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/nvmetcp_common.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed_nvmetcp_fw_funcs.h"

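/* Number of leading SGEs that init_scsi_sgl_context() mirrors into the
 * cached-SGE area of the task context.
 */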
#define NVMETCP_NUM_SGES_IN_CACHE 0x4

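/* An SGL is "slow" when it is both longer than the SGE threshold and
 * contains small middle SGEs, forcing the FW onto the slow I/O path.
 */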
bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
	return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
}

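/* Fill the task-context SGL parameters and mirror up to
 * NVMETCP_NUM_SGES_IN_CACHE leading SGEs into the cached-SGE descriptors.
 */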
void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
			   struct scsi_cached_sges *ctx_data_desc,
			   struct storage_sgl_task_params *sgl_params)
{
	u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ?
				   NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges);
	u8 sge_index;

	/* sgl params */
	ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo);
	ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi);
	ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size);
	ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges);

	for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
		ctx_data_desc->sge[sge_index].sge_addr.lo =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo);
		ctx_data_desc->sge[sge_index].sge_addr.hi =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi);
		ctx_data_desc->sge[sge_index].sge_len =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_len);
	}
}

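/* Return the task I/O size: tx_io_size for host writes, rx_io_size
 * otherwise (host reads).
 */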
static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params,
				    enum nvmetcp_task_type task_type)
{
	u32 io_size;

	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE)
		io_size = task_params->tx_io_size;
	else
		io_size = task_params->rx_io_size;

	if (unlikely(!io_size))
		return 0;

	return io_size;
}

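/* Build the SQE (TX work-queue entry) according to the task type. */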
static inline void init_sqe(struct nvmetcp_task_params *task_params,
			    struct storage_sgl_task_params *sgl_task_params,
			    enum nvmetcp_task_type task_type)
{
	if (!task_params->sqe)
		return;

	memset(task_params->sqe, 0, sizeof(*task_params->sqe));
	task_params->sqe->task_id = cpu_to_le16(task_params->itid);

	switch (task_type) {
	case NVMETCP_TASK_TYPE_HOST_WRITE: {
		u32 buf_size = 0;
		u32 num_sges = 0;

		SET_FIELD(task_params->sqe->contlen_cdbsize,
			  NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_NORMAL);
		if (task_params->tx_io_size) {
			if (task_params->send_write_incapsule)
				buf_size = calc_rw_task_size(task_params, task_type);

			if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
						sgl_task_params->small_mid_sge))
				num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
			else
				num_sges = min((u16)sgl_task_params->num_sges,
					       (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
		}
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
		SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
	} break;

	case NVMETCP_TASK_TYPE_HOST_READ: {
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_NORMAL);
		SET_FIELD(task_params->sqe->contlen_cdbsize,
			  NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
	} break;

	case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: {
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_MIDDLE_PATH);

		if (task_params->tx_io_size) {
			SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
				  task_params->tx_io_size);
			SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
				  min((u16)sgl_task_params->num_sges,
				      (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
		}
	} break;

	case NVMETCP_TASK_TYPE_CLEANUP:
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_TASK_CLEANUP);
		break;

	default:
		break;
	}
}

/* The following function initializes the NVMeTCP task params: */
static inline void
init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
			 struct nvmetcp_task_params *task_params,
			 enum nvmetcp_task_type task_type)
{
	context->ystorm_st_context.state.cccid = task_params->host_cccid;
	SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
	context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
	context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
}

/* The following function initializes default values for all task types */
static inline void
init_default_nvmetcp_task(struct nvmetcp_task_params *task_params,
			  void *pdu_header, void *nvme_cmd,
			  enum nvmetcp_task_type task_type)
{
	struct e5_nvmetcp_task_context *context = task_params->context;
	const u8 val_byte = context->mstorm_ag_context.cdu_validation;
	u8 dw_index;

	memset(context, 0, sizeof(*context));
	init_nvmetcp_task_params(context, task_params,
				 (enum nvmetcp_task_type)task_type);

	/* Swapping requirements used below, will be removed in future FW versions */
	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE ||
	    task_type == NVMETCP_TASK_TYPE_HOST_READ) {
		for (dw_index = 0;
		     dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));

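		/* The NVMe command dwords sit right after the common PDU
		 * header; the "- 2" below assumes QED_NVMETCP_CMN_HDR_SIZE
		 * is two dwords.
		 */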
		for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
		     dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2]));
	} else {
		for (dw_index = 0;
		     dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
	}

	/* M-Storm Context: */
	context->mstorm_ag_context.cdu_validation = val_byte;
	context->mstorm_st_context.task_type = (u8)(task_type);
	context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid);

	/* Ustorm Context: */
	SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1);
	context->ustorm_st_context.task_type = (u8)(task_type);
	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
	context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
}

/* The following function initializes the U-Storm Task Contexts */
static inline void
init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context,
			  struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context,
			  u32 remaining_recv_len,
			  u32 expected_data_transfer_len, u8 num_sges,
			  bool tx_dif_conn_err_en)
{
	/* Remaining data to be received in bytes. Used in validations */
	ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len);
	ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
	ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len);
	SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges);
	SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN,
		  tx_dif_conn_err_en ? 1 : 0);
}

/* The following function initializes Local Completion Contexts: */
static inline void
set_local_completion_context(struct e5_nvmetcp_task_context *context)
{
	SET_FIELD(context->ystorm_st_context.state.flags,
		  YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1);
	SET_FIELD(context->ustorm_st_context.flags,
		  USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1);
}

/* Common Fastpath task init function: */
static inline void
init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params,
		     enum nvmetcp_task_type task_type,
		     void *pdu_header, void *nvme_cmd,
		     struct storage_sgl_task_params *sgl_task_params)
{
	struct e5_nvmetcp_task_context *context = task_params->context;
	u32 task_size = calc_rw_task_size(task_params, task_type);
	bool slow_io = false;
	u8 num_sges = 0;

	init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type);

	/* Tx/Rx: */
	if (task_params->tx_io_size) {
		/* if data to transmit: */
		init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
				      &context->ystorm_st_context.state.data_desc,
				      sgl_task_params);
		slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
					      sgl_task_params->small_mid_sge);
		num_sges =
			(u8)(!slow_io ? min((u32)sgl_task_params->num_sges,
					    (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
			     NVMETCP_WQE_NUM_SGES_SLOWIO);
		if (slow_io) {
			SET_FIELD(context->ystorm_st_context.state.flags,
				  YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1);
		}
	} else if (task_params->rx_io_size) {
		/* if data to receive: */
		init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
				      &context->mstorm_st_context.data_desc,
				      sgl_task_params);
		num_sges =
			(u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
						  sgl_task_params->small_mid_sge) ?
			     min((u32)sgl_task_params->num_sges,
				 (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
			     NVMETCP_WQE_NUM_SGES_SLOWIO);
		context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
	}

	/* Ustorm context: */
	init_ustorm_task_contexts(&context->ustorm_st_context,
				  &context->ustorm_ag_context,
				  /* Remaining Receive length is the Task Size */
				  task_size,
				  /* The size of the transmitted task */
				  task_size,
				  /* num_sges */
				  num_sges,
				  false);

	/* Set exp_data_acked */
	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) {
		if (task_params->send_write_incapsule)
			context->ustorm_ag_context.exp_data_acked = task_size;
		else
			context->ustorm_ag_context.exp_data_acked = 0;
	} else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) {
		context->ustorm_ag_context.exp_data_acked = 0;
	}

	context->ustorm_ag_context.exp_cont_len = 0;
	init_sqe(task_params, sgl_task_params, task_type);
}

static void
init_common_initiator_read_task(struct nvmetcp_task_params *task_params,
				struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				struct nvme_command *nvme_cmd,
				struct storage_sgl_task_params *sgl_task_params)
{
	init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ,
			     cmd_pdu_header, nvme_cmd, sgl_task_params);
}

void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
				 struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				 struct nvme_command *nvme_cmd,
				 struct storage_sgl_task_params *sgl_task_params)
{
	init_common_initiator_read_task(task_params, (void *)cmd_pdu_header,
					(void *)nvme_cmd, sgl_task_params);
}

static void
init_common_initiator_write_task(struct nvmetcp_task_params *task_params,
				 struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				 struct nvme_command *nvme_cmd,
				 struct storage_sgl_task_params *sgl_task_params)
{
	init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE,
			     cmd_pdu_header, nvme_cmd, sgl_task_params);
}

void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
				  struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				  struct nvme_command *nvme_cmd,
				  struct storage_sgl_task_params *sgl_task_params)
{
	init_common_initiator_write_task(task_params, (void *)cmd_pdu_header,
					 (void *)nvme_cmd, sgl_task_params);
}
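
/*
 * Illustrative call flow for a host write (a sketch only; the surrounding
 * queue/DMA plumbing and the task_ctx/sqe/cmd_pdu/nvme_cmd/sgl variables
 * below are hypothetical, not part of this file):
 *
 *	struct nvmetcp_task_params params = {};
 *
 *	params.context = task_ctx;		// HW task context buffer
 *	params.sqe = sqe;			// TX work-queue entry slot
 *	params.itid = itid;			// FW task id
 *	params.conn_icid = conn_icid;		// connection context id
 *	params.host_cccid = cccid;		// host command capsule id
 *	params.cq_rss_number = cq_id;		// completion queue index
 *	params.tx_io_size = write_len;		// bytes to transmit
 *	init_nvmetcp_host_write_task(&params, cmd_pdu, nvme_cmd, &sgl);
 */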

static void
init_common_login_request_task(struct nvmetcp_task_params *task_params,
			       void *login_req_pdu_header,
			       struct storage_sgl_task_params *tx_sgl_task_params,
			       struct storage_sgl_task_params *rx_sgl_task_params)
{
	struct e5_nvmetcp_task_context *context = task_params->context;

	init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL,
				  NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);

	/* Ustorm Context: */
	init_ustorm_task_contexts(&context->ustorm_st_context,
				  &context->ustorm_ag_context,

				  /* Remaining Receive length is the Task Size */
				  task_params->rx_io_size ?
				  rx_sgl_task_params->total_buffer_size : 0,

				  /* The size of the transmitted task */
				  task_params->tx_io_size ?
				  tx_sgl_task_params->total_buffer_size : 0,
				  0, /* num_sges */
				  0); /* tx_dif_conn_err_en */

	/* SGL context: */
	if (task_params->tx_io_size)
		init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
				      &context->ystorm_st_context.state.data_desc,
				      tx_sgl_task_params);
	if (task_params->rx_io_size)
		init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
				      &context->mstorm_st_context.data_desc,
				      rx_sgl_task_params);

	context->mstorm_st_context.rem_task_size =
		cpu_to_le32(task_params->rx_io_size ?
			    rx_sgl_task_params->total_buffer_size : 0);
	init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
}

/* The following function initializes the Login task in Host mode: */
void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
				     struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
				     struct storage_sgl_task_params *tx_sgl_task_params,
				     struct storage_sgl_task_params *rx_sgl_task_params)
{
	init_common_login_request_task(task_params, init_conn_req_pdu_hdr,
				       tx_sgl_task_params, rx_sgl_task_params);
}

void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params)
{
	init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP);
}