1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
2/* Copyright 2021 Marvell. All rights reserved. */
3
4#ifndef _QED_NVMETCP_IF_H
5#define _QED_NVMETCP_IF_H
6#include <linux/types.h>
7#include <linux/qed/qed_if.h>
8#include <linux/qed/storage_common.h>
9#include <linux/qed/nvmetcp_common.h>
10
/* Upper bound on a single offloaded NVMeTCP I/O: 0x800000 bytes (8 MiB) */
#define QED_NVMETCP_MAX_IO_SIZE 0x800000
/* Size of the common NVMe/TCP PDU header (struct nvme_tcp_hdr) */
#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr))
/* Size of a full NVMe/TCP command PDU header (struct nvme_tcp_cmd_pdu) */
#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu))
/* Header size budget for non-I/O PDUs: the common header plus a fixed
 * 16 bytes (NOTE(review): the meaning of the extra 16 bytes is not
 * derivable from this header - presumably PDU-specific fields; confirm
 * against the FW HSI definitions).
 */
#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16))
15
/* Async FW event callback registered via qed_nvmetcp_ops->start().
 * @context: opaque cookie supplied at registration (event_context)
 * @fw_event_code: FW-defined event identifier
 * @fw_handle: FW-provided event payload/handle
 * Returns 0 on success, otherwise an error value.
 */
typedef int (*nvmetcp_event_cb_t) (void *context,
				   u8 fw_event_code, void *fw_handle);
18
/* NVMeTCP-specific device information, filled by
 * qed_nvmetcp_ops->fill_dev_info() on top of the common qed info.
 */
struct qed_dev_nvmetcp_info {
	struct qed_dev_info common;	/* Generic qed device information */
	u8 port_id;			/* Physical port */
	u8 num_cqs;			/* Number of completion queues available */
};
24
/* Maximum number of task-context blocks qed may report in qed_nvmetcp_tid */
#define MAX_TID_BLOCKS_NVMETCP (512)
/* Task (TID) memory layout, filled by qed_nvmetcp_ops->start() so the
 * upper driver can locate per-task context memory.
 */
struct qed_nvmetcp_tid {
	u32 size;		/* In bytes per task */
	u32 num_tids_per_block;	/* Tasks held in each block below */
	u8 *blocks[MAX_TID_BLOCKS_NVMETCP];	/* Task context block pointers */
};
31
/* L2/L3/L4 endpoint identity for one side of a connection (used for both
 * src and dst in qed_nvmetcp_params_offload).
 */
struct qed_nvmetcp_id_params {
	u8 mac[ETH_ALEN];	/* MAC address */
	u32 ip[4];		/* IP address; 4 words to accommodate IPv6
				 * (layout/byte order per ip_version - not
				 * derivable from this header, confirm in qed)
				 */
	u16 port;		/* TCP port */
};
37
/* Connection parameters passed to qed_nvmetcp_ops->offload_conn() when an
 * acquired connection is handed to FW.
 */
struct qed_nvmetcp_params_offload {
	/* FW initializations */
	dma_addr_t sq_pbl_addr;			/* SQ page-buffer-list DMA address */
	dma_addr_t nvmetcp_cccid_itid_table_addr; /* CCCID->iTID lookup table DMA address */
	u16 nvmetcp_cccid_max_range;		/* Max command capsule CID range */
	u8 default_cq;				/* Default completion queue index */

	/* Networking and TCP stack initializations */
	struct qed_nvmetcp_id_params src;	/* Local endpoint identity */
	struct qed_nvmetcp_id_params dst;	/* Remote endpoint identity */
	u32 ka_timeout;				/* TCP keep-alive timeout */
	u32 ka_interval;			/* TCP keep-alive probe interval */
	u32 max_rt_time;			/* Max retransmission time */
	u32 cwnd;				/* Initial congestion window */
	u16 mss;				/* TCP maximum segment size */
	u16 vlan_id;				/* VLAN tag; units/0-meaning not
						 * derivable here - confirm in qed
						 */
	bool timestamp_en;			/* Enable TCP timestamps */
	bool delayed_ack_en;			/* Enable delayed ACK */
	bool tcp_keep_alive_en;			/* Enable TCP keep-alive */
	bool ecn_en;				/* Enable ECN */
	u8 ip_version;				/* IP version selector for src/dst */
	u8 ka_max_probe_cnt;			/* Max keep-alive probes before drop */
	u8 ttl;					/* IPv4 time-to-live */
	u8 tos_or_tc;				/* IPv4 TOS or IPv6 traffic class */
	u8 rcv_wnd_scale;			/* TCP receive window scale factor */
};
64
/* Negotiated connection parameters applied after the ICReq/ICResp exchange
 * via qed_nvmetcp_ops->update_conn().
 */
struct qed_nvmetcp_params_update {
	u32 max_io_size;	/* Max I/O size for this connection, in bytes */
	u32 max_recv_pdu_length;	/* Max receive PDU length, in bytes */
	u32 max_send_pdu_length;	/* Max transmit PDU length, in bytes */

	/* Placeholder: pfv, cpda, hpda */

	bool hdr_digest_en;	/* Enable PDU header digest (HDGST) */
	bool data_digest_en;	/* Enable PDU data digest (DDGST) */
};
75
/* Callback table registered through qed_nvmetcp_ops->register_ops();
 * currently only the common qed callbacks, kept as a struct for future
 * NVMeTCP-specific additions.
 */
struct qed_nvmetcp_cb_ops {
	struct qed_common_cb_ops common;
};
79
/* Single scatter-gather element as consumed by the FW (HSI layout:
 * little-endian fields, 16 bytes total).
 */
struct nvmetcp_sge {
	struct regpair sge_addr;	/* SGE address */
	__le32 sge_len;			/* SGE length */
	__le32 reserved;		/* Pad to 16 bytes; must-be-zero assumed
					 * but not derivable here
					 */
};
85
/* IO path HSI function SGL params - describes the SGL backing one task's
 * data buffer for the init_* task-context helpers below.
 */
struct storage_sgl_task_params {
	struct nvmetcp_sge *sgl;	/* Virtual address of the SGE array */
	struct regpair sgl_phys_addr;	/* DMA address of the SGE array */
	u32 total_buffer_size;		/* Sum of SGE lengths, in bytes */
	u16 num_sges;			/* Number of entries in sgl */
	bool small_mid_sge;		/* FW hint; exact trigger condition not
					 * derivable here - confirm vs qed HSI
					 */
};
94
/* IO path HSI function FW task context params - per-task inputs for the
 * init_read_io/init_write_io/init_icreq_exchange/init_task_cleanup ops.
 */
struct nvmetcp_task_params {
	void *context;		/* Output parameter - set/filled by the HSI function */
	struct nvmetcp_wqe *sqe;	/* SQ work-queue entry to build */
	u32 tx_io_size;		/* in bytes (Without DIF, if exists) */
	u32 rx_io_size;		/* in bytes (Without DIF, if exists) */
	u16 conn_icid;		/* Connection ID the task belongs to */
	u16 itid;		/* FW task ID */
	struct regpair opq;	/* qedn_task_ctx address */
	u16 host_cccid;		/* Host command capsule CID */
	u8 cq_rss_number;	/* Completion queue index for this task */
	bool send_write_incapsule;	/* Send write data in-capsule
					 * (presumably when size <= negotiated
					 * in-capsule limit - confirm in caller)
					 */
};
108
/**
 * struct qed_nvmetcp_ops - qed NVMeTCP operations.
 * @common: common operations pointer
 * @ll2: light L2 operations pointer
 * @fill_dev_info: fills NVMeTCP specific information
 *	@param cdev
 *	@param info
 *	@return 0 on success, otherwise error value.
 * @register_ops: register nvmetcp operations
 *	@param cdev
 *	@param ops - specified using qed_nvmetcp_cb_ops
 *	@param cookie - driver private
 * @start: nvmetcp in FW
 *	@param cdev
 *	@param tasks - qed will fill information about tasks
 *	@return 0 on success, otherwise error value.
 * @stop: nvmetcp in FW
 *	@param cdev
 *	@return 0 on success, otherwise error value.
 * @acquire_conn: acquire a new nvmetcp connection
 *	@param cdev
 *	@param handle - qed will fill handle that should be
 *		used henceforth as identifier of the
 *		connection.
 *	@param p_doorbell - qed will fill the address of the
 *		doorbell.
 *	@return 0 on success, otherwise error value.
 * @release_conn: release a previously acquired nvmetcp connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@return 0 on success, otherwise error value.
 * @offload_conn: configures an offloaded connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@param conn_info - the configuration to use for the
 *		offload.
 *	@return 0 on success, otherwise error value.
 * @update_conn: updates an offloaded connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@param conn_info - the configuration to use for the
 *		offload.
 *	@return 0 on success, otherwise error value.
 * @destroy_conn: stops an offloaded connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@return 0 on success, otherwise error value.
 * @clear_sq: clear all task in sq
 *	@param cdev
 *	@param handle - the connection handle.
 *	@return 0 on success, otherwise error value.
 * @add_src_tcp_port_filter: Add source tcp port filter
 *	@param cdev
 *	@param src_port
 * @remove_src_tcp_port_filter: Remove source tcp port filter
 *	@param cdev
 *	@param src_port
 * @add_dst_tcp_port_filter: Add destination tcp port filter
 *	@param cdev
 *	@param dest_port
 * @remove_dst_tcp_port_filter: Remove destination tcp port filter
 *	@param cdev
 *	@param dest_port
 * @clear_all_filters: Clear all filters.
 *	@param cdev
 * @init_read_io: Init read IO.
 *	@task_params
 *	@cmd_pdu_header
 *	@nvme_cmd
 *	@sgl_task_params
 * @init_write_io: Init write IO.
 *	@task_params
 *	@cmd_pdu_header
 *	@nvme_cmd
 *	@sgl_task_params
 * @init_icreq_exchange: Exchange ICReq.
 *	@task_params
 *	@init_conn_req_pdu_hdr
 *	@tx_sgl_task_params
 *	@rx_sgl_task_params
 * @init_task_cleanup: Init task cleanup.
 *	@task_params
 */
struct qed_nvmetcp_ops {
	const struct qed_common_ops *common;

	const struct qed_ll2_ops *ll2;

	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_nvmetcp_info *info);

	void (*register_ops)(struct qed_dev *cdev,
			     struct qed_nvmetcp_cb_ops *ops, void *cookie);

	int (*start)(struct qed_dev *cdev,
		     struct qed_nvmetcp_tid *tasks,
		     void *event_context, nvmetcp_event_cb_t async_event_cb);

	int (*stop)(struct qed_dev *cdev);

	int (*acquire_conn)(struct qed_dev *cdev,
			    u32 *handle,
			    u32 *fw_cid, void __iomem **p_doorbell);

	int (*release_conn)(struct qed_dev *cdev, u32 handle);

	int (*offload_conn)(struct qed_dev *cdev,
			    u32 handle,
			    struct qed_nvmetcp_params_offload *conn_info);

	int (*update_conn)(struct qed_dev *cdev,
			   u32 handle,
			   struct qed_nvmetcp_params_update *conn_info);

	int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);

	int (*clear_sq)(struct qed_dev *cdev, u32 handle);

	int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);

	void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);

	int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);

	void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);

	void (*clear_all_filters)(struct qed_dev *cdev);

	void (*init_read_io)(struct nvmetcp_task_params *task_params,
			     struct nvme_tcp_cmd_pdu *cmd_pdu_header,
			     struct nvme_command *nvme_cmd,
			     struct storage_sgl_task_params *sgl_task_params);

	void (*init_write_io)(struct nvmetcp_task_params *task_params,
			      struct nvme_tcp_cmd_pdu *cmd_pdu_header,
			      struct nvme_command *nvme_cmd,
			      struct storage_sgl_task_params *sgl_task_params);

	void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params,
				    struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
				    struct storage_sgl_task_params *tx_sgl_task_params,
				    struct storage_sgl_task_params *rx_sgl_task_params);

	void (*init_task_cleanup)(struct nvmetcp_task_params *task_params);
};
254
/* Obtain the qed NVMeTCP operations table.
 * NOTE(review): get/put naming suggests reference counting - pair each
 * successful get with qed_put_nvmetcp_ops(); confirm against the qed core.
 */
const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
/* Release the reference obtained via qed_get_nvmetcp_ops(). */
void qed_put_nvmetcp_ops(void);
257#endif
258

source code of linux/include/linux/qed/qed_nvmetcp_if.h