1 | /* |
2 | * cxgb4i.c: Chelsio T4 iSCSI driver. |
3 | * |
4 | * Copyright (c) 2010-2015 Chelsio Communications, Inc. |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation. |
9 | * |
10 | * Written by: Karen Xie (kxie@chelsio.com) |
11 | * Rakesh Ranjan (rranjan@chelsio.com) |
12 | */ |
13 | |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ |
15 | |
16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> |
18 | #include <linux/moduleparam.h> |
19 | #include <scsi/scsi_host.h> |
20 | #include <net/tcp.h> |
21 | #include <net/dst.h> |
22 | #include <linux/netdevice.h> |
23 | #include <net/addrconf.h> |
24 | |
25 | #include "t4_regs.h" |
26 | #include "t4_msg.h" |
27 | #include "cxgb4.h" |
28 | #include "cxgb4_uld.h" |
29 | #include "t4fw_api.h" |
30 | #include "l2t.h" |
31 | #include "cxgb4i.h" |
32 | #include "clip_tbl.h" |
33 | |
34 | static unsigned int dbg_level; |
35 | |
36 | #include "../libcxgbi.h" |
37 | |
38 | #ifdef CONFIG_CHELSIO_T4_DCB |
39 | #include <net/dcbevent.h> |
40 | #include "cxgb4_dcb.h" |
41 | #endif |
42 | |
43 | #define DRV_MODULE_NAME "cxgb4i" |
44 | #define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver" |
45 | #define DRV_MODULE_VERSION "0.9.5-ko" |
46 | #define DRV_MODULE_RELDATE "Apr. 2015" |
47 | |
48 | static char version[] = |
49 | DRV_MODULE_DESC " " DRV_MODULE_NAME |
50 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n" ; |
51 | |
52 | MODULE_AUTHOR("Chelsio Communications, Inc." ); |
53 | MODULE_DESCRIPTION(DRV_MODULE_DESC); |
54 | MODULE_VERSION(DRV_MODULE_VERSION); |
55 | MODULE_LICENSE("GPL" ); |
56 | |
57 | module_param(dbg_level, uint, 0644); |
58 | MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)" ); |
59 | |
60 | #define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024) |
61 | static int cxgb4i_rcv_win = -1; |
62 | module_param(cxgb4i_rcv_win, int, 0644); |
63 | MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes" ); |
64 | |
65 | #define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024) |
66 | static int cxgb4i_snd_win = -1; |
67 | module_param(cxgb4i_snd_win, int, 0644); |
68 | MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes" ); |
69 | |
70 | static int cxgb4i_rx_credit_thres = 10 * 1024; |
71 | module_param(cxgb4i_rx_credit_thres, int, 0644); |
72 | MODULE_PARM_DESC(cxgb4i_rx_credit_thres, |
73 | "RX credits return threshold in bytes (default=10KB)" ); |
74 | |
75 | static unsigned int cxgb4i_max_connect = (8 * 1024); |
76 | module_param(cxgb4i_max_connect, uint, 0644); |
77 | MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections" ); |
78 | |
79 | static unsigned short cxgb4i_sport_base = 20000; |
80 | module_param(cxgb4i_sport_base, ushort, 0644); |
81 | MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)" ); |
82 | |
83 | typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *); |
84 | |
85 | static void *t4_uld_add(const struct cxgb4_lld_info *); |
86 | static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *); |
87 | static int t4_uld_state_change(void *, enum cxgb4_state state); |
88 | static inline int send_tx_flowc_wr(struct cxgbi_sock *); |
89 | |
90 | static const struct cxgb4_uld_info cxgb4i_uld_info = { |
91 | .name = DRV_MODULE_NAME, |
92 | .nrxq = MAX_ULD_QSETS, |
93 | .ntxq = MAX_ULD_QSETS, |
94 | .rxq_size = 1024, |
95 | .lro = false, |
96 | .add = t4_uld_add, |
97 | .rx_handler = t4_uld_rx_handler, |
98 | .state_change = t4_uld_state_change, |
99 | }; |
100 | |
101 | static struct scsi_host_template cxgb4i_host_template = { |
102 | .module = THIS_MODULE, |
103 | .name = DRV_MODULE_NAME, |
104 | .proc_name = DRV_MODULE_NAME, |
105 | .can_queue = CXGB4I_SCSI_HOST_QDEPTH, |
106 | .queuecommand = iscsi_queuecommand, |
107 | .change_queue_depth = scsi_change_queue_depth, |
108 | .sg_tablesize = SG_ALL, |
109 | .max_sectors = 0xFFFF, |
110 | .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, |
111 | .eh_timed_out = iscsi_eh_cmd_timed_out, |
112 | .eh_abort_handler = iscsi_eh_abort, |
113 | .eh_device_reset_handler = iscsi_eh_device_reset, |
114 | .eh_target_reset_handler = iscsi_eh_recover_target, |
115 | .target_alloc = iscsi_target_alloc, |
116 | .dma_boundary = PAGE_SIZE - 1, |
117 | .this_id = -1, |
118 | .track_queue_depth = 1, |
119 | .cmd_size = sizeof(struct iscsi_cmd), |
120 | }; |
121 | |
122 | static struct iscsi_transport cxgb4i_iscsi_transport = { |
123 | .owner = THIS_MODULE, |
124 | .name = DRV_MODULE_NAME, |
125 | .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | |
126 | CAP_DATADGST | CAP_DIGEST_OFFLOAD | |
127 | CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, |
128 | .attr_is_visible = cxgbi_attr_is_visible, |
129 | .get_host_param = cxgbi_get_host_param, |
130 | .set_host_param = cxgbi_set_host_param, |
131 | /* session management */ |
132 | .create_session = cxgbi_create_session, |
133 | .destroy_session = cxgbi_destroy_session, |
134 | .get_session_param = iscsi_session_get_param, |
135 | /* connection management */ |
136 | .create_conn = cxgbi_create_conn, |
137 | .bind_conn = cxgbi_bind_conn, |
138 | .unbind_conn = iscsi_conn_unbind, |
139 | .destroy_conn = iscsi_tcp_conn_teardown, |
140 | .start_conn = iscsi_conn_start, |
141 | .stop_conn = iscsi_conn_stop, |
142 | .get_conn_param = iscsi_conn_get_param, |
143 | .set_param = cxgbi_set_conn_param, |
144 | .get_stats = cxgbi_get_conn_stats, |
145 | /* pdu xmit req from user space */ |
146 | .send_pdu = iscsi_conn_send_pdu, |
147 | /* task */ |
148 | .init_task = iscsi_tcp_task_init, |
149 | .xmit_task = iscsi_tcp_task_xmit, |
150 | .cleanup_task = cxgbi_cleanup_task, |
151 | /* pdu */ |
152 | .alloc_pdu = cxgbi_conn_alloc_pdu, |
153 | .init_pdu = cxgbi_conn_init_pdu, |
154 | .xmit_pdu = cxgbi_conn_xmit_pdu, |
155 | .parse_pdu_itt = cxgbi_parse_pdu_itt, |
156 | /* TCP connect/disconnect */ |
157 | .get_ep_param = cxgbi_get_ep_param, |
158 | .ep_connect = cxgbi_ep_connect, |
159 | .ep_poll = cxgbi_ep_poll, |
160 | .ep_disconnect = cxgbi_ep_disconnect, |
161 | /* Error recovery timeout call */ |
162 | .session_recovery_timedout = iscsi_session_recovery_timedout, |
163 | }; |
164 | |
165 | #ifdef CONFIG_CHELSIO_T4_DCB |
166 | static int |
167 | cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *); |
168 | |
169 | static struct notifier_block cxgb4_dcb_change = { |
170 | .notifier_call = cxgb4_dcb_change_notify, |
171 | }; |
172 | #endif |
173 | |
174 | static struct scsi_transport_template *cxgb4i_stt; |
175 | |
176 | /* |
177 | * CPL (Chelsio Protocol Language) defines a message passing interface between |
178 | * the host driver and the Chelsio ASIC. |
179 | * The section below implements the CPLs related to iSCSI TCP connection |
180 | * open/close/abort and data send/receive. |
181 | */ |
182 | |
183 | #define RCV_BUFSIZ_MASK 0x3FFU |
184 | #define MAX_IMM_TX_PKT_LEN 256 |
185 | |
186 | static int push_tx_frames(struct cxgbi_sock *, int); |
187 | |
188 | /* |
189 | * is_ofld_imm - check whether a packet can be sent as immediate data |
190 | * @skb: the packet |
191 | * |
192 | * Returns true if a packet can be sent as an offload WR with immediate |
193 | * data. We currently use the same limit as for Ethernet packets. |
194 | */ |
195 | static inline bool is_ofld_imm(const struct sk_buff *skb) |
196 | { |
197 | int len = skb->len; |
198 | |
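/* Account for the WR header (and ISO CPL, if present) that will be
 * prepended to the payload, so the check below is against the full
 * inline work-request size.
 */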
199 | if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) |
200 | len += sizeof(struct fw_ofld_tx_data_wr); |
201 | |
202 | if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO))) |
203 | len += sizeof(struct cpl_tx_data_iso); |
204 | |
205 | return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN); |
206 | } |
207 | |
208 | static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, |
209 | struct l2t_entry *e) |
210 | { |
211 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); |
212 | int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); |
213 | unsigned long long opt0; |
214 | unsigned int opt2; |
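/* Note: the ATID is carried in the low bits of the CPL TID field and the
 * RSS queue id used for replies is packed above it (shifted by 14 below).
 */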
215 | unsigned int qid_atid = ((unsigned int)csk->atid) | |
216 | (((unsigned int)csk->rss_qid) << 14); |
217 | |
218 | opt0 = KEEP_ALIVE_F | |
219 | WND_SCALE_V(wscale) | |
220 | MSS_IDX_V(csk->mss_idx) | |
221 | L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | |
222 | TX_CHAN_V(csk->tx_chan) | |
223 | SMAC_SEL_V(csk->smac_idx) | |
224 | ULP_MODE_V(ULP_MODE_ISCSI) | |
225 | RCV_BUFSIZ_V(csk->rcv_win >> 10); |
226 | |
227 | opt2 = RX_CHANNEL_V(0) | |
228 | RSS_QUEUE_VALID_F | |
229 | RSS_QUEUE_V(csk->rss_qid); |
230 | |
231 | if (is_t4(lldi->adapter_type)) { |
232 | struct cpl_act_open_req *req = |
233 | (struct cpl_act_open_req *)skb->head; |
234 | |
235 | INIT_TP_WR(req, 0); |
236 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, |
237 | qid_atid)); |
238 | req->local_port = csk->saddr.sin_port; |
239 | req->peer_port = csk->daddr.sin_port; |
240 | req->local_ip = csk->saddr.sin_addr.s_addr; |
241 | req->peer_ip = csk->daddr.sin_addr.s_addr; |
242 | req->opt0 = cpu_to_be64(opt0); |
243 | req->params = cpu_to_be32(cxgb4_select_ntuple( |
244 | csk->cdev->ports[csk->port_id], |
245 | csk->l2t)); |
246 | opt2 |= RX_FC_VALID_F; |
247 | req->opt2 = cpu_to_be32(opt2); |
248 | |
249 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
250 | "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n" , |
251 | csk, &req->local_ip, ntohs(req->local_port), |
252 | &req->peer_ip, ntohs(req->peer_port), |
253 | csk->atid, csk->rss_qid); |
254 | } else if (is_t5(lldi->adapter_type)) { |
255 | struct cpl_t5_act_open_req *req = |
256 | (struct cpl_t5_act_open_req *)skb->head; |
257 | u32 isn = (get_random_u32() & ~7UL) - 1; |
258 | |
259 | INIT_TP_WR(req, 0); |
260 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, |
261 | qid_atid)); |
262 | req->local_port = csk->saddr.sin_port; |
263 | req->peer_port = csk->daddr.sin_port; |
264 | req->local_ip = csk->saddr.sin_addr.s_addr; |
265 | req->peer_ip = csk->daddr.sin_addr.s_addr; |
266 | req->opt0 = cpu_to_be64(opt0); |
267 | req->params = cpu_to_be64(FILTER_TUPLE_V( |
268 | cxgb4_select_ntuple( |
269 | csk->cdev->ports[csk->port_id], |
270 | csk->l2t))); |
271 | req->rsvd = cpu_to_be32(isn); |
272 | opt2 |= T5_ISS_VALID; |
273 | opt2 |= T5_OPT_2_VALID_F; |
274 | |
275 | req->opt2 = cpu_to_be32(opt2); |
276 | |
277 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
278 | "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n" , |
279 | csk, &req->local_ip, ntohs(req->local_port), |
280 | &req->peer_ip, ntohs(req->peer_port), |
281 | csk->atid, csk->rss_qid); |
282 | } else { |
283 | struct cpl_t6_act_open_req *req = |
284 | (struct cpl_t6_act_open_req *)skb->head; |
285 | u32 isn = (get_random_u32() & ~7UL) - 1; |
286 | |
287 | INIT_TP_WR(req, 0); |
288 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, |
289 | qid_atid)); |
290 | req->local_port = csk->saddr.sin_port; |
291 | req->peer_port = csk->daddr.sin_port; |
292 | req->local_ip = csk->saddr.sin_addr.s_addr; |
293 | req->peer_ip = csk->daddr.sin_addr.s_addr; |
294 | req->opt0 = cpu_to_be64(opt0); |
295 | req->params = cpu_to_be64(FILTER_TUPLE_V( |
296 | cxgb4_select_ntuple( |
297 | csk->cdev->ports[csk->port_id], |
298 | csk->l2t))); |
299 | req->rsvd = cpu_to_be32(isn); |
300 | |
301 | opt2 |= T5_ISS_VALID; |
302 | opt2 |= RX_FC_DISABLE_F; |
303 | opt2 |= T5_OPT_2_VALID_F; |
304 | |
305 | req->opt2 = cpu_to_be32(opt2); |
306 | req->rsvd2 = cpu_to_be32(0); |
307 | req->opt3 = cpu_to_be32(0); |
308 | |
309 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
310 | "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n" , |
311 | csk, &req->local_ip, ntohs(req->local_port), |
312 | &req->peer_ip, ntohs(req->peer_port), |
313 | csk->atid, csk->rss_qid); |
314 | } |
315 | |
316 | set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); |
317 | |
318 | pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n" , |
319 | (&csk->saddr), (&csk->daddr), |
320 | CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, |
321 | csk->state, csk->flags, csk->atid, csk->rss_qid); |
322 | |
323 | cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); |
324 | } |
325 | |
326 | #if IS_ENABLED(CONFIG_IPV6) |
327 | static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, |
328 | struct l2t_entry *e) |
329 | { |
330 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); |
331 | int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); |
332 | unsigned long long opt0; |
333 | unsigned int opt2; |
334 | unsigned int qid_atid = ((unsigned int)csk->atid) | |
335 | (((unsigned int)csk->rss_qid) << 14); |
336 | |
337 | opt0 = KEEP_ALIVE_F | |
338 | WND_SCALE_V(wscale) | |
339 | MSS_IDX_V(csk->mss_idx) | |
340 | L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | |
341 | TX_CHAN_V(csk->tx_chan) | |
342 | SMAC_SEL_V(csk->smac_idx) | |
343 | ULP_MODE_V(ULP_MODE_ISCSI) | |
344 | RCV_BUFSIZ_V(csk->rcv_win >> 10); |
345 | |
346 | opt2 = RX_CHANNEL_V(0) | |
347 | RSS_QUEUE_VALID_F | |
348 | RSS_QUEUE_V(csk->rss_qid); |
349 | |
350 | if (is_t4(lldi->adapter_type)) { |
351 | struct cpl_act_open_req6 *req = |
352 | (struct cpl_act_open_req6 *)skb->head; |
353 | |
354 | INIT_TP_WR(req, 0); |
355 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, |
356 | qid_atid)); |
357 | req->local_port = csk->saddr6.sin6_port; |
358 | req->peer_port = csk->daddr6.sin6_port; |
359 | |
360 | req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); |
361 | req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + |
362 | 8); |
363 | req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); |
364 | req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + |
365 | 8); |
366 | |
367 | req->opt0 = cpu_to_be64(opt0); |
368 | |
369 | opt2 |= RX_FC_VALID_F; |
370 | req->opt2 = cpu_to_be32(opt2); |
371 | |
372 | req->params = cpu_to_be32(cxgb4_select_ntuple( |
373 | csk->cdev->ports[csk->port_id], |
374 | csk->l2t)); |
375 | } else if (is_t5(lldi->adapter_type)) { |
376 | struct cpl_t5_act_open_req6 *req = |
377 | (struct cpl_t5_act_open_req6 *)skb->head; |
378 | |
379 | INIT_TP_WR(req, 0); |
380 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, |
381 | qid_atid)); |
382 | req->local_port = csk->saddr6.sin6_port; |
383 | req->peer_port = csk->daddr6.sin6_port; |
384 | req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); |
385 | req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + |
386 | 8); |
387 | req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); |
388 | req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + |
389 | 8); |
390 | req->opt0 = cpu_to_be64(opt0); |
391 | |
392 | opt2 |= T5_OPT_2_VALID_F; |
393 | req->opt2 = cpu_to_be32(opt2); |
394 | |
395 | req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( |
396 | csk->cdev->ports[csk->port_id], |
397 | csk->l2t))); |
398 | } else { |
399 | struct cpl_t6_act_open_req6 *req = |
400 | (struct cpl_t6_act_open_req6 *)skb->head; |
401 | |
402 | INIT_TP_WR(req, 0); |
403 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, |
404 | qid_atid)); |
405 | req->local_port = csk->saddr6.sin6_port; |
406 | req->peer_port = csk->daddr6.sin6_port; |
407 | req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); |
408 | req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + |
409 | 8); |
410 | req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); |
411 | req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + |
412 | 8); |
413 | req->opt0 = cpu_to_be64(opt0); |
414 | |
415 | opt2 |= RX_FC_DISABLE_F; |
416 | opt2 |= T5_OPT_2_VALID_F; |
417 | |
418 | req->opt2 = cpu_to_be32(opt2); |
419 | |
420 | req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( |
421 | csk->cdev->ports[csk->port_id], |
422 | csk->l2t))); |
423 | |
424 | req->rsvd2 = cpu_to_be32(0); |
425 | req->opt3 = cpu_to_be32(0); |
426 | } |
427 | |
428 | set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); |
429 | |
430 | pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n" , |
431 | CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, |
432 | csk->flags, csk->atid, |
433 | &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), |
434 | &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), |
435 | csk->rss_qid); |
436 | |
437 | cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); |
438 | } |
439 | #endif |
440 | |
441 | static void send_close_req(struct cxgbi_sock *csk) |
442 | { |
443 | struct sk_buff *skb = csk->cpl_close; |
444 | struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; |
445 | unsigned int tid = csk->tid; |
446 | |
447 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
448 | "csk 0x%p,%u,0x%lx, tid %u.\n" , |
449 | csk, csk->state, csk->flags, csk->tid); |
450 | csk->cpl_close = NULL; |
451 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); |
452 | INIT_TP_WR(req, tid); |
453 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); |
454 | req->rsvd = 0; |
455 | |
456 | cxgbi_sock_skb_entail(csk, skb); |
457 | if (csk->state >= CTP_ESTABLISHED) |
458 | push_tx_frames(csk, 1); |
459 | } |
460 | |
461 | static void abort_arp_failure(void *handle, struct sk_buff *skb) |
462 | { |
463 | struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; |
464 | struct cpl_abort_req *req; |
465 | |
466 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
467 | "csk 0x%p,%u,0x%lx, tid %u, abort.\n" , |
468 | csk, csk->state, csk->flags, csk->tid); |
469 | req = (struct cpl_abort_req *)skb->data; |
470 | req->cmd = CPL_ABORT_NO_RST; |
471 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
472 | } |
473 | |
474 | static void send_abort_req(struct cxgbi_sock *csk) |
475 | { |
476 | struct cpl_abort_req *req; |
477 | struct sk_buff *skb = csk->cpl_abort_req; |
478 | |
479 | if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) |
480 | return; |
481 | |
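/* A FlowC WR is expected by the firmware as the first WR on a connection;
 * if no TX data has been sent yet, issue one before the abort request.
 */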
482 | if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { |
483 | send_tx_flowc_wr(csk); |
484 | cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); |
485 | } |
486 | |
487 | cxgbi_sock_set_state(csk, CTP_ABORTING); |
488 | cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); |
489 | cxgbi_sock_purge_write_queue(csk); |
490 | |
491 | csk->cpl_abort_req = NULL; |
492 | req = (struct cpl_abort_req *)skb->head; |
493 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); |
494 | req->cmd = CPL_ABORT_SEND_RST; |
495 | t4_set_arp_err_handler(skb, csk, abort_arp_failure); |
496 | INIT_TP_WR(req, csk->tid); |
497 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); |
498 | req->rsvd0 = htonl(csk->snd_nxt); |
499 | req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); |
500 | |
501 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
502 | "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n" , |
503 | csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, |
504 | req->rsvd1); |
505 | |
506 | cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); |
507 | } |
508 | |
509 | static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) |
510 | { |
511 | struct sk_buff *skb = csk->cpl_abort_rpl; |
512 | struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; |
513 | |
514 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
515 | "csk 0x%p,%u,0x%lx,%u, status %d.\n" , |
516 | csk, csk->state, csk->flags, csk->tid, rst_status); |
517 | |
518 | csk->cpl_abort_rpl = NULL; |
519 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); |
520 | INIT_TP_WR(rpl, csk->tid); |
521 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); |
522 | rpl->cmd = rst_status; |
523 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
524 | } |
525 | |
526 | /* |
527 | * CPL connection rx data ack: host -> |
528 | * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of |
529 | * credits sent. |
530 | */ |
531 | static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) |
532 | { |
533 | struct sk_buff *skb; |
534 | struct cpl_rx_data_ack *req; |
535 | |
536 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
537 | "csk 0x%p,%u,0x%lx,%u, credit %u.\n" , |
538 | csk, csk->state, csk->flags, csk->tid, credits); |
539 | |
540 | skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); |
541 | if (!skb) { |
542 | pr_info("csk 0x%p, credit %u, OOM.\n" , csk, credits); |
543 | return 0; |
544 | } |
545 | req = (struct cpl_rx_data_ack *)skb->head; |
546 | |
547 | set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); |
548 | INIT_TP_WR(req, csk->tid); |
549 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, |
550 | csk->tid)); |
551 | req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
552 | | RX_FORCE_ACK_F); |
553 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
554 | return credits; |
555 | } |
556 | |
557 | /* |
558 | * sgl_len - calculates the size of an SGL of the given capacity |
559 | * @n: the number of SGL entries |
560 | * Calculates the number of flits needed for a scatter/gather list that |
561 | * can hold the given number of entries. |
562 | */ |
563 | static inline unsigned int sgl_len(unsigned int n) |
564 | { |
565 | n--; |
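/* The ULPTX SGL header plus the first address/length pair take two flits;
 * each additional pair of entries shares a three-flit ulptx_sge_pair, and
 * an odd trailing entry rounds up by one flit.
 */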
566 | return (3 * n) / 2 + (n & 1) + 2; |
567 | } |
568 | |
569 | /* |
570 | * calc_tx_flits_ofld - calculate # of flits for an offload packet |
571 | * @skb: the packet |
572 | * |
573 | * Returns the number of flits needed for the given offload packet. |
574 | * These packets are already fully constructed and no additional headers |
575 | * will be added. |
576 | */ |
577 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) |
578 | { |
579 | unsigned int flits, cnt; |
580 | |
581 | if (is_ofld_imm(skb)) |
582 | return DIV_ROUND_UP(skb->len, 8); |
583 | flits = skb_transport_offset(skb) / 8; |
584 | cnt = skb_shinfo(skb)->nr_frags; |
585 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) |
586 | cnt++; |
587 | return flits + sgl_len(cnt); |
588 | } |
589 | |
590 | #define FLOWC_WR_NPARAMS_MIN 9 |
591 | static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp) |
592 | { |
593 | int nparams, flowclen16, flowclen; |
594 | |
595 | nparams = FLOWC_WR_NPARAMS_MIN; |
596 | #ifdef CONFIG_CHELSIO_T4_DCB |
597 | nparams++; |
598 | #endif |
599 | flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); |
600 | flowclen16 = DIV_ROUND_UP(flowclen, 16); |
601 | flowclen = flowclen16 * 16; |
602 | /* |
603 | * Return the number of 16-byte credits used by the FlowC request. |
604 | * Pass back the nparams and actual FlowC length if requested. |
605 | */ |
606 | if (nparamsp) |
607 | *nparamsp = nparams; |
608 | if (flowclenp) |
609 | *flowclenp = flowclen; |
610 | |
611 | return flowclen16; |
612 | } |
613 | |
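/* Program the connection's flow context in firmware (channel, ingress queue,
 * send/receive sequence numbers, send buffer and MSS) via a FW_FLOWC_WR.
 * Returns the number of 16-byte credits consumed by the request.
 */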
614 | static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) |
615 | { |
616 | struct sk_buff *skb; |
617 | struct fw_flowc_wr *flowc; |
618 | int nparams, flowclen16, flowclen; |
619 | |
620 | #ifdef CONFIG_CHELSIO_T4_DCB |
621 | u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; |
622 | #endif |
623 | flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen); |
624 | skb = alloc_wr(flowclen, 0, GFP_ATOMIC); |
625 | flowc = (struct fw_flowc_wr *)skb->head; |
626 | flowc->op_to_nparams = |
627 | htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); |
628 | flowc->flowid_len16 = |
629 | htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); |
630 | flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; |
631 | flowc->mnemval[0].val = htonl(csk->cdev->pfvf); |
632 | flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; |
633 | flowc->mnemval[1].val = htonl(csk->tx_chan); |
634 | flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; |
635 | flowc->mnemval[2].val = htonl(csk->tx_chan); |
636 | flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; |
637 | flowc->mnemval[3].val = htonl(csk->rss_qid); |
638 | flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; |
639 | flowc->mnemval[4].val = htonl(csk->snd_nxt); |
640 | flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; |
641 | flowc->mnemval[5].val = htonl(csk->rcv_nxt); |
642 | flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; |
643 | flowc->mnemval[6].val = htonl(csk->snd_win); |
644 | flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; |
645 | flowc->mnemval[7].val = htonl(csk->advmss); |
646 | flowc->mnemval[8].mnemonic = 0; |
647 | flowc->mnemval[8].val = 0; |
648 | flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; |
649 | if (csk->cdev->skb_iso_txhdr) |
650 | flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB); |
651 | else |
652 | flowc->mnemval[8].val = cpu_to_be32(16128); |
653 | #ifdef CONFIG_CHELSIO_T4_DCB |
654 | flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO; |
655 | if (vlan == CPL_L2T_VLAN_NONE) { |
656 | pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n" , |
657 | csk->tid); |
658 | flowc->mnemval[9].val = cpu_to_be32(0); |
659 | } else { |
660 | flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >> |
661 | VLAN_PRIO_SHIFT); |
662 | } |
663 | #endif |
664 | |
665 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); |
666 | |
667 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
668 | "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n" , |
669 | csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, |
670 | csk->snd_nxt, csk->rcv_nxt, csk->snd_win, |
671 | csk->advmss); |
672 | |
673 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
674 | |
675 | return flowclen16; |
676 | } |
677 | |
678 | static void |
679 | cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl) |
680 | { |
681 | struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head; |
682 | u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE); |
683 | u32 fslice = !!(info->flags & CXGBI_ISO_INFO_FSLICE); |
684 | u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE); |
685 | u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 0 : 1; |
686 | u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3; |
687 | |
688 | cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) | |
689 | CPL_TX_DATA_ISO_FIRST_V(fslice) | |
690 | CPL_TX_DATA_ISO_LAST_V(lslice) | |
691 | CPL_TX_DATA_ISO_CPLHDRLEN_V(0) | |
692 | CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) | |
693 | CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) | |
694 | CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) | |
695 | CPL_TX_DATA_ISO_SCSI_V(pdu_type)); |
696 | |
697 | cpl->ahs_len = info->ahs; |
698 | cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4)); |
699 | cpl->burst_size = cpu_to_be32(info->burst_size); |
700 | cpl->len = cpu_to_be32(info->len); |
701 | cpl->reserved2_seglen_offset = |
702 | cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset)); |
703 | cpl->datasn_offset = cpu_to_be32(info->datasn_offset); |
704 | cpl->buffer_offset = cpu_to_be32(info->buffer_offset); |
705 | cpl->reserved3 = cpu_to_be32(0); |
706 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
707 | "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, " |
708 | "burst_size %u, iso_len %u\n" , |
709 | info->flags, info->op, info->ahs, info->num_pdu, |
710 | info->mpdu, info->burst_size << 2, info->len); |
711 | } |
712 | |
713 | static void |
714 | cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen, |
715 | int len, u32 credits, int compl) |
716 | { |
717 | struct cxgbi_device *cdev = csk->cdev; |
718 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
719 | struct fw_ofld_tx_data_wr *req; |
720 | struct cpl_tx_data_iso *cpl; |
721 | u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3; |
722 | u32 wr_ulp_mode = 0; |
723 | u32 hdr_size = sizeof(*req); |
724 | u32 opcode = FW_OFLD_TX_DATA_WR; |
725 | u32 immlen = 0; |
726 | u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) : |
727 | T6_TX_FORCE_F; |
728 | |
729 | if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) { |
730 | hdr_size += sizeof(struct cpl_tx_data_iso); |
731 | opcode = FW_ISCSI_TX_DATA_WR; |
732 | immlen += sizeof(struct cpl_tx_data_iso); |
733 | submode |= 8; |
734 | } |
735 | |
736 | if (is_ofld_imm(skb)) |
737 | immlen += dlen; |
738 | |
739 | req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size); |
740 | req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) | |
741 | FW_WR_COMPL_V(compl) | |
742 | FW_WR_IMMDLEN_V(immlen)); |
743 | req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | |
744 | FW_WR_LEN16_V(credits)); |
745 | req->plen = cpu_to_be32(len); |
746 | cpl = (struct cpl_tx_data_iso *)(req + 1); |
747 | |
748 | if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) |
749 | cxgb4i_make_tx_iso_cpl(skb, cpl); |
750 | |
751 | if (submode) |
752 | wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) | |
753 | FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); |
754 | |
755 | req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force | |
756 | FW_OFLD_TX_DATA_WR_SHOVE_V(1U)); |
757 | |
758 | if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) |
759 | cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); |
760 | } |
761 | |
762 | static void arp_failure_skb_discard(void *handle, struct sk_buff *skb) |
763 | { |
764 | kfree_skb(skb); |
765 | } |
766 | |
767 | static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) |
768 | { |
769 | int total_size = 0; |
770 | struct sk_buff *skb; |
771 | |
772 | if (unlikely(csk->state < CTP_ESTABLISHED || |
773 | csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { |
774 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK | |
775 | 1 << CXGBI_DBG_PDU_TX, |
776 | "csk 0x%p,%u,0x%lx,%u, in closing state.\n" , |
777 | csk, csk->state, csk->flags, csk->tid); |
778 | return 0; |
779 | } |
780 | |
781 | while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) { |
782 | struct cxgbi_iso_info *iso_cpl; |
783 | u32 dlen = skb->len; |
784 | u32 len = skb->len; |
785 | u32 iso_cpl_len = 0; |
786 | u32 flowclen16 = 0; |
787 | u32 credits_needed; |
788 | u32 num_pdu = 1, hdr_len; |
789 | |
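/* Work-request credits are in 16-byte units: round the WR size (CPL/WR
 * headers plus immediate data, or the SGL flits) up to 16-byte chunks.
 */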
790 | if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) |
791 | iso_cpl_len = sizeof(struct cpl_tx_data_iso); |
792 | |
793 | if (is_ofld_imm(skb)) |
794 | credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16); |
795 | else |
796 | credits_needed = |
797 | DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) + |
798 | iso_cpl_len, 16); |
799 | |
800 | if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) |
801 | credits_needed += |
802 | DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16); |
803 | |
804 | /* |
805 | * Assumes the initial credit allocation is large enough to support |
806 | * the fw_flowc_wr plus the largest possible first payload. |
807 | */ |
808 | if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { |
809 | flowclen16 = send_tx_flowc_wr(csk); |
810 | csk->wr_cred -= flowclen16; |
811 | csk->wr_una_cred += flowclen16; |
812 | cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); |
813 | } |
814 | |
815 | if (csk->wr_cred < credits_needed) { |
816 | log_debug(1 << CXGBI_DBG_PDU_TX, |
817 | "csk 0x%p, skb %u/%u, wr %d < %u.\n" , |
818 | csk, skb->len, skb->data_len, |
819 | credits_needed, csk->wr_cred); |
820 | |
821 | csk->no_tx_credits++; |
822 | break; |
823 | } |
824 | |
825 | csk->no_tx_credits = 0; |
826 | |
827 | __skb_unlink(skb, &csk->write_queue); |
828 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); |
829 | skb->csum = (__force __wsum)(credits_needed + flowclen16); |
830 | csk->wr_cred -= credits_needed; |
831 | csk->wr_una_cred += credits_needed; |
832 | cxgbi_sock_enqueue_wr(csk, skb); |
833 | |
834 | log_debug(1 << CXGBI_DBG_PDU_TX, |
835 | "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n" , |
836 | csk, skb->len, skb->data_len, credits_needed, |
837 | csk->wr_cred, csk->wr_una_cred); |
838 | |
839 | if (!req_completion && |
840 | ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || |
841 | after(csk->write_seq, (csk->snd_una + csk->snd_win / 2)))) |
842 | req_completion = 1; |
843 | |
844 | if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { |
845 | u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb); |
846 | |
847 | if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) { |
848 | iso_cpl = (struct cxgbi_iso_info *)skb->head; |
849 | num_pdu = iso_cpl->num_pdu; |
850 | hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb); |
851 | len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) + |
852 | (hdr_len * (num_pdu - 1)); |
853 | } else { |
854 | len += cxgbi_ulp_extra_len(ulp_mode); |
855 | } |
856 | |
857 | cxgb4i_make_tx_data_wr(csk, skb, dlen, len, |
858 | credits_needed, req_completion); |
859 | csk->snd_nxt += len; |
860 | cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); |
861 | } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) && |
862 | (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { |
863 | struct cpl_close_con_req *req = |
864 | (struct cpl_close_con_req *)skb->data; |
865 | |
866 | req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F); |
867 | } |
868 | |
869 | total_size += skb->truesize; |
870 | t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); |
871 | |
872 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, |
873 | "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n" , |
874 | csk, csk->state, csk->flags, csk->tid, skb, len); |
875 | cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); |
876 | } |
877 | return total_size; |
878 | } |
879 | |
880 | static inline void free_atid(struct cxgbi_sock *csk) |
881 | { |
882 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); |
883 | |
884 | if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { |
885 | cxgb4_free_atid(lldi->tids, csk->atid); |
886 | cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); |
887 | cxgbi_sock_put(csk); |
888 | } |
889 | } |
890 | |
891 | static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) |
892 | { |
893 | struct cxgbi_sock *csk; |
894 | struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; |
895 | unsigned short tcp_opt = ntohs(req->tcp_opt); |
896 | unsigned int tid = GET_TID(req); |
897 | unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); |
898 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
899 | struct tid_info *t = lldi->tids; |
900 | u32 rcv_isn = be32_to_cpu(req->rcv_isn); |
901 | |
902 | csk = lookup_atid(t, atid); |
903 | if (unlikely(!csk)) { |
904 | pr_err("NO conn. for atid %u, cdev 0x%p.\n" , atid, cdev); |
905 | goto rel_skb; |
906 | } |
907 | |
908 | if (csk->atid != atid) { |
909 | pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n" , |
910 | atid, csk, csk->state, csk->flags, csk->tid, csk->atid); |
911 | goto rel_skb; |
912 | } |
913 | |
914 | pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n" , |
915 | (&csk->saddr), (&csk->daddr), |
916 | atid, tid, csk, csk->state, csk->flags, rcv_isn); |
917 | |
918 | module_put(cdev->owner); |
919 | |
920 | cxgbi_sock_get(csk); |
921 | csk->tid = tid; |
922 | cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family); |
923 | cxgbi_sock_set_flag(csk, CTPF_HAS_TID); |
924 | |
925 | free_atid(csk); |
926 | |
927 | spin_lock_bh(&csk->lock); |
928 | if (unlikely(csk->state != CTP_ACTIVE_OPEN)) |
929 | pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n" , |
930 | csk, csk->state, csk->flags, csk->tid); |
931 | |
932 | if (csk->retry_timer.function) { |
933 | del_timer(&csk->retry_timer); |
934 | csk->retry_timer.function = NULL; |
935 | } |
936 | |
937 | csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; |
938 | /* |
939 | * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't |
940 | * pass through opt0. |
941 | */ |
942 | if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10)) |
943 | csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10); |
944 | |
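/* Derive the advertised MSS from the negotiated MTU index: subtract 40 bytes
 * of IP + TCP headers, and 12 more if TCP timestamps were negotiated.
 */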
945 | csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; |
946 | if (TCPOPT_TSTAMP_G(tcp_opt)) |
947 | csk->advmss -= 12; |
948 | if (csk->advmss < 128) |
949 | csk->advmss = 128; |
950 | |
951 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
952 | "csk 0x%p, mss_idx %u, advmss %u.\n" , |
953 | csk, TCPOPT_MSS_G(tcp_opt), csk->advmss); |
954 | |
955 | cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); |
956 | |
957 | if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) |
958 | send_abort_req(csk); |
959 | else { |
960 | if (skb_queue_len(&csk->write_queue)) |
961 | push_tx_frames(csk, 0); |
962 | cxgbi_conn_tx_open(csk); |
963 | } |
964 | spin_unlock_bh(&csk->lock); |
965 | |
966 | rel_skb: |
967 | __kfree_skb(skb); |
968 | } |
969 | |
970 | static int act_open_rpl_status_to_errno(int status) |
971 | { |
972 | switch (status) { |
973 | case CPL_ERR_CONN_RESET: |
974 | return -ECONNREFUSED; |
975 | case CPL_ERR_ARP_MISS: |
976 | return -EHOSTUNREACH; |
977 | case CPL_ERR_CONN_TIMEDOUT: |
978 | return -ETIMEDOUT; |
979 | case CPL_ERR_TCAM_FULL: |
980 | return -ENOMEM; |
981 | case CPL_ERR_CONN_EXIST: |
982 | return -EADDRINUSE; |
983 | default: |
984 | return -EIO; |
985 | } |
986 | } |
987 | |
988 | static void csk_act_open_retry_timer(struct timer_list *t) |
989 | { |
990 | struct sk_buff *skb = NULL; |
991 | struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); |
992 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); |
993 | void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, |
994 | struct l2t_entry *); |
995 | int t4 = is_t4(lldi->adapter_type), size, size6; |
996 | |
997 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
998 | "csk 0x%p,%u,0x%lx,%u.\n" , |
999 | csk, csk->state, csk->flags, csk->tid); |
1000 | |
1001 | cxgbi_sock_get(csk); |
1002 | spin_lock_bh(&csk->lock); |
1003 | |
1004 | if (t4) { |
1005 | size = sizeof(struct cpl_act_open_req); |
1006 | size6 = sizeof(struct cpl_act_open_req6); |
1007 | } else { |
1008 | size = sizeof(struct cpl_t5_act_open_req); |
1009 | size6 = sizeof(struct cpl_t5_act_open_req6); |
1010 | } |
1011 | |
1012 | if (csk->csk_family == AF_INET) { |
1013 | send_act_open_func = send_act_open_req; |
1014 | skb = alloc_wr(size, 0, GFP_ATOMIC); |
1015 | #if IS_ENABLED(CONFIG_IPV6) |
1016 | } else { |
1017 | send_act_open_func = send_act_open_req6; |
1018 | skb = alloc_wr(size6, 0, GFP_ATOMIC); |
1019 | #endif |
1020 | } |
1021 | |
1022 | if (!skb) |
1023 | cxgbi_sock_fail_act_open(csk, -ENOMEM); |
1024 | else { |
1025 | skb->sk = (struct sock *)csk; |
1026 | t4_set_arp_err_handler(skb, csk, |
1027 | cxgbi_sock_act_open_req_arp_failure); |
1028 | send_act_open_func(csk, skb, csk->l2t); |
1029 | } |
1030 | |
1031 | spin_unlock_bh(&csk->lock); |
1032 | cxgbi_sock_put(csk); |
1033 | |
1034 | } |
1035 | |
1036 | static inline bool is_neg_adv(unsigned int status) |
1037 | { |
1038 | return status == CPL_ERR_RTX_NEG_ADVICE || |
1039 | status == CPL_ERR_KEEPALV_NEG_ADVICE || |
1040 | status == CPL_ERR_PERSIST_NEG_ADVICE; |
1041 | } |
1042 | |
1043 | static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) |
1044 | { |
1045 | struct cxgbi_sock *csk; |
1046 | struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; |
1047 | unsigned int tid = GET_TID(rpl); |
1048 | unsigned int atid = |
1049 | TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status))); |
1050 | unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status)); |
1051 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1052 | struct tid_info *t = lldi->tids; |
1053 | |
1054 | csk = lookup_atid(t, atid); |
1055 | if (unlikely(!csk)) { |
1056 | pr_err("NO matching conn. atid %u, tid %u.\n" , atid, tid); |
1057 | goto rel_skb; |
1058 | } |
1059 | |
1060 | pr_info_ipaddr("tid %u/%u, status %u.\n" |
1061 | "csk 0x%p,%u,0x%lx. " , (&csk->saddr), (&csk->daddr), |
1062 | atid, tid, status, csk, csk->state, csk->flags); |
1063 | |
1064 | if (is_neg_adv(status)) |
1065 | goto rel_skb; |
1066 | |
1067 | module_put(cdev->owner); |
1068 | |
1069 | if (status && status != CPL_ERR_TCAM_FULL && |
1070 | status != CPL_ERR_CONN_EXIST && |
1071 | status != CPL_ERR_ARP_MISS) |
1072 | cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl), |
1073 | csk->csk_family); |
1074 | |
1075 | cxgbi_sock_get(csk); |
1076 | spin_lock_bh(&csk->lock); |
1077 | |
1078 | if (status == CPL_ERR_CONN_EXIST && |
1079 | csk->retry_timer.function != csk_act_open_retry_timer) { |
1080 | csk->retry_timer.function = csk_act_open_retry_timer; |
1081 | mod_timer(&csk->retry_timer, jiffies + HZ / 2); |
1082 | } else |
1083 | cxgbi_sock_fail_act_open(csk, |
1084 | act_open_rpl_status_to_errno(status)); |
1085 | |
1086 | spin_unlock_bh(&csk->lock); |
1087 | cxgbi_sock_put(csk); |
1088 | rel_skb: |
1089 | __kfree_skb(skb); |
1090 | } |
1091 | |
1092 | static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) |
1093 | { |
1094 | struct cxgbi_sock *csk; |
1095 | struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data; |
1096 | unsigned int tid = GET_TID(req); |
1097 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1098 | struct tid_info *t = lldi->tids; |
1099 | |
1100 | csk = lookup_tid(t, tid); |
1101 | if (unlikely(!csk)) { |
1102 | pr_err("can't find connection for tid %u.\n" , tid); |
1103 | goto rel_skb; |
1104 | } |
1105 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n" , |
1106 | (&csk->saddr), (&csk->daddr), |
1107 | csk, csk->state, csk->flags, csk->tid); |
1108 | cxgbi_sock_rcv_peer_close(csk); |
1109 | rel_skb: |
1110 | __kfree_skb(skb); |
1111 | } |
1112 | |
1113 | static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) |
1114 | { |
1115 | struct cxgbi_sock *csk; |
1116 | struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data; |
1117 | unsigned int tid = GET_TID(rpl); |
1118 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1119 | struct tid_info *t = lldi->tids; |
1120 | |
1121 | csk = lookup_tid(t, tid); |
1122 | if (unlikely(!csk)) { |
1123 | pr_err("can't find connection for tid %u.\n" , tid); |
1124 | goto rel_skb; |
1125 | } |
1126 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n" , |
1127 | (&csk->saddr), (&csk->daddr), |
1128 | csk, csk->state, csk->flags, csk->tid); |
1129 | cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); |
1130 | rel_skb: |
1131 | __kfree_skb(skb); |
1132 | } |
1133 | |
1134 | static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, |
1135 | int *need_rst) |
1136 | { |
1137 | switch (abort_reason) { |
1138 | case CPL_ERR_BAD_SYN: |
1139 | case CPL_ERR_CONN_RESET: |
1140 | return csk->state > CTP_ESTABLISHED ? |
1141 | -EPIPE : -ECONNRESET; |
1142 | case CPL_ERR_XMIT_TIMEDOUT: |
1143 | case CPL_ERR_PERSIST_TIMEDOUT: |
1144 | case CPL_ERR_FINWAIT2_TIMEDOUT: |
1145 | case CPL_ERR_KEEPALIVE_TIMEDOUT: |
1146 | return -ETIMEDOUT; |
1147 | default: |
1148 | return -EIO; |
1149 | } |
1150 | } |
1151 | |
1152 | static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) |
1153 | { |
1154 | struct cxgbi_sock *csk; |
1155 | struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data; |
1156 | unsigned int tid = GET_TID(req); |
1157 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1158 | struct tid_info *t = lldi->tids; |
1159 | int rst_status = CPL_ABORT_NO_RST; |
1160 | |
1161 | csk = lookup_tid(t, tid); |
1162 | if (unlikely(!csk)) { |
1163 | pr_err("can't find connection for tid %u.\n" , tid); |
1164 | goto rel_skb; |
1165 | } |
1166 | |
1167 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n" , |
1168 | (&csk->saddr), (&csk->daddr), |
1169 | csk, csk->state, csk->flags, csk->tid, req->status); |
1170 | |
1171 | if (is_neg_adv(req->status)) |
1172 | goto rel_skb; |
1173 | |
1174 | cxgbi_sock_get(csk); |
1175 | spin_lock_bh(&csk->lock); |
1176 | |
1177 | cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); |
1178 | |
1179 | if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { |
1180 | send_tx_flowc_wr(csk); |
1181 | cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); |
1182 | } |
1183 | |
1184 | cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); |
1185 | cxgbi_sock_set_state(csk, CTP_ABORTING); |
1186 | |
1187 | send_abort_rpl(csk, rst_status); |
1188 | |
1189 | if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { |
1190 | csk->err = abort_status_to_errno(csk, req->status, &rst_status); |
1191 | cxgbi_sock_closed(csk); |
1192 | } |
1193 | |
1194 | spin_unlock_bh(&csk->lock); |
1195 | cxgbi_sock_put(csk); |
1196 | rel_skb: |
1197 | __kfree_skb(skb); |
1198 | } |
1199 | |
1200 | static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) |
1201 | { |
1202 | struct cxgbi_sock *csk; |
1203 | struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data; |
1204 | unsigned int tid = GET_TID(rpl); |
1205 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1206 | struct tid_info *t = lldi->tids; |
1207 | |
1208 | csk = lookup_tid(t, tid); |
1209 | if (!csk) |
1210 | goto rel_skb; |
1211 | |
1212 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n" , |
1213 | (&csk->saddr), (&csk->daddr), csk, |
1214 | csk->state, csk->flags, csk->tid, rpl->status); |
1215 | |
1216 | if (rpl->status == CPL_ERR_ABORT_FAILED) |
1217 | goto rel_skb; |
1218 | |
1219 | cxgbi_sock_rcv_abort_rpl(csk); |
1220 | rel_skb: |
1221 | __kfree_skb(skb); |
1222 | } |
1223 | |
1224 | static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb) |
1225 | { |
1226 | struct cxgbi_sock *csk; |
1227 | struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data; |
1228 | unsigned int tid = GET_TID(cpl); |
1229 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1230 | struct tid_info *t = lldi->tids; |
1231 | |
1232 | csk = lookup_tid(t, tid); |
1233 | if (!csk) { |
1234 | pr_err("can't find connection for tid %u.\n" , tid); |
1235 | } else { |
1236 | /* not expecting this, reset the connection. */ |
1237 | pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n" , csk, tid); |
1238 | spin_lock_bh(&csk->lock); |
1239 | send_abort_req(csk); |
1240 | spin_unlock_bh(&csk->lock); |
1241 | } |
1242 | __kfree_skb(skb); |
1243 | } |
1244 | |
1245 | static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) |
1246 | { |
1247 | struct cxgbi_sock *csk; |
1248 | struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; |
1249 | unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); |
1250 | unsigned int tid = GET_TID(cpl); |
1251 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1252 | struct tid_info *t = lldi->tids; |
1253 | |
1254 | csk = lookup_tid(t, tid); |
1255 | if (unlikely(!csk)) { |
1256 | pr_err("can't find conn. for tid %u.\n" , tid); |
1257 | goto rel_skb; |
1258 | } |
1259 | |
1260 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
1261 | "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n" , |
1262 | csk, csk->state, csk->flags, csk->tid, skb, skb->len, |
1263 | pdu_len_ddp); |
1264 | |
1265 | spin_lock_bh(&csk->lock); |
1266 | |
1267 | if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { |
1268 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
1269 | "csk 0x%p,%u,0x%lx,%u, bad state.\n" , |
1270 | csk, csk->state, csk->flags, csk->tid); |
1271 | if (csk->state != CTP_ABORTING) |
1272 | goto abort_conn; |
1273 | else |
1274 | goto discard; |
1275 | } |
1276 | |
1277 | cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq); |
1278 | cxgbi_skcb_flags(skb) = 0; |
1279 | |
1280 | skb_reset_transport_header(skb); |
1281 | __skb_pull(skb, sizeof(*cpl)); |
1282 | __pskb_trim(skb, ntohs(cpl->len)); |
1283 | |
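/* The first CPL_ISCSI_HDR of a PDU carries the BHS; remember it in
 * skb_ulp_lhdr so the payload and the final DDP status can be tied back
 * to the same PDU.
 */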
1284 | if (!csk->skb_ulp_lhdr) { |
1285 | unsigned char *bhs; |
1286 | unsigned int hlen, dlen, plen; |
1287 | |
1288 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
1289 | "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n" , |
1290 | csk, csk->state, csk->flags, csk->tid, skb); |
1291 | csk->skb_ulp_lhdr = skb; |
1292 | cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); |
1293 | |
1294 | if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) && |
1295 | (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) { |
1296 | pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n" , |
1297 | csk->tid, cxgbi_skcb_tcp_seq(skb), |
1298 | csk->rcv_nxt); |
1299 | goto abort_conn; |
1300 | } |
1301 | |
1302 | bhs = skb->data; |
1303 | hlen = ntohs(cpl->len); |
1304 | dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; |
1305 | |
1306 | plen = ISCSI_PDU_LEN_G(pdu_len_ddp); |
1307 | if (is_t4(lldi->adapter_type)) |
1308 | plen -= 40; |
1309 | |
1310 | if ((hlen + dlen) != plen) { |
1311 | pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len " |
1312 | "mismatch %u != %u + %u, seq 0x%x.\n" , |
1313 | csk->tid, plen, hlen, dlen, |
1314 | cxgbi_skcb_tcp_seq(skb)); |
1315 | goto abort_conn; |
1316 | } |
1317 | |
1318 | cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3); |
1319 | if (dlen) |
1320 | cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; |
1321 | csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); |
1322 | |
1323 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
1324 | "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n" , |
1325 | csk, skb, *bhs, hlen, dlen, |
1326 | ntohl(*((unsigned int *)(bhs + 16))), |
1327 | ntohl(*((unsigned int *)(bhs + 24)))); |
1328 | |
1329 | } else { |
1330 | struct sk_buff *lskb = csk->skb_ulp_lhdr; |
1331 | |
1332 | cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); |
1333 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
1334 | "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n" , |
1335 | csk, csk->state, csk->flags, skb, lskb); |
1336 | } |
1337 | |
1338 | __skb_queue_tail(&csk->receive_queue, skb); |
1339 | spin_unlock_bh(&csk->lock); |
1340 | return; |
1341 | |
1342 | abort_conn: |
1343 | send_abort_req(csk); |
1344 | discard: |
1345 | spin_unlock_bh(&csk->lock); |
1346 | rel_skb: |
1347 | __kfree_skb(skb); |
1348 | } |
1349 | |
1350 | static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb) |
1351 | { |
1352 | struct cxgbi_sock *csk; |
1353 | struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; |
1354 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1355 | struct tid_info *t = lldi->tids; |
1356 | struct sk_buff *lskb; |
1357 | u32 tid = GET_TID(cpl); |
1358 | u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); |
1359 | |
1360 | csk = lookup_tid(t, tid); |
1361 | if (unlikely(!csk)) { |
1362 | pr_err("can't find conn. for tid %u.\n" , tid); |
1363 | goto rel_skb; |
1364 | } |
1365 | |
1366 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
1367 | "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n" , |
1368 | csk, csk->state, csk->flags, csk->tid, skb, |
1369 | skb->len, pdu_len_ddp); |
1370 | |
1371 | spin_lock_bh(&csk->lock); |
1372 | |
1373 | if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { |
1374 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
1375 | "csk 0x%p,%u,0x%lx,%u, bad state.\n" , |
1376 | csk, csk->state, csk->flags, csk->tid); |
1377 | |
1378 | if (csk->state != CTP_ABORTING) |
1379 | goto abort_conn; |
1380 | else |
1381 | goto discard; |
1382 | } |
1383 | |
1384 | cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq); |
1385 | cxgbi_skcb_flags(skb) = 0; |
1386 | |
1387 | skb_reset_transport_header(skb); |
1388 | __skb_pull(skb, sizeof(*cpl)); |
1389 | __pskb_trim(skb, ntohs(cpl->len)); |
1390 | |
1391 | if (!csk->skb_ulp_lhdr) |
1392 | csk->skb_ulp_lhdr = skb; |
1393 | |
1394 | lskb = csk->skb_ulp_lhdr; |
1395 | cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); |
1396 | |
1397 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
1398 | "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n" , |
1399 | csk, csk->state, csk->flags, skb, lskb); |
1400 | |
1401 | __skb_queue_tail(&csk->receive_queue, skb); |
1402 | spin_unlock_bh(&csk->lock); |
1403 | return; |
1404 | |
1405 | abort_conn: |
1406 | send_abort_req(csk); |
1407 | discard: |
1408 | spin_unlock_bh(&csk->lock); |
1409 | rel_skb: |
1410 | __kfree_skb(skb); |
1411 | } |
1412 | |
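/* Translate the hardware DDP status word into per-skb flags: header/data
 * digest errors, padding errors, and whether the payload was placed
 * directly into the host buffer (DDP'ed).
 */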
1413 | static void |
1414 | cxgb4i_process_ddpvld(struct cxgbi_sock *csk, |
1415 | struct sk_buff *skb, u32 ddpvld) |
1416 | { |
1417 | if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { |
1418 | pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n" , |
1419 | csk, skb, ddpvld, cxgbi_skcb_flags(skb)); |
1420 | cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); |
1421 | } |
1422 | |
1423 | if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { |
1424 | pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n" , |
1425 | csk, skb, ddpvld, cxgbi_skcb_flags(skb)); |
1426 | cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); |
1427 | } |
1428 | |
1429 | if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { |
1430 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1431 | "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n" , |
1432 | csk, skb, ddpvld); |
1433 | cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); |
1434 | } |
1435 | |
1436 | if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && |
1437 | !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { |
1438 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1439 | "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n" , |
1440 | csk, skb, ddpvld); |
1441 | cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); |
1442 | } |
1443 | } |
1444 | |
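/* CPL_RX_DATA_DDP completes a PDU: it carries the data digest and the DDP
 * status for the PDU whose header was stashed earlier in skb_ulp_lhdr.
 */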
1445 | static void do_rx_data_ddp(struct cxgbi_device *cdev, |
1446 | struct sk_buff *skb) |
1447 | { |
1448 | struct cxgbi_sock *csk; |
1449 | struct sk_buff *lskb; |
1450 | struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data; |
1451 | unsigned int tid = GET_TID(rpl); |
1452 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
1453 | struct tid_info *t = lldi->tids; |
1454 | u32 ddpvld = be32_to_cpu(rpl->ddpvld); |
1455 | |
1456 | csk = lookup_tid(t, tid); |
1457 | if (unlikely(!csk)) { |
1458 | pr_err("can't find connection for tid %u.\n" , tid); |
1459 | goto rel_skb; |
1460 | } |
1461 | |
1462 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, |
1463 | "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n" , |
1464 | csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr); |
1465 | |
1466 | spin_lock_bh(&csk->lock); |
1467 | |
1468 | if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { |
1469 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
1470 | "csk 0x%p,%u,0x%lx,%u, bad state.\n" , |
1471 | csk, csk->state, csk->flags, csk->tid); |
1472 | if (csk->state != CTP_ABORTING) |
1473 | goto abort_conn; |
1474 | else |
1475 | goto discard; |
1476 | } |
1477 | |
1478 | if (!csk->skb_ulp_lhdr) { |
1479 | pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n" , csk->tid); |
1480 | goto abort_conn; |
1481 | } |
1482 | |
1483 | lskb = csk->skb_ulp_lhdr; |
1484 | csk->skb_ulp_lhdr = NULL; |
1485 | |
1486 | cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc); |
1487 | |
1488 | if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb)) |
1489 | pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n" , |
1490 | csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); |
1491 | |
1492 | cxgb4i_process_ddpvld(csk, lskb, ddpvld); |
1493 | |
1494 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1495 | "csk 0x%p, lskb 0x%p, f 0x%lx.\n" , |
1496 | csk, lskb, cxgbi_skcb_flags(lskb)); |
1497 | |
1498 | cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS); |
1499 | cxgbi_conn_pdu_ready(csk); |
1500 | spin_unlock_bh(&csk->lock); |
1501 | goto rel_skb; |
1502 | |
1503 | abort_conn: |
1504 | send_abort_req(csk); |
1505 | discard: |
1506 | spin_unlock_bh(&csk->lock); |
1507 | rel_skb: |
1508 | __kfree_skb(skb); |
1509 | } |
1510 | |
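/* On newer adapters the hardware can deliver a single CPL_RX_ISCSI_CMP that
 * combines the PDU header, DDP status and data digest, instead of separate
 * CPL_ISCSI_HDR and CPL_RX_DATA_DDP messages.
 */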
static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);

			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

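/*
 * CPL_FW4_ACK: firmware acknowledgment of transmitted work requests.
 * Return the freed-up tx credits to the socket so more data can be sent.
 */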
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

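/*
 * CPL_SET_TCB_RPL: reply to a SET_TCB_FIELD request issued by
 * ddp_setup_conn_pgidx()/ddp_setup_conn_digest(). Record any error and
 * complete csk->cmpl so the waiting caller can proceed.
 */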
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE) {
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);
		csk->err = -EINVAL;
	}

	complete(&csk->cmpl);

	__kfree_skb(skb);
}

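/*
 * Pre-allocate the CPL skbs (close, abort request, abort reply) needed to
 * tear a connection down, so teardown never has to allocate memory.
 */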
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

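/*
 * Undo everything init_act_open() set up for a connection: free the CPL
 * skbs and queued WRs, release the L2T entry, the CLIP entry (IPv6) and
 * the atid/tid, dropping the socket references taken along the way.
 */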
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}

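/*
 * DCB helpers: query the netdev's DCB state and look up the 802.1p
 * priority configured for the iSCSI application (TCP port 3260), trying
 * IEEE DCBX first and falling back to CEE.
 */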
#ifdef CONFIG_CHELSIO_T4_DCB
static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;
	return (ffs(pri_mask) - 1);
}

static u8 get_iscsi_dcb_priority(struct net_device *ndev)
{
	int rv;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = 3260
	};

	rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (rv)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
		rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		if (!rv) {
			iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
			rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		}
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		rv = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "iSCSI priority is set to %u\n", select_priority(rv));
	return select_priority(rv);
}
#endif

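/*
 * Start an active-open (initiator) connection: resolve the neighbour,
 * allocate an atid and L2T entry, size the send/receive windows from the
 * link speed, then send the chip-specific CPL_ACT_OPEN_REQ. The reply is
 * handled asynchronously by the act_open_rpl/act_establish handlers.
 */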
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

#ifdef CONFIG_CHELSIO_T4_DCB
	if (get_iscsi_dcb_state(ndev))
		priority = get_iscsi_dcb_priority(ndev);

	csk->dcb_priority = priority;
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

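/*
 * Dispatch table mapping CPL opcodes delivered by the LLD to their
 * handlers; indexed directly by opcode in t4_uld_rx_handler().
 */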
static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};

static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

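/*
 * Build the ULP_TX memory-write work request header used to push iSCSI
 * page-pod (DDP) entries into adapter memory as immediate data.
 */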
static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
			 T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}

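/*
 * Fill one ULP_TX work request with up to ULPMEM_IDATA_MAX_NPPODS page
 * pods describing the task's scatterlist and queue it on the connection's
 * tx path, so it is written to adapter memory in order with the data.
 */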
static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	spin_lock_bh(&csk->lock);
	cxgbi_sock_skb_entail(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

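/*
 * Program the full set of page pods for a DDP task tag, splitting the
 * range into chunks small enough to fit in one immediate-data WR each.
 */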
static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}

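/*
 * Set the DDP page-size index in the connection's TCB via
 * CPL_SET_TCB_FIELD and wait for the reply before returning.
 */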
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}

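/*
 * Enable or disable header/data CRC offload for the connection by
 * updating the ULP submode bits in its TCB, again synchronously.
 */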
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}

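/*
 * Initialize DDP support: verify the adapter reserved iSCSI memory, set
 * up the page-pod manager and tag format, and wire up the DDP callbacks
 * and PDU size limits used by libcxgbi.
 */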
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	int i, err;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x",
		lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);

	err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
				  lldi->vr->iscsi.size, lldi->iscsi_llimit,
				  lldi->vr->iscsi.start, 2,
				  lldi->vr->ppod_edram.start,
				  lldi->vr->ppod_edram.size);

	if (err < 0)
		return err;

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}

static bool is_memfree(struct adapter *adap)
{
	u32 io;

	io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
	if (is_t5(adap->params.chip)) {
		if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F))
			return false;
	} else if (io & EXT_MEM_ENABLE_F) {
		return false;
	}

	return true;
}

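/*
 * ULD "add" callback, invoked by cxgb4 for each adapter: register a
 * cxgbi_device, copy in the lower-level driver info, initialize DDP and
 * offload support, and create one iSCSI host per port.
 */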
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	struct net_device *ndev;
	struct adapter *adap;
	struct tid_info *t;
	u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH;
	u32 max_conn = CXGBI_MAX_CONN;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
		goto err_out;
	}

	ndev = cdev->ports[0];
	adap = netdev2adap(ndev);
	if (adap) {
		t = &adap->tids;
		if (t->ntids <= CXGBI_MAX_CONN)
			max_conn = t->ntids;

		if (is_memfree(adap)) {
			cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
			max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2;

			pr_info("%s: 0x%p, tid %u, SO adapter.\n",
				ndev->name, cdev, t->ntids);
		}
	} else {
		pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
	}

	/* ISO is enabled in T5/T6 firmware version >= 1.13.43.0 */
	if (!is_t4(lldi->adapter_type) &&
	    (lldi->fw_vers >= 0x10d2b00) &&
	    !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
		cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);

	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	cxgb4i_host_template.can_queue = max_cmds;
	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

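/*
 * ULD receive callback: turn the response descriptor (with or without a
 * packet-gather list) into an skb and dispatch it to the CPL handler
 * matching its opcode. Returns nonzero only if skb allocation fails.
 */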
#define RX_PULL_LEN 128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);

	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

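/*
 * DCB application-priority change notifier: when the priority configured
 * for the iSCSI app (port 3260) changes on one of our ports, fail the
 * affected connections so they are re-established at the new priority.
 */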
#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
		    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);