// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <net/tls.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>

#include "chtls.h"
#include "chtls_cm.h"
#include "clip_tbl.h"
#include "t4_tcb.h"
/*
 * State transitions and actions for close.  Note that if we are in SYN_SENT
 * we remain in that state as we cannot control a connection while it's in
 * SYN_SENT; such connections are allowed to establish and are then aborted.
 */
static unsigned char new_state[16] = {
	/* current state:     new state:    action: */
	/* (Invalid)       */ TCP_CLOSE,
	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT   */ TCP_CLOSE,
	/* TCP_CLOSE       */ TCP_CLOSE,
	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
	/* TCP_LISTEN      */ TCP_CLOSE,
	/* TCP_CLOSING     */ TCP_CLOSING,
};

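/*
 * Allocate and initialize a chtls offload socket, including the cached
 * skb reused for control-plane work requests (txdata_skb_cache).
 * Returns NULL if either allocation fails.
 */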
static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
{
	struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);

	if (!csk)
		return NULL;

	csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
	if (!csk->txdata_skb_cache) {
		kfree(csk);
		return NULL;
	}

	kref_init(&csk->kref);
	csk->cdev = cdev;
	skb_queue_head_init(&csk->txq);
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
	csk->mss = MAX_MSS;
	csk->tlshws.ofld = 1;
	csk->tlshws.txkey = -1;
	csk->tlshws.rxkey = -1;
	csk->tlshws.mfs = TLS_MFS;
	skb_queue_head_init(&csk->tlshws.sk_recv_queue);
	return csk;
}

static void chtls_sock_release(struct kref *ref)
{
	struct chtls_sock *csk =
		container_of(ref, struct chtls_sock, kref);

	kfree(csk);
}

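/*
 * Map the address a socket is bound to onto the adapter port that owns
 * it.  An unbound (wildcard) socket defaults to port 0, and a VLAN
 * device is resolved to its real device.  Returns NULL if the address
 * does not belong to any port of this adapter.
 */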
static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
					    struct sock *sk)
{
	struct adapter *adap = pci_get_drvdata(cdev->pdev);
	struct net_device *ndev = cdev->ports[0];
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *temp;
	int addr_type;
#endif
	int i;

	switch (sk->sk_family) {
	case PF_INET:
		if (likely(!inet_sk(sk)->inet_rcv_saddr))
			return ndev;
		ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		if (likely(addr_type == IPV6_ADDR_ANY))
			return ndev;

		for_each_netdev_rcu(&init_net, temp) {
			if (ipv6_chk_addr(&init_net, (struct in6_addr *)
					  &sk->sk_v6_rcv_saddr, temp, 1)) {
				ndev = temp;
				break;
			}
		}
		break;
#endif
	default:
		return NULL;
	}

	if (!ndev)
		return NULL;

	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	for_each_port(adap, i)
		if (cdev->ports[i] == ndev)
			return ndev;
	return NULL;
}

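/*
 * Unpack the TCP options negotiated for the offloaded connection (the
 * opt field of CPL_PASS_ESTABLISH) into the in-kernel tcp_sock: MSS
 * clamp, timestamp, SACK and window-scale bits.
 */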
static void assign_rxopt(struct sock *sk, unsigned int opt)
{
	const struct chtls_dev *cdev;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	cdev = csk->cdev;
	tp->tcp_header_len = sizeof(struct tcphdr);
	tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
	tp->mss_cache = tp->rx_opt.mss_clamp;
	tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
	tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt);
	tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
	SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
	if (!tp->rx_opt.wscale_ok)
		tp->rx_opt.rcv_wscale = 0;
	if (tp->rx_opt.tstamp_ok) {
		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
		tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
	} else if (csk->opt2 & TSTAMPS_EN_F) {
		csk->opt2 &= ~TSTAMPS_EN_F;
		csk->mtu_idx = TCPOPT_MSS_G(opt);
	}
}

static void chtls_purge_receive_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		skb_dst_set(skb, (void *)NULL);
		kfree_skb(skb);
	}
}

static void chtls_purge_write_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->txq))) {
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
}

static void chtls_purge_recv_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *tlsk = &csk->tlshws;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
		skb_dst_set(skb, NULL);
		kfree_skb(skb);
	}
}

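/*
 * ARP failure handler for an ABORT_REQ: downgrade the abort so that no
 * RST is sent and push the work request out through the first port.
 */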
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);
	struct chtls_dev *cdev;

	cdev = (struct chtls_dev *)handle;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
}

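/*
 * Return an skb for a control message, reusing the connection's cached
 * control skb when it is neither shared nor cloned; otherwise fall back
 * to a fresh __GFP_NOFAIL allocation.
 */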
static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
		__skb_trim(skb, 0);
		refcount_inc(&skb->users);
	} else {
		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	}
	return skb;
}

static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct cpl_abort_req *req;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (!skb)
		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));

	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	req->rsvd0 = htonl(tp->snd_nxt);
	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
	req->cmd = mode;
	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
}

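/*
 * Reset a connection: make sure a flow-control work request has gone
 * out first, purge pending transmit data, then issue the abort (or, for
 * a connection still in SYN_RECV, clear the TCB flags instead).
 */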
static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
		     !csk->cdev)) {
		if (sk->sk_state == TCP_SYN_RECV)
			csk_set_flag(csk, CSK_RST_ABORTED);
		goto out;
	}

	if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send tx flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
	chtls_purge_write_queue(sk);

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
	if (sk->sk_state != TCP_SYN_RECV)
		chtls_send_abort(sk, mode, skb);
	else
		chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
					    TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
					    TCB_FIELD_COOKIE_TFLAG, 1);

	return;
out:
	kfree_skb(skb);
}

static void release_tcp_port(struct sock *sk)
{
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);
}

static void tcp_uncork(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->nonagle & TCP_NAGLE_CORK) {
		tp->nonagle &= ~TCP_NAGLE_CORK;
		chtls_tcp_push(sk, 0);
	}
}

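/*
 * Queue a CPL_CLOSE_CON_REQ (the offloaded FIN) on the connection's
 * transmit queue and push it to hardware unless we are still in
 * SYN_SENT.
 */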
static void chtls_close_conn(struct sock *sk)
{
	struct cpl_close_con_req *req;
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int tid;
	unsigned int len;

	len = roundup(sizeof(struct cpl_close_con_req), 16);
	csk = rcu_dereference_sk_user_data(sk);
	tid = csk->tid;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
			      FW_WR_IMMDLEN_V(sizeof(*req) -
					      sizeof(req->wr)));
	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
			       FW_WR_FLOWID_V(tid));

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

	tcp_uncork(sk);
	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
	if (sk->sk_state != TCP_SYN_SENT)
		chtls_push_frames(csk, 1);
}

/*
 * Perform a state transition during close and return the actions indicated
 * for the transition.  Do not make this function inline; the main reason
 * it exists at all is to avoid multiple inlining of tcp_set_state.
 */
static int make_close_transition(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];

	tcp_set_state(sk, next & TCP_STATE_MASK);
	return next & TCP_ACTION_FIN;
}

void chtls_close(struct sock *sk, long timeout)
{
	int data_lost, prev_state;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	lock_sock(sk);
	sk->sk_shutdown |= SHUTDOWN_MASK;

	data_lost = skb_queue_len(&sk->sk_receive_queue);
	data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);

	if (sk->sk_state == TCP_CLOSE) {
		goto wait;
	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		release_tcp_port(sk);
		goto unlock;
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (make_close_transition(sk)) {
		chtls_close_conn(sk);
	}
wait:
	if (timeout)
		sk_stream_wait_close(sk, timeout);

unlock:
	prev_state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	release_sock(sk);

	local_bh_disable();
	bh_lock_sock(sk);

	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
		if (skb)
			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

/*
 * Wait until a socket enters one of the given states.
 */
static int wait_for_states(struct sock *sk, unsigned int states)
{
	DECLARE_WAITQUEUE(wait, current);
	struct socket_wq _sk_wq;
	long current_timeo;
	int err = 0;

	current_timeo = 200;

	/*
	 * We want this to work even when there's no associated struct socket.
	 * In that case we provide a temporary wait_queue_head_t.
	 */
	if (!sk->sk_wq) {
		init_waitqueue_head(&_sk_wq.wait);
		_sk_wq.fasync_list = NULL;
		init_rcu_head_on_stack(&_sk_wq.rcu);
		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (!sk_in_state(sk, states)) {
		if (!current_timeo) {
			err = -EBUSY;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(current_timeo);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		release_sock(sk);
		if (!sk_in_state(sk, states))
			current_timeo = schedule_timeout(current_timeo);
		__set_current_state(TASK_RUNNING);
		lock_sock(sk);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
		sk->sk_wq = NULL;
	return err;
}

int chtls_disconnect(struct sock *sk, int flags)
{
	struct tcp_sock *tp;
	int err;

	tp = tcp_sk(sk);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	chtls_purge_write_queue(sk);

	if (sk->sk_state != TCP_CLOSE) {
		sk->sk_err = ECONNRESET;
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		err = wait_for_states(sk, TCPF_CLOSE);
		if (err)
			return err;
	}
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
	return tcp_disconnect(sk, flags);
}

#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
				 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
void chtls_shutdown(struct sock *sk, int how)
{
	if ((how & SEND_SHUTDOWN) &&
	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
	    make_close_transition(sk))
		chtls_close_conn(sk);
}

void chtls_destroy_sock(struct sock *sk)
{
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	csk->ulp_mode = ULP_MODE_NONE;
	chtls_purge_write_queue(sk);
	free_tls_keyid(sk);
	kref_put(&csk->kref, chtls_sock_release);
	if (sk->sk_family == AF_INET)
		sk->sk_prot = &tcp_prot;
#if IS_ENABLED(CONFIG_IPV6)
	else
		sk->sk_prot = &tcpv6_prot;
#endif
	sk->sk_prot->destroy(sk);
}

static void reset_listen_child(struct sock *child)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
	struct sk_buff *skb;

	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
			     sizeof(struct cpl_abort_req));

	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
	sock_orphan(child);
	INC_ORPHAN_COUNT(child);
	if (child->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(child);
}

static void chtls_disconnect_acceptq(struct sock *listen_sk)
{
	struct request_sock **pprev;

	pprev = ACCEPT_QUEUE(listen_sk);
	while (*pprev) {
		struct request_sock *req = *pprev;

		if (req->rsk_ops == &chtls_rsk_ops ||
		    req->rsk_ops == &chtls_rsk_opsv6) {
			struct sock *child = req->sk;

			*pprev = req->dl_next;
			sk_acceptq_removed(listen_sk);
			reqsk_put(req);
			sock_hold(child);
			local_bh_disable();
			bh_lock_sock(child);
			release_tcp_port(child);
			reset_listen_child(child);
			bh_unlock_sock(child);
			local_bh_enable();
			sock_put(child);
		} else {
			pprev = &req->dl_next;
		}
	}
}

static int listen_hashfn(const struct sock *sk)
{
	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
}

static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
					   struct sock *sk,
					   unsigned int stid)
{
	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int key = listen_hashfn(sk);

		p->sk = sk;
		p->stid = stid;
		spin_lock(&cdev->listen_lock);
		p->next = cdev->listen_hash_tab[key];
		cdev->listen_hash_tab[key] = p;
		spin_unlock(&cdev->listen_lock);
	}
	return p;
}

static int listen_hash_find(struct chtls_dev *cdev,
			    struct sock *sk)
{
	struct listen_info *p;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);

	spin_lock(&cdev->listen_lock);
	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}

static int listen_hash_del(struct chtls_dev *cdev,
			   struct sock *sk)
{
	struct listen_info *p, **prev;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);
	prev = &cdev->listen_hash_tab[key];

	spin_lock(&cdev->listen_lock);
	for (p = *prev; p; prev = &p->next, p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}

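/*
 * Remove an embryonic (SYN_RECV) connection from the parent listener's
 * accounting and from the listen context's SYN queue, then free its
 * request_sock.
 */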
static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
{
	struct request_sock *req;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(child);
	req = csk->passive_reap_next;

	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
	chtls_reqsk_free(req);
	csk->passive_reap_next = NULL;
}

static void chtls_reset_synq(struct listen_ctx *listen_ctx)
{
	struct sock *listen_sk = listen_ctx->lsk;

	while (!skb_queue_empty(&listen_ctx->synq)) {
		struct chtls_sock *csk =
			container_of((struct synq *)skb_peek
				(&listen_ctx->synq), struct chtls_sock, synq);
		struct sock *child = csk->sk;

		cleanup_syn_rcv_conn(child, listen_sk);
		sock_hold(child);
		local_bh_disable();
		bh_lock_sock(child);
		release_tcp_port(child);
		reset_listen_child(child);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);
	}
}

int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
	struct net_device *ndev;
#if IS_ENABLED(CONFIG_IPV6)
	bool clip_valid = false;
#endif
	struct listen_ctx *ctx;
	struct adapter *adap;
	struct port_info *pi;
	int ret = 0;
	int stid;

	rcu_read_lock();
	ndev = chtls_find_netdev(cdev, sk);
	rcu_read_unlock();
	if (!ndev)
		return -EBADF;

	pi = netdev_priv(ndev);
	adap = pi->adapter;
	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
		return -EBADF;

	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
		return -EADDRINUSE;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	ctx->lsk = sk;
	ctx->cdev = cdev;
	ctx->state = T4_LISTEN_START_PENDING;
	skb_queue_head_init(&ctx->synq);

	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
	if (stid < 0)
		goto free_ctx;

	sock_hold(sk);
	if (!listen_hash_add(cdev, sk, stid))
		goto free_stid;

	if (sk->sk_family == PF_INET) {
		ret = cxgb4_create_server(ndev, stid,
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_sport, 0,
					  cdev->lldi->rxq_ids[0]);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		int addr_type;

		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		if (addr_type != IPV6_ADDR_ANY) {
			ret = cxgb4_clip_get(ndev, (const u32 *)
					     &sk->sk_v6_rcv_saddr, 1);
			if (ret)
				goto del_hash;
			clip_valid = true;
		}
		ret = cxgb4_create_server6(ndev, stid,
					   &sk->sk_v6_rcv_saddr,
					   inet_sk(sk)->inet_sport,
					   cdev->lldi->rxq_ids[0]);
#endif
	}
	if (ret > 0)
		ret = net_xmit_errno(ret);
	if (ret)
		goto del_hash;
	return 0;
del_hash:
#if IS_ENABLED(CONFIG_IPV6)
	if (clip_valid)
		cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
#endif
	listen_hash_del(cdev, sk);
free_stid:
	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
	sock_put(sk);
free_ctx:
	kfree(ctx);
	module_put(THIS_MODULE);
	return -EBADF;
}

void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
	struct listen_ctx *listen_ctx;
	int stid;

	stid = listen_hash_del(cdev, sk);
	if (stid < 0)
		return;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	chtls_reset_synq(listen_ctx);

	cxgb4_remove_server(cdev->lldi->ports[0], stid,
			    cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct net_device *ndev = chtls_find_netdev(cdev, sk);
		int addr_type = 0;

		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &sk->sk_v6_rcv_saddr);
		if (addr_type != IPV6_ADDR_ANY)
			cxgb4_clip_release(ndev, (const u32 *)
					   &sk->sk_v6_rcv_saddr, 1);
	}
#endif
	chtls_disconnect_acceptq(sk);
}

static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int stid = GET_TID(rpl);
	struct listen_ctx *listen_ctx;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	if (!listen_ctx)
		return CPL_RET_BUF_DONE;

	if (listen_ctx->state == T4_LISTEN_START_PENDING) {
		listen_ctx->state = T4_LISTEN_STARTED;
		return CPL_RET_BUF_DONE;
	}

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
			rpl->status, stid);
	} else {
		cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
		sock_put(listen_ctx->lsk);
		kfree(listen_ctx);
		module_put(THIS_MODULE);
	}
	return CPL_RET_BUF_DONE;
}

static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *listen_ctx;
	unsigned int stid;
	void *data;

	stid = GET_TID(rpl);
	data = lookup_stid(cdev->tids, stid);
	listen_ctx = (struct listen_ctx *)data;

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
			rpl->status, stid);
	} else {
		cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
		sock_put(listen_ctx->lsk);
		kfree(listen_ctx);
		module_put(THIS_MODULE);
	}
	return CPL_RET_BUF_DONE;
}

static void chtls_purge_wr_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = dequeue_wr(sk)) != NULL)
		kfree_skb(skb);
}

static void chtls_release_resources(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	unsigned int tid = csk->tid;
	struct tid_info *tids;

	if (!cdev)
		return;

	tids = cdev->tids;
	kfree_skb(csk->txdata_skb_cache);
	csk->txdata_skb_cache = NULL;

	if (csk->wr_credits != csk->wr_max_credits) {
		chtls_purge_wr_queue(sk);
		chtls_reset_wr_list(csk);
	}

	if (csk->l2t_entry) {
		cxgb4_l2t_release(csk->l2t_entry);
		csk->l2t_entry = NULL;
	}

	if (sk->sk_state != TCP_SYN_SENT) {
		cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
		sock_put(sk);
	}
}

static void chtls_conn_done(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		chtls_purge_receive_queue(sk);
	sk_wakeup_sleepers(sk, 0);
	tcp_done(sk);
}

static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
{
	/*
	 * If the server is still open we clean up the child connection,
	 * otherwise the server already did the clean up as it was purging
	 * its SYN queue and the skb was just sitting in its backlog.
	 */
	if (likely(parent->sk_state == TCP_LISTEN)) {
		cleanup_syn_rcv_conn(child, parent);
		/* Without the call to sock_orphan below we would leak the
		 * socket under a SYN flood, because inet_csk_destroy_sock
		 * is not called from tcp_done when SOCK_DEAD is not set.
		 * The stock kernel avoids this differently: it creates the
		 * new socket only after the 3-way handshake is done.
		 */
		sock_orphan(child);
		INC_ORPHAN_COUNT(child);
		chtls_release_resources(child);
		chtls_conn_done(child);
	} else {
		if (csk_flag(child, CSK_RST_ABORTED)) {
			chtls_release_resources(child);
			chtls_conn_done(child);
		}
	}
}

static void pass_open_abort(struct sock *child, struct sock *parent,
			    struct sk_buff *skb)
{
	do_abort_syn_rcv(child, parent);
	kfree_skb(skb);
}

static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
{
	pass_open_abort(skb->sk, lsk, skb);
}

static void chtls_pass_open_arp_failure(struct sock *sk,
					struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *parent;
	void *data;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	/*
	 * If the connection is being aborted due to the parent listening
	 * socket going away there's nothing to do, the ABORT_REQ will close
	 * the connection.
	 */
	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
		kfree_skb(skb);
		return;
	}

	oreq = csk->passive_reap_next;
	data = lookup_stid(cdev->tids, oreq->ts_recent);
	parent = ((struct listen_ctx *)data)->lsk;

	bh_lock_sock(parent);
	if (!sock_owned_by_user(parent)) {
		pass_open_abort(sk, parent, skb);
	} else {
		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
		__sk_add_backlog(parent, skb);
	}
	bh_unlock_sock(parent);
}

static void chtls_accept_rpl_arp_failure(void *handle,
					 struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)handle;

	sock_hold(sk);
	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
	sock_put(sk);
}

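/*
 * Pick the MSS for a passive connection from the peer's advertised MSS,
 * the path MTU and any user clamp, then align it to the adapter's MTU
 * table.  Returns the index into that table.
 */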
static unsigned int chtls_select_mss(const struct chtls_sock *csk,
				     unsigned int pmtu,
				     struct cpl_pass_accept_req *req)
{
	struct chtls_dev *cdev;
	struct dst_entry *dst;
	unsigned int tcpoptsz;
	unsigned int iphdrsz;
	unsigned int mtu_idx;
	struct tcp_sock *tp;
	unsigned int mss;
	struct sock *sk;

	mss = ntohs(req->tcpopt.mss);
	sk = csk->sk;
	dst = __sk_dst_get(sk);
	cdev = csk->cdev;
	tp = tcp_sk(sk);
	tcpoptsz = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
	else
#endif
		iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
	if (req->tcpopt.tstamp)
		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);

	tp->advmss = dst_metric_advmss(dst);
	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
		tp->advmss = USER_MSS(tp);
	if (tp->advmss > pmtu - iphdrsz)
		tp->advmss = pmtu - iphdrsz;
	if (mss && tp->advmss > mss)
		tp->advmss = mss;

	tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
					    iphdrsz + tcpoptsz,
					    tp->advmss - tcpoptsz,
					    8, &mtu_idx);
	tp->advmss -= iphdrsz;

	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
	return mtu_idx;
}

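/*
 * Choose a receive window scale factor: the smallest shift (up to 14)
 * that lets a 16-bit window cover the available receive space, subject
 * to MAX_RCV_WND and an optional window clamp.
 */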
static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;
	if (win_clamp && win_clamp < space)
		space = win_clamp;

	if (wscale_ok) {
		while (wscale < 14 && (65535 << wscale) < space)
			wscale++;
	}
	return wscale;
}

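/*
 * Build and send the CPL_PASS_ACCEPT_RPL for a new connection: opt0 and
 * opt2 carry the negotiated MSS index, window scale, congestion-control
 * and ECN settings, and the TLS ULP mode.
 */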
static void chtls_pass_accept_rpl(struct sk_buff *skb,
				  struct cpl_pass_accept_req *req,
				  unsigned int tid)
{
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi;
	const struct tcphdr *tcph;
	const struct tcp_sock *tp;
	struct chtls_sock *csk;
	unsigned int len;
	struct sock *sk;
	u32 opt2, hlen;
	u64 opt0;

	sk = skb->sk;
	tp = tcp_sk(sk);
	csk = sk->sk_user_data;
	csk->tid = tid;
	lldi = csk->cdev->lldi;
	len = roundup(sizeof(*rpl5), 16);

	rpl5 = __skb_put_zero(skb, len);
	INIT_TP_WR(rpl5, tid);

	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V(RCV_WSCALE(tp)) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TLS) |
	       RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;
	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	hlen = ntohl(req->hdr_len);

	tcph = (struct tcphdr *)((u8 *)(req + 1) +
			T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
	opt2 |= T5_ISS_F;
	opt2 |= T5_OPT_2_VALID_F;
	opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}

static void inet_inherit_port(struct sock *lsk, struct sock *newsk)
{
	local_bh_disable();
	__inet_inherit_port(lsk, newsk);
	local_bh_enable();
}

static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol) {
		kfree_skb(skb);
		return 0;
	}
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

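/*
 * Size the send and receive windows from the port's link speed, using
 * 256KB per 10Gbps as the baseline.
 */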
static void chtls_set_tcp_window(struct chtls_sock *csk)
{
	struct net_device *ndev = csk->egress_dev;
	struct port_info *pi = netdev_priv(ndev);
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
	csk->rcv_win = CHTLS_10G_RCVWIN;
	if (scale)
		csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
	csk->snd_win = CHTLS_10G_SNDWIN;
	if (scale)
		csk->snd_win *= scale;
}

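/*
 * Create the child socket for a passive open: clone the listener, route
 * the reply back to the peer, resolve the L2 neighbour on one of our
 * ports, and bind the new socket to a chtls_sock with its TX/RX queues,
 * window sizes and ULP mode set up.  Returns NULL on any failure.
 */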
static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
				    const struct cpl_pass_accept_req *req,
				    struct chtls_dev *cdev)
{
	struct adapter *adap = pci_get_drvdata(cdev->pdev);
	struct neighbour *n = NULL;
	struct inet_sock *newinet;
	const struct iphdr *iph;
	struct tls_context *ctx;
	struct net_device *ndev;
	struct chtls_sock *csk;
	struct dst_entry *dst;
	struct tcp_sock *tp;
	struct sock *newsk;
	bool found = false;
	u16 port_id;
	int rxq_idx;
	int step, i;

	iph = (const struct iphdr *)network_hdr;
	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
	if (!newsk)
		goto free_oreq;

	if (lsk->sk_family == AF_INET) {
		dst = inet_csk_route_child_sock(lsk, newsk, oreq);
		if (!dst)
			goto free_sk;

		n = dst_neigh_lookup(dst, &iph->saddr);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		const struct ipv6hdr *ip6h;
		struct flowi6 fl6;

		ip6h = (const struct ipv6hdr *)network_hdr;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_TCP;
		fl6.saddr = ip6h->daddr;
		fl6.daddr = ip6h->saddr;
		fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
		fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
		security_req_classify_flow(oreq, flowi6_to_flowi_common(&fl6));
		dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
		if (IS_ERR(dst))
			goto free_sk;
		n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
	}
	if (!n || !n->dev)
		goto free_dst;

	ndev = n->dev;
	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	for_each_port(adap, i)
		if (cdev->ports[i] == ndev)
			found = true;

	if (!found)
		goto free_dst;

	port_id = cxgb4_port_idx(ndev);

	csk = chtls_sock_create(cdev);
	if (!csk)
		goto free_dst;

	csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
	if (!csk->l2t_entry)
		goto free_csk;

	newsk->sk_user_data = csk;
	newsk->sk_backlog_rcv = chtls_backlog_rcv;

	tp = tcp_sk(newsk);
	newinet = inet_sk(newsk);

	if (iph->version == 0x4) {
		newinet->inet_daddr = iph->saddr;
		newinet->inet_rcv_saddr = iph->daddr;
		newinet->inet_saddr = iph->daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
		struct inet_request_sock *treq = inet_rsk(oreq);
		struct ipv6_pinfo *newnp = inet6_sk(newsk);
		struct ipv6_pinfo *np = inet6_sk(lsk);

		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
		newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
		newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
		inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newsk->sk_bound_dev_if = treq->ir_iif;
		newinet->inet_opt = NULL;
		newinet->inet_daddr = LOOPBACK4_IPV6;
		newinet->inet_saddr = LOOPBACK4_IPV6;
#endif
	}

	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	sk_setup_caps(newsk, dst);
	ctx = tls_get_ctx(lsk);
	newsk->sk_destruct = ctx->sk_destruct;
	newsk->sk_prot_creator = lsk->sk_prot_creator;
	csk->sk = newsk;
	csk->passive_reap_next = oreq;
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	chtls_set_tcp_window(csk);
	tp->rcv_wnd = csk->rcv_win;
	csk->sndbuf = csk->snd_win;
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	rxq_idx = port_id * step;
	rxq_idx += cdev->round_robin_cnt++ % step;
	csk->rss_qid = cdev->lldi->rxq_ids[rxq_idx];
	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
			port_id * step;
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   READ_ONCE(sock_net(newsk)->
						     ipv4.sysctl_tcp_window_scaling),
					   tp->window_clamp);
	neigh_release(n);
	inet_inherit_port(lsk, newsk);
	csk_set_flag(csk, CSK_CONN_INLINE);
	bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */

	return newsk;
free_csk:
	chtls_sock_release(&csk->kref);
free_dst:
	if (n)
		neigh_release(n);
	dst_release(dst);
free_sk:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
free_oreq:
	chtls_reqsk_free(oreq);
	return NULL;
}

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static void mk_tid_release(struct sk_buff *skb,
			   unsigned int chan, unsigned int tid)
{
	struct cpl_tid_release *req;
	unsigned int len;

	len = roundup(sizeof(struct cpl_tid_release), 16);
	req = (struct cpl_tid_release *)__skb_put(skb, len);
	memset(req, 0, len);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
}

static int chtls_get_module(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!try_module_get(icsk->icsk_ulp_ops->owner))
		return -1;

	return 0;
}

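/*
 * Process a CPL_PASS_ACCEPT_REQ on a listening socket: validate the
 * listener and its queues, parse the L2/L3/L4 headers embedded in the
 * CPL, build a request_sock, create the child socket and answer with a
 * PASS_ACCEPT_RPL; on any failure release the TID instead.
 */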
static void chtls_pass_accept_request(struct sock *sk,
				      struct sk_buff *skb)
{
	struct cpl_t5_pass_accept_rpl *rpl;
	struct cpl_pass_accept_req *req;
	struct listen_ctx *listen_ctx;
	struct vlan_ethhdr *vlan_eh;
	struct request_sock *oreq;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	struct sock *newsk;
	struct ethhdr *eh;
	struct iphdr *iph;
	void *network_hdr;
	unsigned int stid;
	unsigned int len;
	unsigned int tid;
	bool th_ecn, ect;
	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
	u16 eth_hdr_len;
	bool ecn_ok;

	req = cplhdr(skb) + RSS_HDR;
	tid = GET_TID(req);
	cdev = BLOG_SKB_CB(skb)->cdev;
	newsk = lookup_tid(cdev->tids, tid);
	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	if (newsk) {
		pr_info("tid (%d) already in use\n", tid);
		return;
	}

	len = roundup(sizeof(*rpl), 16);
	reply_skb = alloc_skb(len, GFP_ATOMIC);
	if (!reply_skb) {
		cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
		kfree_skb(skb);
		return;
	}

	if (sk->sk_state != TCP_LISTEN)
		goto reject;

	if (inet_csk_reqsk_queue_is_full(sk))
		goto reject;

	if (sk_acceptq_is_full(sk))
		goto reject;

	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
		ip6h = (struct ipv6hdr *)(eh + 1);
		network_hdr = (void *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		ip6h = (struct ipv6hdr *)(vlan_eh + 1);
		network_hdr = (void *)(vlan_eh + 1);
	}

	if (iph->version == 0x4) {
		tcph = (struct tcphdr *)(iph + 1);
		skb_set_network_header(skb, (void *)iph - (void *)req);
		oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
	} else {
		tcph = (struct tcphdr *)(ip6h + 1);
		skb_set_network_header(skb, (void *)ip6h - (void *)req);
		oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false);
	}

	if (!oreq)
		goto reject;

	oreq->rsk_rcv_wnd = 0;
	oreq->rsk_window_clamp = 0;
	oreq->syncookie = 0;
	oreq->mss = 0;
	oreq->ts_recent = 0;

	tcp_rsk(oreq)->tfo_listener = false;
	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
	chtls_set_req_port(oreq, tcph->source, tcph->dest);
	if (iph->version == 0x4) {
		chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
		ip_dsfield = ipv4_get_dsfield(iph);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
		inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
		ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	}
	if (req->tcpopt.wsf <= 14 &&
	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		inet_rsk(oreq)->wscale_ok = 1;
		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
	}
	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
	th_ecn = tcph->ece && tcph->cwr;
	if (th_ecn) {
		ect = !INET_ECN_is_not_ect(ip_dsfield);
		ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
			inet_rsk(oreq)->ecn_ok = 1;
	}

	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
	if (!newsk)
		goto reject;

	if (chtls_get_module(newsk))
		goto reject;
	inet_csk_reqsk_queue_added(sk);
	reply_skb->sk = newsk;
	chtls_install_cpl_ops(newsk);
	cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
	csk = rcu_dereference_sk_user_data(newsk);
	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	csk->listen_ctx = listen_ctx;
	__skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
	chtls_pass_accept_rpl(reply_skb, req, tid);
	kfree_skb(skb);
	return;

reject:
	mk_tid_release(reply_skb, 0, tid);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}

/*
 * Handle a CPL_PASS_ACCEPT_REQ message.
 */
static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *ctx;
	unsigned int stid;
	unsigned int tid;
	struct sock *lsk;
	void *data;

	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	tid = GET_TID(req);

	data = lookup_stid(cdev->tids, stid);
	if (!data)
		return 1;

	ctx = (struct listen_ctx *)data;
	lsk = ctx->lsk;

	if (unlikely(tid_out_of_range(cdev->tids, tid))) {
		pr_info("passive open TID %u too large\n", tid);
		return 1;
	}

	BLOG_SKB_CB(skb)->cdev = cdev;
	process_cpl_msg(chtls_pass_accept_request, lsk, skb);
	return 0;
}

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCP_ESTABLISHED.
 *
 * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
 */
static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->pushed_seq = snd_isn;
	tp->write_seq = snd_isn;
	tp->snd_nxt = snd_isn;
	tp->snd_una = snd_isn;
	atomic_set(&inet_sk(sk)->inet_id, get_random_u16());
	assign_rxopt(sk, opt);

	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
		tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);

	smp_mb();
	tcp_set_state(sk, TCP_ESTABLISHED);
}

static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *abort_skb;

	abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
	if (abort_skb)
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
}

static struct sock *reap_list;
static DEFINE_SPINLOCK(reap_list_lock);

/*
 * Process the reap list.
 */
DECLARE_TASK_FUNC(process_reap_list, task_param)
{
	spin_lock_bh(&reap_list_lock);
	while (reap_list) {
		struct sock *sk = reap_list;
		struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

		reap_list = csk->passive_reap_next;
		csk->passive_reap_next = NULL;
		spin_unlock(&reap_list_lock);
		sock_hold(sk);

		bh_lock_sock(sk);
		chtls_abort_conn(sk, NULL);
		sock_orphan(sk);
		if (sk->sk_state == TCP_CLOSE)
			inet_csk_destroy_sock(sk);
		bh_unlock_sock(sk);
		sock_put(sk);
		spin_lock(&reap_list_lock);
	}
	spin_unlock_bh(&reap_list_lock);
}

static DECLARE_WORK(reap_task, process_reap_list);

static void add_to_reap_list(struct sock *sk)
{
	struct chtls_sock *csk = sk->sk_user_data;

	local_bh_disable();
	release_tcp_port(sk); /* release the port immediately */

	spin_lock(&reap_list_lock);
	csk->passive_reap_next = reap_list;
	reap_list = sk;
	if (!csk->passive_reap_next)
		schedule_work(&reap_task);
	spin_unlock(&reap_list_lock);
	local_bh_enable();
}

static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
				    struct chtls_dev *cdev)
{
	struct request_sock *oreq;
	struct chtls_sock *csk;

	if (lsk->sk_state != TCP_LISTEN)
		return;

	csk = child->sk_user_data;
	oreq = csk->passive_reap_next;
	csk->passive_reap_next = NULL;

	reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);

	if (sk_acceptq_is_full(lsk)) {
		chtls_reqsk_free(oreq);
		add_to_reap_list(child);
	} else {
		refcount_set(&oreq->rsk_refcnt, 1);
		inet_csk_reqsk_queue_add(lsk, oreq, child);
		lsk->sk_data_ready(lsk);
	}
}

static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
{
	struct sock *child = skb->sk;

	skb->sk = NULL;
	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
	kfree_skb(skb);
}

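/*
 * Handle CPL_PASS_ESTABLISH: the 3-way handshake of an offloaded passive
 * connection has completed.  Move the child socket to ESTABLISHED and
 * hand it to the parent listener's accept queue (or the listener's
 * backlog if it is currently owned by user context).
 */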
static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct sock *lsk, *sk;
	unsigned int hwtid;

	hwtid = GET_TID(req);
	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		kfree_skb(skb);
	} else {
		unsigned int stid;
		void *data;

		csk = sk->sk_user_data;
		csk->wr_max_credits = 64;
		csk->wr_credits = 64;
		csk->wr_unacked = 0;
		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
		stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
		sk->sk_state_change(sk);
		if (unlikely(sk->sk_socket))
			sk_wake_async(sk, 0, POLL_OUT);

		data = lookup_stid(cdev->tids, stid);
		if (!data) {
			/* listening server close */
			kfree_skb(skb);
			goto unlock;
		}
		lsk = ((struct listen_ctx *)data)->lsk;

		bh_lock_sock(lsk);
		if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
			/* removed from synq */
			bh_unlock_sock(lsk);
			kfree_skb(skb);
			goto unlock;
		}

		if (likely(!sock_owned_by_user(lsk))) {
			kfree_skb(skb);
			add_pass_open_to_parent(sk, lsk, cdev);
		} else {
			skb->sk = sk;
			BLOG_SKB_CB(skb)->cdev = cdev;
			BLOG_SKB_CB(skb)->backlog_rcv =
				bl_add_pass_open_to_parent;
			__sk_add_backlog(lsk, skb);
		}
		bh_unlock_sock(lsk);
	}
unlock:
	bh_unlock_sock(sk);
	return 0;
}

/*
 * Handle receipt of an urgent pointer.
 */
static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);

	urg_seq--;
	if (tp->urg_data && !after(urg_seq, tp->urg_seq))
		return;	/* duplicate pointer */

	sk_send_sigurg(sk);
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) &&
	    tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		tp->copied_seq++;
		if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
			chtls_free_skb(sk, skb);
	}

	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = urg_seq;
}

static void check_sk_callbacks(struct chtls_sock *csk)
{
	struct sock *sk = csk->sk;

	if (unlikely(sk->sk_user_data &&
		     !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
		csk_set_flag(csk, CSK_CALLBACKS_CHKD);
}

/*
 * Handles Rx data that arrives in a state where the socket isn't accepting
 * new data.
 */
static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
{
	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
		chtls_abort_conn(sk, skb);

	kfree_skb(skb);
}

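/*
 * Handle CPL_RX_DATA: strip the CPL header, track urgent data and the
 * delayed-ACK mode, advance rcv_nxt and queue the payload on the
 * socket's receive queue.
 */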
1680 | static void chtls_recv_data(struct sock *sk, struct sk_buff *skb) |
1681 | { |
1682 | struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR; |
1683 | struct chtls_sock *csk; |
1684 | struct tcp_sock *tp; |
1685 | |
1686 | csk = rcu_dereference_sk_user_data(sk); |
1687 | tp = tcp_sk(sk); |
1688 | |
1689 | if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) { |
1690 | handle_excess_rx(sk, skb); |
1691 | return; |
1692 | } |
1693 | |
1694 | ULP_SKB_CB(skb)->seq = ntohl(hdr->seq); |
1695 | ULP_SKB_CB(skb)->psh = hdr->psh; |
1696 | skb_ulp_mode(skb) = ULP_MODE_NONE; |
1697 | |
1698 | skb_reset_transport_header(skb); |
1699 | __skb_pull(skb, sizeof(*hdr) + RSS_HDR); |
1700 | if (!skb->data_len) |
1701 | __skb_trim(skb, ntohs(hdr->len)); |
1702 | |
1703 | if (unlikely(hdr->urg)) |
1704 | handle_urg_ptr(sk, urg_seq: tp->rcv_nxt + ntohs(hdr->urg)); |
1705 | if (unlikely(tp->urg_data == TCP_URG_NOTYET && |
1706 | tp->urg_seq - tp->rcv_nxt < skb->len)) |
1707 | tp->urg_data = TCP_URG_VALID | |
1708 | skb->data[tp->urg_seq - tp->rcv_nxt]; |
1709 | |
1710 | if (unlikely(hdr->dack_mode != csk->delack_mode)) { |
1711 | csk->delack_mode = hdr->dack_mode; |
1712 | csk->delack_seq = tp->rcv_nxt; |
1713 | } |
1714 | |
1715 | tcp_hdr(skb)->fin = 0; |
1716 | tp->rcv_nxt += skb->len; |
1717 | |
1718 | __skb_queue_tail(list: &sk->sk_receive_queue, newsk: skb); |
1719 | |
1720 | if (!sock_flag(sk, flag: SOCK_DEAD)) { |
1721 | check_sk_callbacks(csk); |
1722 | sk->sk_data_ready(sk); |
1723 | } |
1724 | } |
1725 | |
1726 | static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb) |
1727 | { |
1728 | struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR; |
1729 | unsigned int hwtid = GET_TID(req); |
1730 | struct sock *sk; |
1731 | |
1732 | sk = lookup_tid(cdev->tids, hwtid); |
1733 | if (unlikely(!sk)) { |
1734 | pr_err("can't find conn. for hwtid %u.\n" , hwtid); |
1735 | return -EINVAL; |
1736 | } |
1737 | skb_dst_set(skb, NULL); |
1738 | process_cpl_msg(fn: chtls_recv_data, sk, skb); |
1739 | return 0; |
1740 | } |
1741 | |
1742 | static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb) |
1743 | { |
1744 | struct cpl_tls_data *hdr = cplhdr(skb); |
1745 | struct chtls_sock *csk; |
1746 | struct chtls_hws *tlsk; |
1747 | struct tcp_sock *tp; |
1748 | |
1749 | csk = rcu_dereference_sk_user_data(sk); |
1750 | tlsk = &csk->tlshws; |
1751 | tp = tcp_sk(sk); |
1752 | |
1753 | if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) { |
1754 | handle_excess_rx(sk, skb); |
1755 | return; |
1756 | } |
1757 | |
1758 | ULP_SKB_CB(skb)->seq = ntohl(hdr->seq); |
1759 | ULP_SKB_CB(skb)->flags = 0; |
1760 | skb_ulp_mode(skb) = ULP_MODE_TLS; |
1761 | |
1762 | skb_reset_transport_header(skb); |
1763 | __skb_pull(skb, sizeof(*hdr)); |
1764 | if (!skb->data_len) |
		__skb_trim(skb,
			   CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));
1767 | |
1768 | if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq - |
1769 | tp->rcv_nxt < skb->len)) |
1770 | tp->urg_data = TCP_URG_VALID | |
1771 | skb->data[tp->urg_seq - tp->rcv_nxt]; |
1772 | |
1773 | tcp_hdr(skb)->fin = 0; |
1774 | tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)); |
	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
1776 | } |
1777 | |
1778 | static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb) |
1779 | { |
1780 | struct cpl_tls_data *req = cplhdr(skb); |
1781 | unsigned int hwtid = GET_TID(req); |
1782 | struct sock *sk; |
1783 | |
1784 | sk = lookup_tid(cdev->tids, hwtid); |
1785 | if (unlikely(!sk)) { |
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
1787 | return -EINVAL; |
1788 | } |
1789 | skb_dst_set(skb, NULL); |
	process_cpl_msg(chtls_recv_pdu, sk, skb);
1791 | return 0; |
1792 | } |
1793 | |
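/*
 * Stash the length carried in the TLS completion header in skb->hdr_len
 * and overwrite the header's length field with the queued payload length.
 */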
1794 | static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen) |
1795 | { |
1796 | struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb); |
1797 | |
1798 | skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length); |
1799 | tls_cmp_hdr->length = ntohs((__force __be16)nlen); |
1800 | } |
1801 | |
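/*
 * Process a CPL_RX_TLS_CMP completion: mark decryption/MAC errors in the
 * TLS header, advance rcv_nxt by the PDU length, and deliver the header
 * followed by any payload previously queued by chtls_recv_pdu().
 */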
1802 | static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb) |
1803 | { |
1804 | struct tlsrx_cmp_hdr *tls_hdr_pkt; |
1805 | struct cpl_rx_tls_cmp *cmp_cpl; |
1806 | struct sk_buff *skb_rec; |
1807 | struct chtls_sock *csk; |
1808 | struct chtls_hws *tlsk; |
1809 | struct tcp_sock *tp; |
1810 | |
1811 | cmp_cpl = cplhdr(skb); |
1812 | csk = rcu_dereference_sk_user_data(sk); |
1813 | tlsk = &csk->tlshws; |
1814 | tp = tcp_sk(sk); |
1815 | |
1816 | ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq); |
1817 | ULP_SKB_CB(skb)->flags = 0; |
1818 | |
1819 | skb_reset_transport_header(skb); |
1820 | __skb_pull(skb, sizeof(*cmp_cpl)); |
1821 | tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data; |
1822 | if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M) |
1823 | tls_hdr_pkt->type = CONTENT_TYPE_ERROR; |
1824 | if (!skb->data_len) |
1825 | __skb_trim(skb, TLS_HEADER_LENGTH); |
1826 | |
1827 | tp->rcv_nxt += |
1828 | CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length)); |
1829 | |
1830 | ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR; |
	skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
1832 | if (!skb_rec) { |
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	} else {
		chtls_set_hdrlen(skb, tlsk->pldlen);
		tlsk->pldlen = 0;
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
1839 | } |
1840 | |
	if (!sock_flag(sk, SOCK_DEAD)) {
1842 | check_sk_callbacks(csk); |
1843 | sk->sk_data_ready(sk); |
1844 | } |
1845 | } |
1846 | |
1847 | static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb) |
1848 | { |
1849 | struct cpl_rx_tls_cmp *req = cplhdr(skb); |
1850 | unsigned int hwtid = GET_TID(req); |
1851 | struct sock *sk; |
1852 | |
1853 | sk = lookup_tid(cdev->tids, hwtid); |
1854 | if (unlikely(!sk)) { |
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
1856 | return -EINVAL; |
1857 | } |
1858 | skb_dst_set(skb, NULL); |
	process_cpl_msg(chtls_rx_hdr, sk, skb);
1860 | |
1861 | return 0; |
1862 | } |
1863 | |
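/*
 * Enter TIME_WAIT, accounting for the peer's FIN in rcv_nxt.
 */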
1864 | static void chtls_timewait(struct sock *sk) |
1865 | { |
1866 | struct tcp_sock *tp = tcp_sk(sk); |
1867 | |
1868 | tp->rcv_nxt++; |
1869 | tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); |
1870 | tp->srtt_us = 0; |
	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
1872 | } |
1873 | |
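/*
 * Handle CPL_PEER_CLOSE: the peer has sent a FIN. Advance the TCP state
 * machine accordingly and wake anyone waiting on the socket.
 */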
1874 | static void chtls_peer_close(struct sock *sk, struct sk_buff *skb) |
1875 | { |
1876 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); |
1877 | |
	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1879 | goto out; |
1880 | |
1881 | sk->sk_shutdown |= RCV_SHUTDOWN; |
	sock_set_flag(sk, SOCK_DONE);
1883 | |
1884 | switch (sk->sk_state) { |
1885 | case TCP_SYN_RECV: |
1886 | case TCP_ESTABLISHED: |
		tcp_set_state(sk, TCP_CLOSE_WAIT);
1888 | break; |
1889 | case TCP_FIN_WAIT1: |
		tcp_set_state(sk, TCP_CLOSING);
1891 | break; |
1892 | case TCP_FIN_WAIT2: |
1893 | chtls_release_resources(sk); |
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1895 | chtls_conn_done(sk); |
1896 | else |
1897 | chtls_timewait(sk); |
1898 | break; |
1899 | default: |
		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
1901 | } |
1902 | |
	if (!sock_flag(sk, SOCK_DEAD)) {
1904 | sk->sk_state_change(sk); |
1905 | /* Do not send POLL_HUP for half duplex close. */ |
1906 | |
1907 | if ((sk->sk_shutdown & SEND_SHUTDOWN) || |
1908 | sk->sk_state == TCP_CLOSE) |
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1912 | } |
1913 | out: |
1914 | kfree_skb(skb); |
1915 | } |
1916 | |
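/*
 * Handle CPL_CLOSE_CON_RPL, the reply to our close request: our FIN has
 * gone out, so complete the local side of the close sequence.
 */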
1917 | static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb) |
1918 | { |
1919 | struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR; |
1920 | struct chtls_sock *csk; |
1921 | struct tcp_sock *tp; |
1922 | |
1923 | csk = rcu_dereference_sk_user_data(sk); |
1924 | |
	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1926 | goto out; |
1927 | |
1928 | tp = tcp_sk(sk); |
1929 | |
1930 | tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */ |
1931 | |
1932 | switch (sk->sk_state) { |
1933 | case TCP_CLOSING: |
1934 | chtls_release_resources(sk); |
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1936 | chtls_conn_done(sk); |
1937 | else |
1938 | chtls_timewait(sk); |
1939 | break; |
1940 | case TCP_LAST_ACK: |
1941 | chtls_release_resources(sk); |
1942 | chtls_conn_done(sk); |
1943 | break; |
1944 | case TCP_FIN_WAIT1: |
		tcp_set_state(sk, TCP_FIN_WAIT2);
1946 | sk->sk_shutdown |= SEND_SHUTDOWN; |
1947 | |
		if (!sock_flag(sk, SOCK_DEAD))
1949 | sk->sk_state_change(sk); |
		else if (tcp_sk(sk)->linger2 < 0 &&
			 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
			chtls_abort_conn(sk, skb);
		else if (csk_flag_nochk(csk, CSK_TX_DATA_SENT))
			chtls_set_quiesce_ctrl(sk, 0);
1955 | break; |
1956 | default: |
		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
1958 | } |
1959 | out: |
1960 | kfree_skb(skb); |
1961 | } |
1962 | |
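/*
 * Get an skb for a CPL reply: reuse the incoming skb in place if it is
 * linear and unshared, otherwise allocate a fresh one.
 */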
1963 | static struct sk_buff *get_cpl_skb(struct sk_buff *skb, |
1964 | size_t len, gfp_t gfp) |
1965 | { |
1966 | if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) { |
		WARN_ONCE(skb->len < len, "skb alloc error");
1968 | __skb_trim(skb, len); |
1969 | skb_get(skb); |
1970 | } else { |
		skb = alloc_skb(len, gfp);
1972 | if (skb) |
1973 | __skb_put(skb, len); |
1974 | } |
1975 | return skb; |
1976 | } |
1977 | |
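/* Fill in a CPL_ABORT_RPL work request for the given tid. */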
1978 | static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid, |
1979 | int cmd) |
1980 | { |
1981 | struct cpl_abort_rpl *rpl = cplhdr(skb); |
1982 | |
1983 | INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid); |
1984 | rpl->cmd = cmd; |
1985 | } |
1986 | |
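/*
 * Deferred-work handler: build and send the abort reply that could not
 * be allocated in the original atomic context.
 */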
1987 | static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) |
1988 | { |
	struct cpl_abort_req_rss *req = cplhdr(skb);
1990 | struct sk_buff *reply_skb; |
1991 | |
1992 | reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), |
1993 | GFP_KERNEL | __GFP_NOFAIL); |
1994 | __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); |
1995 | set_abort_rpl_wr(reply_skb, GET_TID(req), |
1996 | (req->status & CPL_ABORT_NO_RST)); |
1997 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1); |
1998 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); |
1999 | kfree_skb(skb); |
2000 | } |
2001 | |
2002 | /* |
2003 | * Add an skb to the deferred skb queue for processing from process context. |
2004 | */ |
2005 | static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev, |
2006 | defer_handler_t handler) |
2007 | { |
2008 | DEFERRED_SKB_CB(skb)->handler = handler; |
	spin_lock_bh(&cdev->deferq.lock);
	__skb_queue_tail(&cdev->deferq, skb);
	if (skb_queue_len(&cdev->deferq) == 1)
		schedule_work(&cdev->deferq_task);
	spin_unlock_bh(&cdev->deferq.lock);
2014 | } |
2015 | |
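/*
 * Send a CPL_ABORT_RPL for the given abort request. If no reply skb can
 * be obtained, stash the status and queue in req->status and defer the
 * reply to process context via t4_defer_reply().
 */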
2016 | static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb, |
2017 | struct chtls_dev *cdev, |
2018 | int status, int queue) |
2019 | { |
	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
2021 | struct sk_buff *reply_skb; |
2022 | struct chtls_sock *csk; |
2023 | unsigned int tid; |
2024 | |
2025 | csk = rcu_dereference_sk_user_data(sk); |
2026 | tid = GET_TID(req); |
2027 | |
2028 | reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any()); |
2029 | if (!reply_skb) { |
2030 | req->status = (queue << 1) | status; |
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
2032 | return; |
2033 | } |
2034 | |
	set_abort_rpl_wr(reply_skb, tid, status);
2036 | kfree_skb(skb); |
2037 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); |
2038 | if (csk_conn_inline(csk)) { |
2039 | struct l2t_entry *e = csk->l2t_entry; |
2040 | |
2041 | if (e && sk->sk_state != TCP_SYN_RECV) { |
2042 | cxgb4_l2t_send(csk->egress_dev, reply_skb, e); |
2043 | return; |
2044 | } |
2045 | } |
2046 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); |
2047 | } |
2048 | |
2049 | /* |
2050 | * This is run from a listener's backlog to abort a child connection in |
2051 | * SYN_RCV state (i.e., one on the listener's SYN queue). |
2052 | */ |
2053 | static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb) |
2054 | { |
2055 | struct chtls_sock *csk; |
2056 | struct sock *child; |
2057 | int queue; |
2058 | |
2059 | child = skb->sk; |
2060 | csk = rcu_dereference_sk_user_data(child); |
2061 | queue = csk->txq_idx; |
2062 | |
2063 | skb->sk = NULL; |
2064 | chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, |
2065 | CPL_ABORT_NO_RST, queue); |
	do_abort_syn_rcv(child, lsk);
2067 | } |
2068 | |
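/*
 * Abort a connection in SYN_RCV state: locate the parent listening
 * socket through the stid stored in ts_recent, then reap the child
 * directly or, if the listener is owned by the user, defer the work to
 * the listener's backlog via bl_abort_syn_rcv().
 */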
2069 | static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) |
2070 | { |
2071 | const struct request_sock *oreq; |
2072 | struct listen_ctx *listen_ctx; |
2073 | struct chtls_sock *csk; |
2074 | struct chtls_dev *cdev; |
2075 | struct sock *psk; |
2076 | void *ctx; |
2077 | |
2078 | csk = sk->sk_user_data; |
2079 | oreq = csk->passive_reap_next; |
2080 | cdev = csk->cdev; |
2081 | |
2082 | if (!oreq) |
2083 | return -1; |
2084 | |
2085 | ctx = lookup_stid(cdev->tids, oreq->ts_recent); |
2086 | if (!ctx) |
2087 | return -1; |
2088 | |
2089 | listen_ctx = (struct listen_ctx *)ctx; |
2090 | psk = listen_ctx->lsk; |
2091 | |
2092 | bh_lock_sock(psk); |
	if (!sock_owned_by_user(psk)) {
2094 | int queue = csk->txq_idx; |
2095 | |
2096 | chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); |
		do_abort_syn_rcv(sk, psk);
2098 | } else { |
2099 | skb->sk = sk; |
2100 | BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv; |
		__sk_add_backlog(psk, skb);
2102 | } |
2103 | bh_unlock_sock(psk); |
2104 | return 0; |
2105 | } |
2106 | |
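/*
 * Handle CPL_ABORT_REQ_RSS: the connection is being reset. Send a flowc
 * WR first if no TX data has gone out yet, report ETIMEDOUT on live
 * sockets, and acknowledge the request with a CPL_ABORT_RPL.
 */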
static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
2110 | struct chtls_sock *csk = sk->sk_user_data; |
2111 | int rst_status = CPL_ABORT_NO_RST; |
2112 | int queue = csk->txq_idx; |
2113 | |
	if (is_neg_adv(req->status)) {
2115 | kfree_skb(skb); |
2116 | return; |
2117 | } |
2118 | |
	csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);
2120 | |
	if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
	    !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
2123 | struct tcp_sock *tp = tcp_sk(sk); |
2124 | |
		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send_tx_flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
2128 | } |
2129 | |
	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
2131 | |
	if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
2133 | sk->sk_err = ETIMEDOUT; |
2134 | |
		if (!sock_flag(sk, SOCK_DEAD))
2136 | sk_error_report(sk); |
2137 | |
2138 | if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb)) |
2139 | return; |
2140 | |
2141 | } |
2142 | |
2143 | chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev, |
			     rst_status, queue);
2145 | chtls_release_resources(sk); |
2146 | chtls_conn_done(sk); |
2147 | } |
2148 | |
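/*
 * Handle CPL_ABORT_RPL_RSS, the reply to our own abort request. Release
 * the connection unless an abort request from the peer crossed ours.
 */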
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
2152 | struct chtls_sock *csk; |
2153 | struct chtls_dev *cdev; |
2154 | |
2155 | csk = rcu_dereference_sk_user_data(sk); |
2156 | cdev = csk->cdev; |
2157 | |
	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
		if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
2161 | if (sk->sk_state == TCP_SYN_SENT) { |
2162 | cxgb4_remove_tid(cdev->tids, |
2163 | csk->port_id, |
2164 | GET_TID(rpl), |
2165 | sk->sk_family); |
2166 | sock_put(sk); |
2167 | } |
2168 | chtls_release_resources(sk); |
2169 | chtls_conn_done(sk); |
2170 | } |
2171 | } |
2172 | kfree_skb(skb); |
2173 | } |
2174 | |
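/*
 * Demultiplex connection-lifetime CPL messages (peer close, close reply,
 * abort request/reply) to their handlers based on the RSS opcode.
 */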
2175 | static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb) |
2176 | { |
2177 | struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR; |
2178 | void (*fn)(struct sock *sk, struct sk_buff *skb); |
2179 | unsigned int hwtid = GET_TID(req); |
2180 | struct chtls_sock *csk; |
2181 | struct sock *sk; |
2182 | u8 opcode; |
2183 | |
2184 | opcode = ((const struct rss_header *)cplhdr(skb))->opcode; |
2185 | |
2186 | sk = lookup_tid(cdev->tids, hwtid); |
2187 | if (!sk) |
2188 | goto rel_skb; |
2189 | |
2190 | csk = sk->sk_user_data; |
2191 | |
2192 | switch (opcode) { |
2193 | case CPL_PEER_CLOSE: |
2194 | fn = chtls_peer_close; |
2195 | break; |
2196 | case CPL_CLOSE_CON_RPL: |
2197 | fn = chtls_close_con_rpl; |
2198 | break; |
2199 | case CPL_ABORT_REQ_RSS: |
2200 | /* |
2201 | * Save the offload device in the skb, we may process this |
2202 | * message after the socket has closed. |
2203 | */ |
2204 | BLOG_SKB_CB(skb)->cdev = csk->cdev; |
2205 | fn = chtls_abort_req_rss; |
2206 | break; |
2207 | case CPL_ABORT_RPL_RSS: |
2208 | fn = chtls_abort_rpl_rss; |
2209 | break; |
2210 | default: |
2211 | goto rel_skb; |
2212 | } |
2213 | |
2214 | process_cpl_msg(fn, sk, skb); |
2215 | return 0; |
2216 | |
2217 | rel_skb: |
2218 | kfree_skb(skb); |
2219 | return 0; |
2220 | } |
2221 | |
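/*
 * Handle CPL_FW4_ACK: the firmware returns TX work-request credits. Each
 * pending WR's credit cost was stashed in skb->csum when it was queued;
 * fully covered WRs are dequeued and freed, and a partially covered WR
 * gets its remaining cost written back. A valid snd_una updates TX state,
 * and returned credits may allow more queued frames to be pushed.
 */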
2222 | static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) |
2223 | { |
2224 | struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR; |
2225 | struct chtls_sock *csk = sk->sk_user_data; |
2226 | struct tcp_sock *tp = tcp_sk(sk); |
2227 | u32 credits = hdr->credits; |
2228 | u32 snd_una; |
2229 | |
2230 | snd_una = ntohl(hdr->snd_una); |
2231 | csk->wr_credits += credits; |
2232 | |
2233 | if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits) |
2234 | csk->wr_unacked = csk->wr_max_credits - csk->wr_credits; |
2235 | |
2236 | while (credits) { |
2237 | struct sk_buff *pskb = csk->wr_skb_head; |
2238 | u32 csum; |
2239 | |
2240 | if (unlikely(!pskb)) { |
2241 | if (csk->wr_nondata) |
2242 | csk->wr_nondata -= credits; |
2243 | break; |
2244 | } |
2245 | csum = (__force u32)pskb->csum; |
2246 | if (unlikely(credits < csum)) { |
2247 | pskb->csum = (__force __wsum)(csum - credits); |
2248 | break; |
2249 | } |
2250 | dequeue_wr(sk); |
2251 | credits -= csum; |
		kfree_skb(pskb);
2253 | } |
2254 | if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) { |
2255 | if (unlikely(before(snd_una, tp->snd_una))) { |
2256 | kfree_skb(skb); |
2257 | return; |
2258 | } |
2259 | |
2260 | if (tp->snd_una != snd_una) { |
2261 | tp->snd_una = snd_una; |
2262 | tp->rcv_tstamp = tcp_jiffies32; |
2263 | if (tp->snd_una == tp->snd_nxt && |
			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
2266 | } |
2267 | } |
2268 | |
2269 | if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) { |
2270 | unsigned int fclen16 = roundup(failover_flowc_wr_len, 16); |
2271 | |
2272 | csk->wr_credits -= fclen16; |
		csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		csk_reset_flag(csk, CSK_TX_FAILOVER);
2275 | } |
	if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
2277 | sk->sk_write_space(sk); |
2278 | |
2279 | kfree_skb(skb); |
2280 | } |
2281 | |
2282 | static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb) |
2283 | { |
2284 | struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR; |
2285 | unsigned int hwtid = GET_TID(rpl); |
2286 | struct sock *sk; |
2287 | |
2288 | sk = lookup_tid(cdev->tids, hwtid); |
2289 | if (unlikely(!sk)) { |
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
2291 | return -EINVAL; |
2292 | } |
	process_cpl_msg(chtls_rx_ack, sk, skb);
2294 | |
2295 | return 0; |
2296 | } |
2297 | |
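/*
 * Handle CPL_SET_TCB_RPL. A cookie of TCB_FIELD_COOKIE_TFLAG indicates a
 * TCB flag update completed; escalate to an abort carrying a RST.
 */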
2298 | static int chtls_set_tcb_rpl(struct chtls_dev *cdev, struct sk_buff *skb) |
2299 | { |
2300 | struct cpl_set_tcb_rpl *rpl = cplhdr(skb) + RSS_HDR; |
2301 | unsigned int hwtid = GET_TID(rpl); |
2302 | struct sock *sk; |
2303 | |
2304 | sk = lookup_tid(cdev->tids, hwtid); |
2305 | |
2306 | /* return EINVAL if socket doesn't exist */ |
2307 | if (!sk) |
2308 | return -EINVAL; |
2309 | |
	/* Reuse the skb for the abort, since struct cpl_set_tcb_field is
	 * larger than struct cpl_abort_req.
	 */
2313 | if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG) |
2314 | chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL); |
2315 | |
2316 | kfree_skb(skb); |
2317 | return 0; |
2318 | } |
2319 | |
2320 | chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = { |
2321 | [CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl, |
2322 | [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl, |
2323 | [CPL_PASS_ACCEPT_REQ] = chtls_pass_accept_req, |
2324 | [CPL_PASS_ESTABLISH] = chtls_pass_establish, |
2325 | [CPL_RX_DATA] = chtls_rx_data, |
2326 | [CPL_TLS_DATA] = chtls_rx_pdu, |
2327 | [CPL_RX_TLS_CMP] = chtls_rx_cmp, |
2328 | [CPL_PEER_CLOSE] = chtls_conn_cpl, |
2329 | [CPL_CLOSE_CON_RPL] = chtls_conn_cpl, |
2330 | [CPL_ABORT_REQ_RSS] = chtls_conn_cpl, |
2331 | [CPL_ABORT_RPL_RSS] = chtls_conn_cpl, |
2332 | [CPL_FW4_ACK] = chtls_wr_ack, |
2333 | [CPL_SET_TCB_RPL] = chtls_set_tcb_rpl, |
2334 | }; |
2335 | |