/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 */

#ifndef __CHTLS_CM_H__
#define __CHTLS_CM_H__

/*
 * TCB settings
 */
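/*
 * Field macro convention: _W is the TCB word index, _S the bit shift
 * within that word, _M the (unshifted) field mask, and _V(x) places a
 * value at the field's bit position.
 */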
/* 3:0 */
#define TCB_ULP_TYPE_W    0
#define TCB_ULP_TYPE_S    0
#define TCB_ULP_TYPE_M    0xfULL
#define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)

/* 11:4 */
#define TCB_ULP_RAW_W    0
#define TCB_ULP_RAW_S    4
#define TCB_ULP_RAW_M    0xffULL
#define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)

#define TF_TLS_KEY_SIZE_S    7
#define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)

#define TF_TLS_CONTROL_S    2
#define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)

#define TF_TLS_ACTIVE_S    1
#define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)

#define TF_TLS_ENABLE_S    0
#define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)

#define TF_RX_QUIESCE_S    15
#define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
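
/*
 * The TF_TLS_* bits above are sub-fields of ULP_RAW in TCB word 0
 * (TF_RX_QUIESCE is a bit in the TCB t_flags word).  A minimal sketch
 * of turning TLS offload on for a connection, assuming the driver's
 * TCB-update helper chtls_set_tcb_field() from chtls_hw.c:
 *
 *	chtls_set_tcb_field(sk, TCB_ULP_RAW_W,
 *			    TCB_ULP_RAW_V(TCB_ULP_RAW_M),
 *			    TCB_ULP_RAW_V(TF_TLS_KEY_SIZE_V(1) |
 *					  TF_TLS_CONTROL_V(1) |
 *					  TF_TLS_ACTIVE_V(1) |
 *					  TF_TLS_ENABLE_V(1)));
 */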

/*
 * Max receive window supported by HW in bytes. Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)
#define MAX_MSS     65536

/*
 * Min receive window. We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)
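
/* true iff @x, in network byte order, is an IPv4 loopback address (127/8) */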
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))

/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
#define TX_TLSHDR_LEN \
	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
	 sizeof(struct sge_opaque_hdr))
#define TXDATA_SKB_LEN 128

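/* record types carried in the type field of CPL_TX_TLS_SFO work requests */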
enum {
	CPL_TX_TLS_SFO_TYPE_CCS,
	CPL_TX_TLS_SFO_TYPE_ALERT,
	CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
	CPL_TX_TLS_SFO_TYPE_DATA,
	CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
};

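/* TLS record-layer ContentType values (RFC 5246; heartbeat from RFC 6520) */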
enum {
	TLS_HDR_TYPE_CCS = 20,
	TLS_HDR_TYPE_ALERT,
	TLS_HDR_TYPE_HANDSHAKE,
	TLS_HDR_TYPE_RECORD,
	TLS_HDR_TYPE_HEARTBEAT,
};

typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
extern struct request_sock_ops chtls_rsk_ops;
extern struct request_sock_ops chtls_rsk_opsv6;

struct deferred_skb_cb {
	defer_handler_t handler;
	struct chtls_dev *dev;
};

#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
#define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
#define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)
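
/* the CB layouts above overlay skb->cb[] and must fit in its 48 bytes */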

#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)

/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
#define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)

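/*
 * chtls_defer_reply() records @handler and @dev in DEFERRED_SKB_CB(skb)
 * and queues the skb on the device's deferred queue, so the handler runs
 * later in process context rather than in the RX path.
 */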
void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
		       defer_handler_t handler);

/*
 * Returns nonzero if the socket is in one of the supplied states.
 * @states is a bitmask of TCPF_* state flags.
 */
static inline unsigned int sk_in_state(const struct sock *sk,
				       unsigned int states)
{
	return states & (1 << sk->sk_state);
}
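
/*
 * Illustrative use, e.g. to bail out on sockets that can no longer
 * make progress:
 *
 *	if (sk_in_state(sk, TCPF_CLOSE | TCPF_TIME_WAIT))
 *		return;
 */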

static void chtls_rsk_destructor(struct request_sock *req)
{
	/* do nothing */
}

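/*
 * Build a request_sock_ops for chtls by borrowing the base TCP
 * protocol's request-sock slab, then point the chtls proto at it.
 */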
static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
				      struct request_sock_ops *chtls_tcp_ops,
				      struct proto *tcp_prot, int family)
{
	memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
	chtls_tcp_ops->family = family;
	chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
	chtls_tcp_ops->destructor = chtls_rsk_destructor;
	chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
	chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
}

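/*
 * Minimal counterpart of __reqsk_free(): drop the listener reference
 * taken at allocation and return the request_sock to its slab cache.
 */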
static inline void chtls_reqsk_free(struct request_sock *req)
{
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kmem_cache_free(req->rsk_ops->slab, req);
}

#define DECLARE_TASK_FUNC(task, task_param) \
	static void task(struct work_struct *task_param)

static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptible)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq)) {
		if (interruptible)
			wake_up_interruptible(sk_sleep(sk));
		else
			wake_up_all(sk_sleep(sk));
	}
	rcu_read_unlock();
}

static inline void chtls_set_req_port(struct request_sock *oreq,
				      __be16 source, __be16 dest)
{
	inet_rsk(oreq)->ir_rmt_port = source;
	inet_rsk(oreq)->ir_num = ntohs(dest);
}

static inline void chtls_set_req_addr(struct request_sock *oreq,
				      __be32 local_ip, __be32 peer_ip)
{
	inet_rsk(oreq)->ir_loc_addr = local_ip;
	inet_rsk(oreq)->ir_rmt_addr = peer_ip;
}

static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	kfree_skb(skb);
}

static inline void chtls_reset_wr_list(struct chtls_sock *csk)
{
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
}

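/*
 * Skbs carrying outstanding work requests are kept on a singly-linked
 * list (wr_skb_head/wr_skb_tail, chained through WR_SKB_CB) until the
 * hardware acknowledges them: enqueue_wr() takes an extra reference via
 * skb_get() so the skb stays alive until dequeue_wr() removes it on
 * completion.
 */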
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
	WR_SKB_CB(skb)->next_wr = NULL;

	skb_get(skb);

	if (!csk->wr_skb_head)
		csk->wr_skb_head = skb;
	else
		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
	csk->wr_skb_tail = skb;
}

static inline struct sk_buff *dequeue_wr(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb = csk->wr_skb_head;

	if (likely(skb)) {
		/* Don't bother clearing the tail */
		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
		WR_SKB_CB(skb)->next_wr = NULL;
	}
	return skb;
}
#endif /* __CHTLS_CM_H__ */