1 | /* |
2 | * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. |
3 | * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved. |
4 | * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved. |
5 | * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved. |
6 | * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved. |
7 | * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io |
8 | * |
9 | * This software is available to you under a choice of one of two |
10 | * licenses. You may choose to be licensed under the terms of the GNU |
11 | * General Public License (GPL) Version 2, available from the file |
12 | * COPYING in the main directory of this source tree, or the |
13 | * OpenIB.org BSD license below: |
14 | * |
15 | * Redistribution and use in source and binary forms, with or |
16 | * without modification, are permitted provided that the following |
17 | * conditions are met: |
18 | * |
19 | * - Redistributions of source code must retain the above |
20 | * copyright notice, this list of conditions and the following |
21 | * disclaimer. |
22 | * |
23 | * - Redistributions in binary form must reproduce the above |
24 | * copyright notice, this list of conditions and the following |
25 | * disclaimer in the documentation and/or other materials |
26 | * provided with the distribution. |
27 | * |
28 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
29 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
30 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
31 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
32 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
33 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
34 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
35 | * SOFTWARE. |
36 | */ |
37 | |
38 | #include <linux/bug.h> |
39 | #include <linux/sched/signal.h> |
40 | #include <linux/module.h> |
41 | #include <linux/kernel.h> |
42 | #include <linux/splice.h> |
43 | #include <crypto/aead.h> |
44 | |
45 | #include <net/strparser.h> |
46 | #include <net/tls.h> |
47 | #include <trace/events/sock.h> |
48 | |
49 | #include "tls.h" |
50 | |
51 | struct tls_decrypt_arg { |
52 | struct_group(inargs, |
53 | bool zc; |
54 | bool async; |
55 | bool async_done; |
56 | u8 tail; |
57 | ); |
58 | |
59 | struct sk_buff *skb; |
60 | }; |
61 | |
62 | struct tls_decrypt_ctx { |
63 | struct sock *sk; |
64 | u8 iv[TLS_MAX_IV_SIZE]; |
65 | u8 aad[TLS_MAX_AAD_SIZE]; |
66 | u8 tail; |
67 | bool free_sgout; |
68 | struct scatterlist sg[]; |
69 | }; |
70 | |
71 | noinline void tls_err_abort(struct sock *sk, int err) |
72 | { |
73 | WARN_ON_ONCE(err >= 0); |
74 | /* sk->sk_err should contain a positive error code. */ |
75 | WRITE_ONCE(sk->sk_err, -err); |
76 | /* Paired with smp_rmb() in tcp_poll() */ |
77 | smp_wmb(); |
78 | sk_error_report(sk); |
79 | } |
80 | |
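/* Count the scatterlist entries needed to map @len bytes of @skb starting
 * at @offset: the linear head, each page frag, and any frag-list skbs
 * (walked recursively, with a depth cap as a safety net).
 */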
81 | static int __skb_nsg(struct sk_buff *skb, int offset, int len, |
82 | unsigned int recursion_level) |
83 | { |
84 | int start = skb_headlen(skb); |
85 | int i, chunk = start - offset; |
86 | struct sk_buff *frag_iter; |
87 | int elt = 0; |
88 | |
89 | if (unlikely(recursion_level >= 24)) |
90 | return -EMSGSIZE; |
91 | |
92 | if (chunk > 0) { |
93 | if (chunk > len) |
94 | chunk = len; |
95 | elt++; |
96 | len -= chunk; |
97 | if (len == 0) |
98 | return elt; |
99 | offset += chunk; |
100 | } |
101 | |
102 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
103 | int end; |
104 | |
105 | WARN_ON(start > offset + len); |
106 | |
		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
108 | chunk = end - offset; |
109 | if (chunk > 0) { |
110 | if (chunk > len) |
111 | chunk = len; |
112 | elt++; |
113 | len -= chunk; |
114 | if (len == 0) |
115 | return elt; |
116 | offset += chunk; |
117 | } |
118 | start = end; |
119 | } |
120 | |
121 | if (unlikely(skb_has_frag_list(skb))) { |
122 | skb_walk_frags(skb, frag_iter) { |
123 | int end, ret; |
124 | |
125 | WARN_ON(start > offset + len); |
126 | |
127 | end = start + frag_iter->len; |
128 | chunk = end - offset; |
129 | if (chunk > 0) { |
130 | if (chunk > len) |
131 | chunk = len; |
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
134 | if (unlikely(ret < 0)) |
135 | return ret; |
136 | elt += ret; |
137 | len -= chunk; |
138 | if (len == 0) |
139 | return elt; |
140 | offset += chunk; |
141 | } |
142 | start = end; |
143 | } |
144 | } |
145 | BUG_ON(len); |
146 | return elt; |
147 | } |
148 | |
149 | /* Return the number of scatterlist elements required to completely map the |
150 | * skb, or -EMSGSIZE if the recursion depth is exceeded. |
151 | */ |
152 | static int skb_nsg(struct sk_buff *skb, int offset, int len) |
153 | { |
	return __skb_nsg(skb, offset, len, 0);
155 | } |
156 | |
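/* TLS 1.3 hides the real record type: the plaintext is followed by one
 * content-type byte and optional zero padding. Scan backwards from the end
 * of the record for the last non-zero byte; that byte is the inner content
 * type and everything consumed on the way is padding to be stripped.
 */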
157 | static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb, |
158 | struct tls_decrypt_arg *darg) |
159 | { |
160 | struct strp_msg *rxm = strp_msg(skb); |
161 | struct tls_msg *tlm = tls_msg(skb); |
162 | int sub = 0; |
163 | |
164 | /* Determine zero-padding length */ |
165 | if (prot->version == TLS_1_3_VERSION) { |
166 | int offset = rxm->full_len - TLS_TAG_SIZE - 1; |
167 | char content_type = darg->zc ? darg->tail : 0; |
168 | int err; |
169 | |
170 | while (content_type == 0) { |
171 | if (offset < prot->prepend_size) |
172 | return -EBADMSG; |
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
175 | if (err) |
176 | return err; |
177 | if (content_type) |
178 | break; |
179 | sub++; |
180 | offset--; |
181 | } |
182 | tlm->control = content_type; |
183 | } |
184 | return sub; |
185 | } |
186 | |
187 | static void tls_decrypt_done(void *data, int err) |
188 | { |
189 | struct aead_request *aead_req = data; |
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
191 | struct scatterlist *sgout = aead_req->dst; |
192 | struct tls_sw_context_rx *ctx; |
193 | struct tls_decrypt_ctx *dctx; |
194 | struct tls_context *tls_ctx; |
195 | struct scatterlist *sg; |
196 | unsigned int pages; |
197 | struct sock *sk; |
198 | int aead_size; |
199 | |
200 | /* If requests get too backlogged crypto API returns -EBUSY and calls |
201 | * ->complete(-EINPROGRESS) immediately followed by ->complete(0) |
202 | * to make waiting for backlog to flush with crypto_wait_req() easier. |
203 | * First wait converts -EBUSY -> -EINPROGRESS, and the second one |
204 | * -EINPROGRESS -> 0. |
205 | * We have a single struct crypto_async_request per direction, this |
206 | * scheme doesn't help us, so just ignore the first ->complete(). |
207 | */ |
208 | if (err == -EINPROGRESS) |
209 | return; |
210 | |
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
212 | aead_size = ALIGN(aead_size, __alignof__(*dctx)); |
213 | dctx = (void *)((u8 *)aead_req + aead_size); |
214 | |
215 | sk = dctx->sk; |
216 | tls_ctx = tls_get_ctx(sk); |
217 | ctx = tls_sw_ctx_rx(tls_ctx); |
218 | |
219 | /* Propagate if there was an err */ |
220 | if (err) { |
221 | if (err == -EBADMSG) |
222 | TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); |
223 | ctx->async_wait.err = err; |
224 | tls_err_abort(sk, err); |
225 | } |
226 | |
	/* Free the destination pages if skb was not decrypted in place */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
241 | } |
242 | |
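/* Wait for all in-flight async decrypts to complete. decrypt_pending is
 * biased by one; dropping that reference lets the final completion signal
 * ctx->async_wait, and the increment afterwards restores the bias for the
 * next batch of requests.
 */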
243 | static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx) |
244 | { |
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);
248 | |
249 | return ctx->async_wait.err; |
250 | } |
251 | |
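/* Submit the AEAD request for one record. In the async case this returns 0
 * while the completion may still be in flight; if the crypto API backlogged
 * the request (-EBUSY), the backlog is flushed synchronously and
 * darg->async_done is set so the caller knows no completion is pending.
 */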
252 | static int tls_do_decryption(struct sock *sk, |
253 | struct scatterlist *sgin, |
254 | struct scatterlist *sgout, |
255 | char *iv_recv, |
256 | size_t data_len, |
257 | struct aead_request *aead_req, |
258 | struct tls_decrypt_arg *darg) |
259 | { |
260 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
261 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
262 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); |
263 | int ret; |
264 | |
	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		DECLARE_CRYPTO_WAIT(wait);

		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		ret = crypto_aead_decrypt(aead_req);
		if (ret == -EINPROGRESS || ret == -EBUSY)
			ret = crypto_wait_req(ret, &wait);
		return ret;
	}

	ret = crypto_aead_decrypt(aead_req);
290 | if (ret == -EINPROGRESS) |
291 | return 0; |
292 | |
293 | if (ret == -EBUSY) { |
294 | ret = tls_decrypt_async_wait(ctx); |
295 | darg->async_done = true; |
296 | /* all completions have run, we're not doing async anymore */ |
297 | darg->async = false; |
298 | return ret; |
299 | } |
300 | |
	atomic_dec(&ctx->decrypt_pending);
302 | darg->async = false; |
303 | |
304 | return ret; |
305 | } |
306 | |
307 | static void tls_trim_both_msgs(struct sock *sk, int target_size) |
308 | { |
309 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
310 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
311 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
312 | struct tls_rec *rec = ctx->open_rec; |
313 | |
	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
318 | } |
319 | |
320 | static int tls_alloc_encrypted_msg(struct sock *sk, int len) |
321 | { |
322 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
323 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
324 | struct tls_rec *rec = ctx->open_rec; |
325 | struct sk_msg *msg_en = &rec->msg_encrypted; |
326 | |
	return sk_msg_alloc(sk, msg_en, len, 0);
328 | } |
329 | |
330 | static int tls_clone_plaintext_msg(struct sock *sk, int required) |
331 | { |
332 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
333 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
334 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
335 | struct tls_rec *rec = ctx->open_rec; |
336 | struct sk_msg *msg_pl = &rec->msg_plaintext; |
337 | struct sk_msg *msg_en = &rec->msg_encrypted; |
338 | int skip, len; |
339 | |
	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. The caller guarantees that
	 * msg_en has enough room for them.
	 */
344 | len = required - msg_pl->sg.size; |
345 | |
346 | /* Skip initial bytes in msg_en's data to be able to use |
347 | * same offset of both plain and encrypted data. |
348 | */ |
349 | skip = prot->prepend_size + msg_pl->sg.size; |
350 | |
	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
352 | } |
353 | |
354 | static struct tls_rec *tls_get_rec(struct sock *sk) |
355 | { |
356 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
357 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
358 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
359 | struct sk_msg *msg_pl, *msg_en; |
360 | struct tls_rec *rec; |
361 | int mem_size; |
362 | |
	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
366 | if (!rec) |
367 | return NULL; |
368 | |
369 | msg_pl = &rec->msg_plaintext; |
370 | msg_en = &rec->msg_encrypted; |
371 | |
	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);
382 | |
383 | rec->sk = sk; |
384 | |
385 | return rec; |
386 | } |
387 | |
388 | static void tls_free_rec(struct sock *sk, struct tls_rec *rec) |
389 | { |
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
393 | } |
394 | |
395 | static void tls_free_open_rec(struct sock *sk) |
396 | { |
397 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
398 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
399 | struct tls_rec *rec = ctx->open_rec; |
400 | |
401 | if (rec) { |
402 | tls_free_rec(sk, rec); |
403 | ctx->open_rec = NULL; |
404 | } |
405 | } |
406 | |
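/* Transmit the records that are ready: finish any partially sent record
 * first, then push fully encrypted records from the head of tx_list in
 * order, stopping at the first record still awaiting encryption. With
 * flags == -1 each record is sent with the flags it was queued with.
 */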
407 | int tls_tx_records(struct sock *sk, int flags) |
408 | { |
409 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
410 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
411 | struct tls_rec *rec, *tmp; |
412 | struct sk_msg *msg_en; |
413 | int tx_flags, rc = 0; |
414 | |
	if (tls_is_partially_sent_record(tls_ctx)) {
416 | rec = list_first_entry(&ctx->tx_list, |
417 | struct tls_rec, list); |
418 | |
419 | if (flags == -1) |
420 | tx_flags = rec->tx_flags; |
421 | else |
422 | tx_flags = flags; |
423 | |
		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
434 | } |
435 | |
436 | /* Tx all ready records */ |
437 | list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { |
438 | if (READ_ONCE(rec->tx_ready)) { |
439 | if (flags == -1) |
440 | tx_flags = rec->tx_flags; |
441 | else |
442 | tx_flags = flags; |
443 | |
444 | msg_en = &rec->msg_encrypted; |
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
454 | } else { |
455 | break; |
456 | } |
457 | } |
458 | |
459 | tx_err: |
460 | if (rc < 0 && rc != -EAGAIN) |
		tls_err_abort(sk, -EBADMSG);
462 | |
463 | return rc; |
464 | } |
465 | |
466 | static void tls_encrypt_done(void *data, int err) |
467 | { |
468 | struct tls_sw_context_tx *ctx; |
469 | struct tls_context *tls_ctx; |
470 | struct tls_prot_info *prot; |
471 | struct tls_rec *rec = data; |
472 | struct scatterlist *sge; |
473 | struct sk_msg *msg_en; |
474 | struct sock *sk; |
475 | |
476 | if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */ |
477 | return; |
478 | |
479 | msg_en = &rec->msg_encrypted; |
480 | |
481 | sk = rec->sk; |
482 | tls_ctx = tls_get_ctx(sk); |
483 | prot = &tls_ctx->prot_info; |
484 | ctx = tls_sw_ctx_tx(tls_ctx); |
485 | |
	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
487 | sge->offset -= prot->prepend_size; |
488 | sge->length += prot->prepend_size; |
489 | |
490 | /* Check if error is previously set on socket */ |
491 | if (err || sk->sk_err) { |
492 | rec = NULL; |
493 | |
494 | /* If err is already set on socket, return the same code */ |
495 | if (sk->sk_err) { |
496 | ctx->async_wait.err = -sk->sk_err; |
497 | } else { |
498 | ctx->async_wait.err = err; |
499 | tls_err_abort(sk, err); |
500 | } |
501 | } |
502 | |
503 | if (rec) { |
504 | struct tls_rec *first_rec; |
505 | |
506 | /* Mark the record as ready for transmission */ |
507 | smp_store_mb(rec->tx_ready, true); |
508 | |
509 | /* If received record is at head of tx_list, schedule tx */ |
510 | first_rec = list_first_entry(&ctx->tx_list, |
511 | struct tls_rec, list); |
512 | if (rec == first_rec) { |
513 | /* Schedule the transmission */ |
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
522 | } |
523 | |
524 | static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx) |
525 | { |
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);
529 | |
530 | return ctx->async_wait.err; |
531 | } |
532 | |
533 | static int tls_do_encryption(struct sock *sk, |
534 | struct tls_context *tls_ctx, |
535 | struct tls_sw_context_tx *ctx, |
536 | struct aead_request *aead_req, |
537 | size_t data_len, u32 start) |
538 | { |
539 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
540 | struct tls_rec *rec = ctx->open_rec; |
541 | struct sk_msg *msg_en = &rec->msg_encrypted; |
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
543 | int rc, iv_offset = 0; |
544 | |
545 | /* For CCM based ciphers, first byte of IV is a constant */ |
546 | switch (prot->cipher_type) { |
547 | case TLS_CIPHER_AES_CCM_128: |
548 | rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; |
549 | iv_offset = 1; |
550 | break; |
551 | case TLS_CIPHER_SM4_CCM: |
552 | rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE; |
553 | iv_offset = 1; |
554 | break; |
555 | } |
556 | |
557 | memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, |
558 | prot->iv_size + prot->salt_size); |
559 | |
	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);
562 | |
563 | sge->offset += prot->prepend_size; |
564 | sge->length -= prot->prepend_size; |
565 | |
566 | msg_en->sg.curr = start; |
567 | |
	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
	}
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
589 | sge->offset -= prot->prepend_size; |
590 | sge->length += prot->prepend_size; |
591 | } |
592 | |
593 | if (!rc) { |
594 | WRITE_ONCE(rec->tx_ready, true); |
595 | } else if (rc != -EINPROGRESS) { |
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context unless encryption failed */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
603 | return rc; |
604 | } |
605 | |
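/* Split the open record at @split_point (the apply_bytes boundary set by
 * the BPF verdict): the first @split_point bytes stay in @from, the rest
 * moves to a newly allocated record returned via @to. A page straddling
 * the boundary is shared by both records via an extra page reference.
 */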
606 | static int tls_split_open_record(struct sock *sk, struct tls_rec *from, |
607 | struct tls_rec **to, struct sk_msg *msg_opl, |
608 | struct sk_msg *msg_oen, u32 split_point, |
609 | u32 tx_overhead_size, u32 *orig_end) |
610 | { |
611 | u32 i, j, bytes = 0, apply = msg_opl->apply_bytes; |
612 | struct scatterlist *sge, *osge, *nsge; |
613 | u32 orig_size = msg_opl->sg.size; |
614 | struct scatterlist tmp = { }; |
615 | struct sk_msg *msg_npl; |
616 | struct tls_rec *new; |
617 | int ret; |
618 | |
619 | new = tls_get_rec(sk); |
620 | if (!new) |
621 | return -ENOMEM; |
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
626 | return ret; |
627 | } |
628 | |
629 | *orig_end = msg_opl->sg.end; |
630 | i = msg_opl->sg.start; |
	sge = sk_msg_elem(msg_opl, i);
632 | while (apply && sge->length) { |
633 | if (sge->length > apply) { |
634 | u32 len = sge->length - apply; |
635 | |
			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
639 | sge->length = apply; |
640 | bytes += apply; |
641 | apply = 0; |
642 | } else { |
643 | apply -= sge->length; |
644 | bytes += sge->length; |
645 | } |
646 | |
647 | sk_msg_iter_var_next(i); |
648 | if (i == msg_opl->sg.end) |
649 | break; |
		sge = sk_msg_elem(msg_opl, i);
651 | } |
652 | |
653 | msg_opl->sg.end = i; |
654 | msg_opl->sg.curr = i; |
655 | msg_opl->sg.copybreak = 0; |
656 | msg_opl->apply_bytes = 0; |
657 | msg_opl->sg.size = bytes; |
658 | |
659 | msg_npl = &new->msg_plaintext; |
660 | msg_npl->apply_bytes = apply; |
661 | msg_npl->sg.size = orig_size - bytes; |
662 | |
663 | j = msg_npl->sg.start; |
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
681 | } |
682 | |
683 | msg_npl->sg.end = j; |
684 | msg_npl->sg.curr = j; |
685 | msg_npl->sg.copybreak = 0; |
686 | |
687 | *to = new; |
688 | return 0; |
689 | } |
690 | |
691 | static void tls_merge_open_record(struct sock *sk, struct tls_rec *to, |
692 | struct tls_rec *from, u32 orig_end) |
693 | { |
694 | struct sk_msg *msg_npl = &from->msg_plaintext; |
695 | struct sk_msg *msg_opl = &to->msg_plaintext; |
696 | struct scatterlist *osge, *nsge; |
697 | u32 i, j; |
698 | |
699 | i = msg_opl->sg.end; |
700 | sk_msg_iter_var_prev(i); |
701 | j = msg_npl->sg.start; |
702 | |
	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
710 | } |
711 | |
712 | msg_opl->sg.end = orig_end; |
713 | msg_opl->sg.curr = orig_end; |
714 | msg_opl->sg.copybreak = 0; |
715 | msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size; |
716 | msg_opl->sg.size += msg_npl->sg.size; |
717 | |
	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
722 | } |
723 | |
724 | static int tls_push_record(struct sock *sk, int flags, |
725 | unsigned char record_type) |
726 | { |
727 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
728 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
729 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
730 | struct tls_rec *rec = ctx->open_rec, *tmp = NULL; |
731 | u32 i, split_point, orig_end; |
732 | struct sk_msg *msg_pl, *msg_en; |
733 | struct aead_request *req; |
734 | bool split; |
735 | int rc; |
736 | |
737 | if (!rec) |
738 | return 0; |
739 | |
740 | msg_pl = &rec->msg_plaintext; |
741 | msg_en = &rec->msg_encrypted; |
742 | |
743 | split_point = msg_pl->apply_bytes; |
744 | split = split_point && split_point < msg_pl->sg.size; |
745 | if (unlikely((!split && |
746 | msg_pl->sg.size + |
747 | prot->overhead_size > msg_en->sg.size) || |
748 | (split && |
749 | split_point + |
750 | prot->overhead_size > msg_en->sg.size))) { |
751 | split = true; |
752 | split_point = msg_en->sg.size; |
753 | } |
754 | if (split) { |
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
773 | } |
774 | |
775 | rec->tx_flags = flags; |
776 | req = &rec->aead_req; |
777 | |
778 | i = msg_pl->sg.end; |
779 | sk_msg_iter_var_prev(i); |
780 | |
781 | rec->content_type = record_type; |
782 | if (prot->version == TLS_1_3_VERSION) { |
783 | /* Add content type to end of message. No padding added */ |
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);
816 | |
817 | tls_ctx->pending_open_record_frags = false; |
818 | |
	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
827 | } |
828 | } |
829 | ctx->async_capable = 1; |
830 | return rc; |
831 | } else if (split) { |
832 | msg_pl = &tmp->msg_plaintext; |
833 | msg_en = &tmp->msg_encrypted; |
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
835 | tls_ctx->pending_open_record_frags = true; |
836 | ctx->open_rec = tmp; |
837 | } |
838 | |
839 | return tls_tx_records(sk, flags); |
840 | } |
841 | |
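/* Run the psock's BPF verdict program on the record being sent and act on
 * the result: __SK_PASS transmits the record here, __SK_REDIRECT hands the
 * plaintext to another socket, and __SK_DROP frees it and fails the send
 * with -EACCES. Without a psock (or with MSG_SENDPAGE_NOPOLICY) the record
 * is pushed out directly.
 */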
842 | static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, |
843 | bool full_record, u8 record_type, |
844 | ssize_t *copied, int flags) |
845 | { |
846 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
847 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
848 | struct sk_msg msg_redir = { }; |
849 | struct sk_psock *psock; |
850 | struct sock *sk_redir; |
851 | struct tls_rec *rec; |
852 | bool enospc, policy, redir_ingress; |
853 | int err = 0, send; |
854 | u32 delta = 0; |
855 | |
856 | policy = !(flags & MSG_SENDPAGE_NOPOLICY); |
857 | psock = sk_psock_get(sk); |
858 | if (!psock || !policy) { |
859 | err = tls_push_record(sk, flags, record_type); |
860 | if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { |
861 | *copied -= sk_msg_free(sk, msg); |
862 | tls_free_open_rec(sk); |
863 | err = -sk->sk_err; |
864 | } |
865 | if (psock) |
866 | sk_psock_put(sk, psock); |
867 | return err; |
868 | } |
869 | more_data: |
870 | enospc = sk_msg_full(msg); |
871 | if (psock->eval == __SK_NONE) { |
872 | delta = msg->sg.size; |
873 | psock->eval = sk_psock_msg_verdict(sk, psock, msg); |
874 | delta -= msg->sg.size; |
875 | } |
876 | if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && |
877 | !enospc && !full_record) { |
878 | err = -ENOSPC; |
879 | goto out_err; |
880 | } |
881 | msg->cork_bytes = 0; |
882 | send = msg->sg.size; |
883 | if (msg->apply_bytes && msg->apply_bytes < send) |
884 | send = msg->apply_bytes; |
885 | |
886 | switch (psock->eval) { |
887 | case __SK_PASS: |
888 | err = tls_push_record(sk, flags, record_type); |
889 | if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { |
890 | *copied -= sk_msg_free(sk, msg); |
891 | tls_free_open_rec(sk); |
892 | err = -sk->sk_err; |
893 | goto out_err; |
894 | } |
895 | break; |
896 | case __SK_REDIRECT: |
897 | redir_ingress = psock->redir_ingress; |
898 | sk_redir = psock->sk_redir; |
899 | memcpy(&msg_redir, msg, sizeof(*msg)); |
900 | if (msg->apply_bytes < send) |
901 | msg->apply_bytes = 0; |
902 | else |
903 | msg->apply_bytes -= send; |
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
912 | msg->sg.size = 0; |
913 | } |
914 | if (msg->sg.size == 0) |
915 | tls_free_open_rec(sk); |
916 | break; |
917 | case __SK_DROP: |
918 | default: |
		sk_msg_free_partial(sk, msg, send);
920 | if (msg->apply_bytes < send) |
921 | msg->apply_bytes = 0; |
922 | else |
923 | msg->apply_bytes -= send; |
924 | if (msg->sg.size == 0) |
925 | tls_free_open_rec(sk); |
926 | *copied -= (send + delta); |
927 | err = -EACCES; |
928 | } |
929 | |
930 | if (likely(!err)) { |
931 | bool reset_eval = !ctx->open_rec; |
932 | |
933 | rec = ctx->open_rec; |
934 | if (rec) { |
935 | msg = &rec->msg_plaintext; |
936 | if (!msg->apply_bytes) |
937 | reset_eval = true; |
938 | } |
939 | if (reset_eval) { |
940 | psock->eval = __SK_NONE; |
941 | if (psock->sk_redir) { |
				sock_put(psock->sk_redir);
943 | psock->sk_redir = NULL; |
944 | } |
945 | } |
946 | if (rec) |
947 | goto more_data; |
948 | } |
949 | out_err: |
950 | sk_psock_put(sk, psock); |
951 | return err; |
952 | } |
953 | |
954 | static int tls_sw_push_pending_record(struct sock *sk, int flags) |
955 | { |
956 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
957 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
958 | struct tls_rec *rec = ctx->open_rec; |
959 | struct sk_msg *msg_pl; |
960 | size_t copied; |
961 | |
962 | if (!rec) |
963 | return 0; |
964 | |
965 | msg_pl = &rec->msg_plaintext; |
966 | copied = msg_pl->sg.size; |
967 | if (!copied) |
968 | return 0; |
969 | |
	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
972 | } |
973 | |
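/* MSG_SPLICE_PAGES path: move page references straight from the iterator
 * into the plaintext sg list instead of copying the payload.
 */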
974 | static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg, |
975 | struct sk_msg *msg_pl, size_t try_to_copy, |
976 | ssize_t *copied) |
977 | { |
978 | struct page *page = NULL, **pages = &page; |
979 | |
980 | do { |
981 | ssize_t part; |
982 | size_t off; |
983 | |
		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
986 | if (part <= 0) |
987 | return part ?: -EIO; |
988 | |
989 | if (WARN_ON_ONCE(!sendpage_ok(page))) { |
			iov_iter_revert(&msg->msg_iter, part);
991 | return -EIO; |
992 | } |
993 | |
		sk_msg_page_add(msg_pl, page, part, off);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, part);
998 | *copied += part; |
999 | try_to_copy -= part; |
	} while (try_to_copy && !sk_msg_full(msg_pl));
1001 | |
1002 | return 0; |
1003 | } |
1004 | |
1005 | static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, |
1006 | size_t size) |
1007 | { |
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1009 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
1010 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
1011 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
1012 | bool async_capable = ctx->async_capable; |
1013 | unsigned char record_type = TLS_RECORD_TYPE_DATA; |
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1015 | bool eor = !(msg->msg_flags & MSG_MORE); |
1016 | size_t try_to_copy; |
1017 | ssize_t copied = 0; |
1018 | struct sk_msg *msg_pl, *msg_en; |
1019 | struct tls_rec *rec; |
1020 | int required_size; |
1021 | int num_async = 0; |
1022 | bool full_record; |
1023 | int record_room; |
1024 | int num_zc = 0; |
1025 | int orig_size; |
1026 | int ret = 0; |
1027 | |
1028 | if (!eor && (msg->msg_flags & MSG_EOR)) |
1029 | return -EINVAL; |
1030 | |
1031 | if (unlikely(msg->msg_controllen)) { |
		ret = tls_process_cmsg(sk, msg, &record_type);
1033 | if (ret) { |
1034 | if (ret == -EINPROGRESS) |
1035 | num_async++; |
1036 | else if (ret != -EAGAIN) |
1037 | goto send_end; |
1038 | } |
1039 | } |
1040 | |
1041 | while (msg_data_left(msg)) { |
1042 | if (sk->sk_err) { |
1043 | ret = -sk->sk_err; |
1044 | goto send_end; |
1045 | } |
1046 | |
1047 | if (ctx->open_rec) |
1048 | rec = ctx->open_rec; |
1049 | else |
1050 | rec = ctx->open_rec = tls_get_rec(sk); |
1051 | if (!rec) { |
1052 | ret = -ENOMEM; |
1053 | goto send_end; |
1054 | } |
1055 | |
1056 | msg_pl = &rec->msg_plaintext; |
1057 | msg_en = &rec->msg_encrypted; |
1058 | |
1059 | orig_size = msg_pl->sg.size; |
1060 | full_record = false; |
1061 | try_to_copy = msg_data_left(msg); |
1062 | record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; |
1063 | if (try_to_copy >= record_room) { |
1064 | try_to_copy = record_room; |
1065 | full_record = true; |
1066 | } |
1067 | |
1068 | required_size = msg_pl->sg.size + try_to_copy + |
1069 | prot->overhead_size; |
1070 | |
1071 | if (!sk_stream_memory_free(sk)) |
1072 | goto wait_for_sndbuf; |
1073 | |
1074 | alloc_encrypted: |
		ret = tls_alloc_encrypted_msg(sk, required_size);
1076 | if (ret) { |
1077 | if (ret != -ENOSPC) |
1078 | goto wait_for_memory; |
1079 | |
1080 | /* Adjust try_to_copy according to the amount that was |
1081 | * actually allocated. The difference is due |
1082 | * to max sg elements limit |
1083 | */ |
1084 | try_to_copy -= required_size - msg_en->sg.size; |
1085 | full_record = true; |
1086 | } |
1087 | |
1088 | if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) { |
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;

			if (sk_msg_full(msg_pl))
1096 | full_record = true; |
1097 | |
1098 | if (full_record || eor) |
1099 | goto copied; |
1100 | continue; |
1101 | } |
1102 | |
1103 | if (!is_kvec && (full_record || eor) && !async_capable) { |
1104 | u32 first = msg_pl->sg.end; |
1105 | |
			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
1118 | if (ret) { |
1119 | if (ret == -EINPROGRESS) |
1120 | num_async++; |
1121 | else if (ret == -ENOMEM) |
1122 | goto wait_for_memory; |
1123 | else if (ctx->open_rec && ret == -ENOSPC) |
1124 | goto rollback_iter; |
1125 | else if (ret != -EAGAIN) |
1126 | goto send_end; |
1127 | } |
1128 | continue; |
1129 | rollback_iter: |
1130 | copied -= try_to_copy; |
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
1136 | } |
1137 | |
1138 | required_size = msg_pl->sg.size + try_to_copy; |
1139 | |
		ret = tls_clone_plaintext_msg(sk, required_size);
1141 | if (ret) { |
1142 | if (ret != -ENOSPC) |
1143 | goto send_end; |
1144 | |
1145 | /* Adjust try_to_copy according to the amount that was |
1146 | * actually allocated. The difference is due |
1147 | * to max sg elements limit |
1148 | */ |
1149 | try_to_copy -= required_size - msg_pl->sg.size; |
1150 | full_record = true; |
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
1158 | if (ret < 0) |
1159 | goto trim_sgl; |
1160 | } |
1161 | |
1162 | /* Open records defined only if successfully copied, otherwise |
1163 | * we would trim the sg but not reset the open record frags. |
1164 | */ |
1165 | tls_ctx->pending_open_record_frags = true; |
1166 | copied += try_to_copy; |
1167 | copied: |
1168 | if (full_record || eor) { |
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
1172 | if (ret) { |
1173 | if (ret == -EINPROGRESS) |
1174 | num_async++; |
1175 | else if (ret == -ENOMEM) |
1176 | goto wait_for_memory; |
1177 | else if (ret != -EAGAIN) { |
1178 | if (ret == -ENOSPC) |
1179 | ret = 0; |
1180 | goto send_end; |
1181 | } |
1182 | } |
1183 | } |
1184 | |
1185 | continue; |
1186 | |
1187 | wait_for_sndbuf: |
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
1195 | goto send_end; |
1196 | } |
1197 | |
1198 | if (ctx->open_rec && msg_en->sg.size < required_size) |
1199 | goto alloc_encrypted; |
1200 | } |
1201 | |
1202 | if (!num_async) { |
1203 | goto send_end; |
1204 | } else if (num_zc) { |
1205 | int err; |
1206 | |
1207 | /* Wait for pending encryptions to get completed */ |
1208 | err = tls_encrypt_async_wait(ctx); |
1209 | if (err) { |
1210 | ret = err; |
1211 | copied = 0; |
1212 | } |
1213 | } |
1214 | |
1215 | /* Transmit if any encryptions have completed */ |
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
1223 | return copied > 0 ? copied : ret; |
1224 | } |
1225 | |
1226 | int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) |
1227 | { |
1228 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
1229 | int ret; |
1230 | |
1231 | if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | |
1232 | MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR | |
1233 | MSG_SENDPAGE_NOPOLICY)) |
1234 | return -EOPNOTSUPP; |
1235 | |
1236 | ret = mutex_lock_interruptible(&tls_ctx->tx_lock); |
1237 | if (ret) |
1238 | return ret; |
1239 | lock_sock(sk); |
1240 | ret = tls_sw_sendmsg_locked(sk, msg, size); |
1241 | release_sock(sk); |
	mutex_unlock(&tls_ctx->tx_lock);
1243 | return ret; |
1244 | } |
1245 | |
1246 | /* |
1247 | * Handle unexpected EOF during splice without SPLICE_F_MORE set. |
1248 | */ |
1249 | void tls_sw_splice_eof(struct socket *sock) |
1250 | { |
1251 | struct sock *sk = sock->sk; |
1252 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
1253 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
1254 | struct tls_rec *rec; |
1255 | struct sk_msg *msg_pl; |
1256 | ssize_t copied = 0; |
1257 | bool retrying = false; |
1258 | int ret = 0; |
1259 | |
1260 | if (!ctx->open_rec) |
1261 | return; |
1262 | |
1263 | mutex_lock(&tls_ctx->tx_lock); |
1264 | lock_sock(sk); |
1265 | |
1266 | retry: |
1267 | /* same checks as in tls_sw_push_pending_record() */ |
1268 | rec = ctx->open_rec; |
1269 | if (!rec) |
1270 | goto unlock; |
1271 | |
1272 | msg_pl = &rec->msg_plaintext; |
1273 | if (msg_pl->sg.size == 0) |
1274 | goto unlock; |
1275 | |
1276 | /* Check the BPF advisor and perform transmission. */ |
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
1279 | switch (ret) { |
1280 | case 0: |
1281 | case -EAGAIN: |
1282 | if (retrying) |
1283 | goto unlock; |
1284 | retrying = true; |
1285 | goto retry; |
1286 | case -EINPROGRESS: |
1287 | break; |
1288 | default: |
1289 | goto unlock; |
1290 | } |
1291 | |
1292 | /* Wait for pending encryptions to get completed */ |
1293 | if (tls_encrypt_async_wait(ctx)) |
1294 | goto unlock; |
1295 | |
1296 | /* Transmit if any encryptions have completed */ |
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
1305 | } |
1306 | |
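/* Wait until the strparser has a complete record for us. Returns 1 with
 * the record loaded on success, 0 if the psock queue should be served
 * instead or the socket is shut down / done, and a negative error
 * otherwise (including -EAGAIN for a zero timeout).
 */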
1307 | static int |
1308 | tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, |
1309 | bool released) |
1310 | { |
1311 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
1312 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); |
1313 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
1314 | int ret = 0; |
1315 | long timeo; |
1316 | |
	timeo = sock_rcvtimeo(sk, nonblock);
1318 | |
1319 | while (!tls_strp_msg_ready(ctx)) { |
1320 | if (!sk_psock_queue_empty(psock)) |
1321 | return 0; |
1322 | |
1323 | if (sk->sk_err) |
1324 | return sock_error(sk); |
1325 | |
1326 | if (ret < 0) |
1327 | return ret; |
1328 | |
		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
1331 | if (tls_strp_msg_ready(ctx)) |
1332 | break; |
1333 | } |
1334 | |
1335 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
1336 | return 0; |
1337 | |
		if (sock_flag(sk, SOCK_DONE))
1339 | return 0; |
1340 | |
1341 | if (!timeo) |
1342 | return -EAGAIN; |
1343 | |
1344 | released = true; |
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);
1353 | |
1354 | /* Handle signals */ |
1355 | if (signal_pending(current)) |
1356 | return sock_intr_errno(timeo); |
1357 | } |
1358 | |
	tls_strp_msg_load(&ctx->strp, released);
1360 | |
1361 | return 1; |
1362 | } |
1363 | |
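/* Pin user pages from @from and map up to @length bytes of them into the
 * scatterlist @to, advancing *pages_used. On failure the iterator is
 * reverted so that the caller can fall back to copying.
 */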
1364 | static int tls_setup_from_iter(struct iov_iter *from, |
1365 | int length, int *pages_used, |
1366 | struct scatterlist *to, |
1367 | int to_max_pages) |
1368 | { |
1369 | int rc = 0, i = 0, num_elem = *pages_used, maxpages; |
1370 | struct page *pages[MAX_SKB_FRAGS]; |
1371 | unsigned int size = 0; |
1372 | ssize_t copied, use; |
1373 | size_t offset; |
1374 | |
1375 | while (length > 0) { |
1376 | i = 0; |
1377 | maxpages = to_max_pages - num_elem; |
1378 | if (maxpages == 0) { |
1379 | rc = -EFAULT; |
1380 | goto out; |
1381 | } |
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
1385 | if (copied <= 0) { |
1386 | rc = -EFAULT; |
1387 | goto out; |
1388 | } |
1389 | |
1390 | length -= copied; |
1391 | size += copied; |
1392 | while (copied) { |
1393 | use = min_t(int, copied, PAGE_SIZE - offset); |
1394 | |
			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
1398 | /* We do not uncharge memory from this API */ |
1399 | |
1400 | offset = 0; |
1401 | copied -= use; |
1402 | |
1403 | i++; |
1404 | num_elem++; |
1405 | } |
1406 | } |
1407 | /* Mark the end in the last sg entry if newly added */ |
1408 | if (num_elem > *pages_used) |
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
1413 | *pages_used = num_elem; |
1414 | |
1415 | return rc; |
1416 | } |
1417 | |
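/* Allocate a separate skb to receive the cleartext for the non-zero-copy
 * path, mirroring the strparser metadata of the input skb.
 */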
1418 | static struct sk_buff * |
1419 | tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb, |
1420 | unsigned int full_len) |
1421 | { |
1422 | struct strp_msg *clr_rxm; |
1423 | struct sk_buff *clr_skb; |
1424 | int err; |
1425 | |
	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
1436 | clr_rxm->offset = 0; |
1437 | |
1438 | return clr_skb; |
1439 | } |
1440 | |
1441 | /* Decrypt handlers |
1442 | * |
1443 | * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers. |
 * They must transform the darg in/out argument as follows:
1445 | * | Input | Output |
1446 | * ------------------------------------------------------------------- |
1447 | * zc | Zero-copy decrypt allowed | Zero-copy performed |
1448 | * async | Async decrypt allowed | Async crypto used / in progress |
1449 | * skb | * | Output skb |
1450 | * |
1451 | * If ZC decryption was performed darg.skb will point to the input skb. |
1452 | */ |
1453 | |
/* This function decrypts the input skb into either out_iov, out_sg, or
 * the skb's own buffers. The input parameter 'darg->zc' indicates whether
 * zero-copy mode should be tried. With zero-copy mode, either out_iov or
 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, decryption
 * happens inside the skb's own buffers, i.e. zero-copy gets disabled and
 * 'darg->zc' is updated.
 */
1461 | static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov, |
1462 | struct scatterlist *out_sg, |
1463 | struct tls_decrypt_arg *darg) |
1464 | { |
1465 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
1466 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); |
1467 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
1468 | int n_sgin, n_sgout, aead_size, err, pages = 0; |
1469 | struct sk_buff *skb = tls_strp_msg(ctx); |
1470 | const struct strp_msg *rxm = strp_msg(skb); |
1471 | const struct tls_msg *tlm = tls_msg(skb); |
1472 | struct aead_request *aead_req; |
1473 | struct scatterlist *sgin = NULL; |
1474 | struct scatterlist *sgout = NULL; |
1475 | const int data_len = rxm->full_len - prot->overhead_size; |
1476 | int tail_pages = !!prot->tail_size; |
1477 | struct tls_decrypt_ctx *dctx; |
1478 | struct sk_buff *clear_skb; |
1479 | int iv_offset = 0; |
1480 | u8 *mem; |
1481 | |
	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
1484 | if (n_sgin < 1) |
1485 | return n_sgin ?: -EBADMSG; |
1486 | |
1487 | if (darg->zc && (out_iov || out_sg)) { |
1488 | clear_skb = NULL; |
1489 | |
1490 | if (out_iov) |
			n_sgout = 1 + tail_pages +
				  iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
1495 | } else { |
1496 | darg->zc = false; |
1497 | |
		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
1499 | if (!clear_skb) |
1500 | return -ENOMEM; |
1501 | |
1502 | n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags; |
1503 | } |
1504 | |
1505 | /* Increment to accommodate AAD */ |
1506 | n_sgin = n_sgin + 1; |
1507 | |
1508 | /* Allocate a single block of memory which contains |
1509 | * aead_req || tls_decrypt_ctx. |
1510 | * Both structs are variable length. |
1511 | */ |
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
1516 | if (!mem) { |
1517 | err = -ENOMEM; |
1518 | goto exit_free_skb; |
1519 | } |
1520 | |
1521 | /* Segment the allocated memory */ |
1522 | aead_req = (struct aead_request *)mem; |
1523 | dctx = (struct tls_decrypt_ctx *)(mem + aead_size); |
1524 | dctx->sk = sk; |
1525 | sgin = &dctx->sg[0]; |
1526 | sgout = &dctx->sg[n_sgin]; |
1527 | |
1528 | /* For CCM based ciphers, first byte of nonce+iv is a constant */ |
1529 | switch (prot->cipher_type) { |
1530 | case TLS_CIPHER_AES_CCM_128: |
1531 | dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE; |
1532 | iv_offset = 1; |
1533 | break; |
1534 | case TLS_CIPHER_SM4_CCM: |
1535 | dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE; |
1536 | iv_offset = 1; |
1537 | break; |
1538 | } |
1539 | |
1540 | /* Prepare IV */ |
1541 | if (prot->version == TLS_1_3_VERSION || |
1542 | prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) { |
1543 | memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, |
1544 | prot->iv_size + prot->salt_size); |
1545 | } else { |
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);
1559 | |
1560 | /* Prepare sgin */ |
1561 | sg_init_table(sgin, n_sgin); |
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
1575 | if (err < 0) |
1576 | goto exit_free; |
1577 | } else if (out_iov) { |
1578 | sg_init_table(sgout, n_sgout); |
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
1591 | } |
1592 | } else if (out_sg) { |
1593 | memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); |
1594 | } |
1595 | dctx->free_sgout = !!pages; |
1596 | |
1597 | /* Prepare and submit AEAD request */ |
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
1600 | if (err) { |
1601 | if (darg->async_done) |
1602 | goto exit_free_skb; |
1603 | goto exit_free_pages; |
1604 | } |
1605 | |
1606 | darg->skb = clear_skb ?: tls_strp_msg(ctx); |
1607 | clear_skb = NULL; |
1608 | |
1609 | if (unlikely(darg->async)) { |
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
1613 | return err; |
1614 | } |
1615 | |
1616 | if (unlikely(darg->async_done)) |
1617 | return 0; |
1618 | |
1619 | if (prot->tail_size) |
1620 | darg->tail = dctx->tail; |
1621 | |
1622 | exit_free_pages: |
1623 | /* Release the pages in case iov was mapped to pages */ |
1624 | for (; pages > 0; pages--) |
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
1630 | return err; |
1631 | } |
1632 | |
1633 | static int |
1634 | tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx, |
1635 | struct msghdr *msg, struct tls_decrypt_arg *darg) |
1636 | { |
1637 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); |
1638 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
1639 | struct strp_msg *rxm; |
1640 | int pad, err; |
1641 | |
	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
1643 | if (err < 0) { |
1644 | if (err == -EBADMSG) |
1645 | TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); |
1646 | return err; |
1647 | } |
1648 | /* keep going even for ->async, the code below is TLS 1.3 */ |
1649 | |
1650 | /* If opportunistic TLS 1.3 ZC failed retry without ZC */ |
1651 | if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION && |
1652 | darg->tail != TLS_RECORD_TYPE_DATA)) { |
1653 | darg->zc = false; |
1654 | if (!darg->tail) |
1655 | TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL); |
1656 | TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY); |
1657 | return tls_decrypt_sw(sk, tls_ctx, msg, darg); |
1658 | } |
1659 | |
	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;
1669 | |
1670 | return 0; |
1671 | } |
1672 | |
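/* Device offload (TLS_HW) RX path. Returns 1 if the NIC already decrypted
 * the record (only padding/record-type fixups and the copy to the user
 * buffer remain), 0 to fall back to software decryption, or a negative
 * error.
 */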
1673 | static int |
1674 | tls_decrypt_device(struct sock *sk, struct msghdr *msg, |
1675 | struct tls_context *tls_ctx, struct tls_decrypt_arg *darg) |
1676 | { |
1677 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); |
1678 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
1679 | struct strp_msg *rxm; |
1680 | int pad, err; |
1681 | |
1682 | if (tls_ctx->rx_conf != TLS_HW) |
1683 | return 0; |
1684 | |
1685 | err = tls_device_decrypted(sk, tls_ctx); |
1686 | if (err <= 0) |
1687 | return err; |
1688 | |
	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;
1701 | |
1702 | if (!darg->zc) { |
1703 | /* Non-ZC case needs a real skb */ |
1704 | darg->skb = tls_strp_msg_detach(ctx); |
1705 | if (!darg->skb) |
1706 | return -ENOMEM; |
1707 | } else { |
1708 | unsigned int off, len; |
1709 | |
1710 | /* In ZC case nobody cares about the output skb. |
1711 | * Just copy the data here. Note the skb is not fully trimmed. |
1712 | */ |
1713 | off = rxm->offset + prot->prepend_size; |
1714 | len = rxm->full_len - prot->overhead_size; |
1715 | |
		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
1717 | if (err) |
1718 | return err; |
1719 | } |
1720 | return 1; |
1721 | } |
1722 | |
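/* Decrypt one record: try the device offload path first, then fall
 * back to software. On success the record is trimmed down to cleartext
 * (header and tag stripped) and the expected record sequence number is
 * advanced.
 */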
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}

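/* Decrypt the current strparser message directly into @sgout, always
 * using the scatterlist (zero-copy) path. Non-static so it can be used
 * by callers elsewhere in the TLS code.
 */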
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}

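/* Track the record type across one receive call. On the first record
 * the type is latched and reported to userspace via a
 * TLS_GET_RECORD_TYPE cmsg; for non-data records a failed or truncated
 * cmsg is fatal (-EIO). Returns 1 to continue, 0 when a record of a
 * different type is hit (so the caller stops before mixing types in
 * one read), or a negative error.
 *
 * A userspace consumer might fetch the type roughly like this (a
 * sketch only; error handling omitted, buffer sizes illustrative, and
 * fd is assumed to be an established kTLS socket):
 *
 *	char buf[16384], cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	unsigned char record_type = 0;
 *	struct cmsghdr *c;
 *
 *	recvmsg(fd, &msg, 0);
 *	c = CMSG_FIRSTHDR(&msg);
 *	if (c && c->cmsg_level == SOL_TLS &&
 *	    c->cmsg_type == TLS_GET_RECORD_TYPE)
 *		record_type = *CMSG_DATA(c);
 */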
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}

static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}

/* This function traverses the rx_list in the TLS receive context and
 * copies the decrypted records into the buffer provided by the caller
 * when zero copy is not in use. Further, records are removed from the
 * rx_list if this is not a peek case and the record has been consumed
 * completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record in the non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
more:
	if (more)
		*more = true;
	goto out;
}

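/* Periodically flush the socket backlog so softirq processing is not
 * starved during a long read. Nothing is flushed once enough data has
 * been decrypted to satisfy the request, and flushing is skipped while
 * less than 128K has been copied since the last flush and at least one
 * full record's worth of ciphertext is still queued in TCP.
 */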
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}

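/* TLS RX enforces a single-reader rule: only one thread may own the
 * receive context at a time. Contended acquirers mark the context
 * contended and sleep on ctx->wq until the present reader releases it,
 * honouring the socket receive timeout and pending signals.
 */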
static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}

static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int err;

	lock_sock(sk);
	err = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (err)
		release_sock(sk);
	return err;
}

static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;

		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}

static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}

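/* Main software RX entry point. First drains records that were already
 * decrypted onto rx_list, then pulls records from the strparser one by
 * one: zero copy straight into the user iov when possible, otherwise
 * via an skb copy. Data records may be decrypted asynchronously;
 * reading stops at a record-type boundary so control messages are
 * never merged with data.
 */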
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t peeked = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool rx_more = false;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	psock = sk_psock_get(sk);
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
	if (err < 0)
		goto end;

	copied = err;
	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}

		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);

		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);

		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;

			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);

			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				if (unlikely(control != TLS_RECORD_TYPE_DATA))
					break;
				continue;
			}

			if (bpf_strp_enabled) {
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed)
				chunk = len;

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;

			if (is_peek) {
				peeked += chunk;
				goto put_on_rx_list;
			}

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}

			consume_skb(skb);
		}

		decrypted += chunk;
		len -= chunk;

		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}

recv_end:
	if (async) {
		int ret;

		/* Wait for all previously submitted records to be decrypted */
		ret = tls_decrypt_async_wait(ctx);
		__skb_queue_purge(&ctx->async_hold);

		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			decrypted = 0;
			goto end;
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek)
			err = process_rx_list(ctx, msg, &control, copied + peeked,
					      decrypted - peeked, is_peek, NULL);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek, NULL);

		/* we could have copied less than we wanted, and possibly nothing */
		decrypted += max(err, 0) - async_copy_bytes;
	}

	copied += decrypted;

end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}

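/* splice(2) receive path. Decrypts (or dequeues from rx_list) exactly
 * one data record per call and splices as much of it as fits into the
 * pipe; any remainder is requeued at the head of rx_list. Control
 * records cannot be spliced and yield -EINVAL.
 */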
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}

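/* read_sock() receive path, for in-kernel consumers that supply a
 * read_actor callback. Works through rx_list and freshly decrypted
 * records, handing each chunk of cleartext to @read_actor until it
 * stops accepting data or desc->count is exhausted. Like splice, it
 * refuses control records, and it is not usable while a psock is
 * attached.
 */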
int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = NULL;
	struct sk_buff *skb = NULL;
	struct sk_psock *psock;
	size_t flushed_at = 0;
	bool released = true;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t decrypted;
	int err, used;

	psock = sk_psock_get(sk);
	if (psock) {
		sk_psock_put(sk, psock);
		return -EINVAL;
	}
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto read_sock_end;

	decrypted = 0;
	do {
		if (!skb_queue_empty(&ctx->rx_list)) {
			skb = __skb_dequeue(&ctx->rx_list);
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
		} else {
			struct tls_decrypt_arg darg;

			err = tls_rx_rec_wait(sk, NULL, true, released);
			if (err <= 0)
				goto read_sock_end;

			memset(&darg.inargs, 0, sizeof(darg.inargs));

			err = tls_rx_one_record(sk, NULL, &darg);
			if (err < 0) {
				tls_err_abort(sk, -EBADMSG);
				goto read_sock_end;
			}

			released = tls_read_flush_backlog(sk, prot, INT_MAX,
							  0, decrypted,
							  &flushed_at);
			skb = darg.skb;
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
			decrypted += rxm->full_len;

			tls_rx_rec_done(ctx);
		}

		/* read_sock does not support reading control messages */
		if (tlm->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto read_sock_requeue;
		}

		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
		if (used <= 0) {
			if (!copied)
				err = used;
			goto read_sock_requeue;
		}
		copied += used;
		if (used < rxm->full_len) {
			rxm->offset += used;
			rxm->full_len -= used;
			if (!desc->count)
				goto read_sock_requeue;
		} else {
			consume_skb(skb);
			if (!desc->count)
				skb = NULL;
		}
	} while (skb);

read_sock_end:
	tls_rx_reader_release(sk, ctx);
	return copied ? : err;

read_sock_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto read_sock_end;
}

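/* Readability check used on the poll() path: data is available if the
 * psock ingress queue, the strparser, or rx_list has something
 * pending.
 */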
bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || tls_strp_msg_ready(ctx) ||
	       !skb_queue_empty(&ctx->rx_list);
}

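/* Parse the 5-byte TLS record header to determine the full record
 * length for the strparser. Both TLS 1.2 and TLS 1.3 put the legacy
 * version 0x0303 on the wire, so the header layout is:
 *
 *	byte 0:    content type (e.g. 0x17 = application data)
 *	byte 1:    legacy version major (0x03)
 *	byte 2:    legacy version minor (0x03)
 *	bytes 3-4: payload length, big endian
 *
 * For example, a header of 17 03 03 00 2a describes a record carrying
 * 0x2a = 42 bytes of ciphertext after the header. Returns the total
 * record size (payload plus header), 0 if more data is needed, or a
 * negative error (which also aborts the connection).
 */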
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	strp->mark = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

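/* Called by the strparser when a full record has been queued; forwards
 * the wakeup to the data_ready callback saved at strparser-arm time.
 */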
void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;

	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}

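/* sk_data_ready replacement installed by tls_sw_strparser_arm(). Runs
 * the strparser with GFP_ATOMIC allocations (this may be called from
 * softirq context) and wakes the original callback if the psock
 * ingress queue holds data.
 */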
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;

	trace_sk_data_ready(sk);

	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}

void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}

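/* TX teardown: wait for in-flight async encryption to finish, push out
 * whatever can still be transmitted, then free the partially sent
 * record (if any) and every remaining record on tx_list along with the
 * send AEAD.
 */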
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	tls_encrypt_async_wait(ctx);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}

void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_strp_done(&ctx->strp);
}

void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}

/* The work handler that transmits the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}

static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}

void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

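/* TLS 1.3 hides the true record type behind padding, so zero copy into
 * user buffers is only safe when the peer advertised that it sends no
 * padding (rx_no_pad) or the session is not TLS 1.3.
 */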
void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
			     tls_ctx->prot_info.version != TLS_1_3_VERSION;
}

static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
{
	struct tls_sw_context_tx *sw_ctx_tx;

	if (!ctx->priv_ctx_tx) {
		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
		if (!sw_ctx_tx)
			return NULL;
	} else {
		sw_ctx_tx = ctx->priv_ctx_tx;
	}

	crypto_init_wait(&sw_ctx_tx->async_wait);
	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
	sw_ctx_tx->tx_work.sk = sk;

	return sw_ctx_tx;
}

static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
{
	struct tls_sw_context_rx *sw_ctx_rx;

	if (!ctx->priv_ctx_rx) {
		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
		if (!sw_ctx_rx)
			return NULL;
	} else {
		sw_ctx_rx = ctx->priv_ctx_rx;
	}

	crypto_init_wait(&sw_ctx_rx->async_wait);
	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
	init_waitqueue_head(&sw_ctx_rx->wq);
	skb_queue_head_init(&sw_ctx_rx->rx_list);
	skb_queue_head_init(&sw_ctx_rx->async_hold);

	return sw_ctx_rx;
}

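/* Derive the per-connection size constants from the negotiated version
 * and cipher. For example, AES-GCM-128 under TLS 1.2 uses an 8-byte
 * explicit nonce and a 16-byte tag, giving prepend_size = 5 + 8 = 13
 * and overhead_size = 13 + 16 + 0 = 29; under TLS 1.3 the nonce is
 * implicit and a 1-byte content-type tail is appended, so
 * prepend_size = 5 and overhead_size = 5 + 16 + 1 = 22.
 */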
int init_prot_info(struct tls_prot_info *prot,
		   const struct tls_crypto_info *crypto_info,
		   const struct tls_cipher_desc *cipher_desc)
{
	u16 nonce_size = cipher_desc->nonce;

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
		return -EINVAL;

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = cipher_desc->tag;
	prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
	prot->iv_size = cipher_desc->iv;
	prot->salt_size = cipher_desc->salt;
	prot->rec_seq_size = cipher_desc->rec_seq;

	return 0;
}

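/* Set up the software crypto state for one direction (@tx selects TX
 * vs RX): allocate the per-direction context, derive the protocol
 * sizes, copy salt/IV/record-sequence material into the cipher
 * context, and allocate and key the AEAD transform. The RX side
 * additionally initializes the strparser and decides whether async
 * decrypt and zero copy may be used.
 */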
int tls_set_sw_offload(struct sock *sk, int tx)
{
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_crypto_info *crypto_info;
	char *iv, *rec_seq, *key, *salt;
	struct cipher_context *cctx;
	struct tls_prot_info *prot;
	struct crypto_aead **aead;
	struct tls_context *ctx;
	struct crypto_tfm *tfm;
	int rc = 0;

	ctx = tls_get_ctx(sk);
	prot = &ctx->prot_info;

	if (tx) {
		ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
		if (!ctx->priv_ctx_tx)
			return -ENOMEM;

		sw_ctx_tx = ctx->priv_ctx_tx;
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		ctx->priv_ctx_rx = init_ctx_rx(ctx);
		if (!ctx->priv_ctx_rx)
			return -ENOMEM;

		sw_ctx_rx = ctx->priv_ctx_rx;
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto free_priv;
	}

	rc = init_prot_info(prot, crypto_info, cipher_desc);
	if (rc)
		goto free_priv;

	iv = crypto_info_iv(crypto_info, cipher_desc);
	key = crypto_info_key(crypto_info, cipher_desc);
	salt = crypto_info_salt(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	memcpy(cctx->iv, salt, cipher_desc->salt);
	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
	memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_priv;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}